Column       Type    String length
prompt       string  19 – 1.03M
completion   string  4 – 2.12k
api          string  8 – 90
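Each row below pairs a code prompt that breaks off just before a library call with the completion supplying that call and the fully qualified api name it invokes. A minimal sketch of reading such rows is given here; the JSON-lines layout and the data.jsonl file name are assumptions for illustration, not details stated by this preview.

```python
import json

def iter_examples(path="data.jsonl"):
    """Yield (prompt, completion, api) triples from an assumed JSON-lines dump."""
    with open(path) as fh:  # "data.jsonl" is a placeholder path, not the real file name
        for line in fh:
            record = json.loads(line)
            yield record["prompt"], record["completion"], record["api"]

for prompt, completion, api in iter_examples():
    # Quick sanity check: the completion should invoke the module named in "api".
    print(api, "->", completion[:60])
    break
```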
import argparse import ast import itertools import json import os from multiprocessing import Pool import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from matplotlib.ticker import FuncFormatter from load_data import NGRAM_RESULTS, CHALLENGES from scipy.stats import spearmanr, pearsonr from statsmodels.stats.inter_rater import fleiss_kappa, aggregate_raters, cohens_kappa from sklearn.metrics import cohen_kappa_score try: from ordert.transformers.borgr_code.load_data import LEN, SYN_DEPTH, HUMAN, blimp_human, blimp_gpt, blimp_txl, \ blimp_lstm, blimp_5 except ImportError as e: import sys sys.path.append(os.path.dirname(__file__)) from load_data import LEN, SYN_DEPTH, HUMAN sns.set() # plt.rc('legend', fontsize=20) # plt.rc('labelsize', fontsize=20) # plt.rc('xticks.labelsize', fontsize=20) # # plt.rc('xticks.labelsize', fontsize=20) params = {'legend.fontsize': 'large', # 'figure.figsize': (15, 5), 'axes.labelsize': 'large', 'axes.titlesize':'large', 'xtick.labelsize':'large', 'ytick.labelsize':'large'} plt.rcParams.update(params) # read from BLIMP = "/cs/snapless/oabend/borgr/ordert/blimp/data" POOL_SIZE = 16 BLIMP_SUPER_CAT = {"anaphor agreement": ["anaphor_gender_agreement", "anaphor_number_agreement"], "argument structure": ["animate_subject_passive", "animate_subject_trans", "causative", "drop_argument", "inchoative", "intransitive", "passive_1", "passive_2", "transitive"], "binding": ["principle_A_c_command", "principle_A_case_1", "principle_A_case_2", "principle_A_domain_1", "principle_A_domain_2", "principle_A_domain_3", "principle_A_reconstruction"], "control/raising": ["existential_there_object_raising", "existential_there_subject_raising", "expletive_it_object_raising", "tough_vs_raising_1", "tough_vs_raising_2"], "determiner noun agreement": ["determiner_noun_agreement_1", "determiner_noun_agreement_2", "determiner_noun_agreement_irregular_1", "determiner_noun_agreement_irregular_2", "determiner_noun_agreement_with_adj_1", "determiner_noun_agreement_with_adj_2", "determiner_noun_agreement_with_adj_irregular_1", "determiner_noun_agreement_with_adj_irregular_2"], "elipsis": ["ellipsis_n_bar_1", "ellipsis_n_bar_2"], "filler gap": ["wh_questions_object_gap", "wh_questions_subject_gap", "wh_questions_subject_gap_long_distance", "wh_vs_that_no_gap", "wh_vs_that_no_gap_long_distance", "wh_vs_that_with_gap", "wh_vs_that_with_gap_long_distance"], "irregular forms": ["irregular_past_participle_adjectives", "irregular_past_participle_verbs"], "island effects": ["adjunct_island", "complex_NP_ _island", "coordinate_structure_constraint_complex_left_branch", "coordinate_structure_constraint_object_extraction", "left_branch_island_echo_question", "left_branch_island_simple_question", "sentential_subject_island", "wh_island "], "npi licensing": ["matrix_question_npi_licensor_present", "npi_present_1", "npi_present_2", "only_npi_licensor_present", "only_npi_scope", "sentential_negation_npi_licensor_present", "sentential_negation_npi_scope"], "quantifiers": ["existential_there_quantifiers_1", "existential_there_quantifiers_2", "superlative_quantifiers_1", "superlative_quantifiers_2"], "subject verb agreement": ["distractor_agreement_relational_noun", "distractor_agreement_relative_clause", "irregular_plural_subject_verb_agreement_1", "irregular_plural_subject_verb_agreement_2", "regular_plural_subject_verb_agreement_1", "regular_plural_subject_verb_agreement_2"], } def accuracy_from_file(file): answers = correct_from_file(file) correct = sum(answers) wrong 
= len(answers) - correct accuracy = correct / len(answers) if answers else 0 if wrong + correct == 0: print(f"corrupt file {file}") return accuracy def average_correlation(orders, other_orders=None, pearson=True): corr = 0 # ranks = orders ranks = [] if not pearson: unique_orders = [] for order in orders: unique_order = [] for item in order: if item not in unique_order: unique_order.append(item) unique_orders.append(unique_order) orders = unique_orders for order in orders: if pearson: ranks.append(order) else: ranks.append([orders[0].index(item) for item in order]) if other_orders is None: pairs = itertools.combinations(ranks, 2) else: pairs = itertools.product(orders, other_orders) for pair_num, (rank_a, rank_b) in enumerate(pairs): if pearson: corr += pearsonr(rank_a, rank_b)[0] else: corr += spearmanr(rank_a, rank_b)[0] corr = corr / (pair_num + 1) return corr def learnt_orders(df, scores, measure="steps", pearson=True): """ returns the order of each challenge in each step as a list (model, steps, order) :param df: :param scores: :return: """ orders = [] df = df.drop_duplicates(["model", "challenge", measure]) for model in df["model"].unique(): if pearson: complexity_order = df[(df[measure] == scores) & (df["model"] == model)].sort_values("challenge")[ "accuracy"].tolist() else: complexity_order = df[(df[measure] == scores) & (df["model"] == model)].sort_values("accuracy")[ "challenge"].tolist() if complexity_order: if orders and len(orders[0][-1]) != len(complexity_order): print( f"warning wrong lengths in scores {scores} model {model} and {df['model'].unique()[-1]}, skipping ") else: orders.append((model, scores, complexity_order)) return orders def learnt_perp_orders(df, perplexity, pearson=True): """ returns the order of each challenge in each step as a list (model, perplexity, order) :param df: :param perplexity: :return: """ orders = [] df = df.drop_duplicates(["model", "challenge", measure]) for model in df["model"].unique(): close_perp = find_nearest(df[df["model"] == model]["perplexity"].unique(), perplexity) if pearson: complexity_order = df[(df["perplexity"] == close_perp) & (df["model"] == model)].sort_values("challenge")[ "accuracy"].tolist() else: complexity_order = df[(df["perplexity"] == close_perp) & (df["model"] == model)].sort_values("accuracy")[ "challenge"].tolist() if complexity_order: if len(complexity_order) != 67 or (orders and len(orders[0][-1]) != len(complexity_order)): print( f"warning wrong lengths in perplexity {perplexity} model {model} and {df['model'].unique()[-1]}, skipping ") return learnt_perp_orders(df[~((df["perplexity"] == close_perp) & (df["model"] == model))], perplexity) else: orders.append((model, perplexity, complexity_order)) return orders def correlate_with_base(df_base, df, name="", pearson=True, y_min=None): if name: name = name + "_" correlations_by_steps = [] for steps in set(df["steps"].unique()) & set(df_base["steps"].unique()): orders = learnt_orders(df, steps, pearson=pearson) orders = [order[-1] for order in orders] base_orders = learnt_orders(df_base, steps, pearson=pearson) base_orders = [order[-1] for order in base_orders] cor = average_correlation(orders, base_orders, pearson=pearson) correlations_by_steps.append((steps, cor)) correlations_by_steps = pd.DataFrame(correlations_by_steps, columns=["steps", "correlation"]) ax = sns.lineplot(x="steps", y="correlation", data=correlations_by_steps) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) # plt.legend(loc="best") plt.xlabel("10K 
Steps") plt.ylabel("Correlation") plt.ylim(bottom=y_min) if pearson: name = f"pearson_{name}" # plt.title("average spearman correlation of challenges rank as a function of steps") plt.savefig(os.path.join(graphs_path, f"{name}correlation_with_base_by_steps.png")) plt.clf() correlations_by_perplexity = [] base_perplexities = base_df["perplexity"].unique() perplexities_range = np.linspace(base_perplexities.min(), base_perplexities.max(), 15) for perplexity in perplexities_range: orders = learnt_perp_orders(df, perplexity) orders = [order[-1] for order in orders] base_orders = learnt_perp_orders(df_base, perplexity) base_orders = [order[-1] for order in base_orders] cor = average_correlation(orders, base_orders, pearson=pearson) correlations_by_perplexity.append((perplexity, cor)) correlations_by_perplexity = pd.DataFrame(correlations_by_perplexity, columns=["perplexity", "correlation"]) sns.lineplot(x="perplexity", y="correlation", data=correlations_by_perplexity) # plt.legend(loc="best") plt.xlabel("Preplexity") plt.ylabel("Correlation") plt.ylim(bottom=y_min) plt.gca().invert_xaxis() # plt.title("average spearman correlation of challenges rank as a function of perplexity") plt.savefig(os.path.join(graphs_path, f"{name}correlation_with_base_by_perplexity.png")) plt.clf() def correlate_sets_of_models(dfs, name="", save=True, pearson=True): max_steps = min((min(df.groupby(["model"])["steps"].max()) for _, df in dfs)) for df_name, df in dfs: correlations_by_steps = calc_correlation_by_step(df[df["steps"] <= max_steps], measure="steps", pearson=pearson) ax = sns.lineplot(x="steps", y="correlation", data=correlations_by_steps, label=df_name.capitalize()) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Correlation") # plt.legend(loc="best") # plt.title("average spearman correlation of challenges rank as a function of steps") if pearson: name = f"pearson_{name}" if save: plt.savefig(os.path.join(graphs_path, f"{name}correlation_by_steps.png")) plt.clf() def calc_correlation_by_step(df, measure="steps", pearson=True): correlations_by_steps = [] expected_number_of_models = len(df["model"].unique()) for steps in df[measure].unique(): orders = learnt_orders(df, steps) orders = [order[-1] for order in orders] if len(orders) == expected_number_of_models: cor = average_correlation(orders, pearson=pearson) correlations_by_steps.append((steps, cor)) correlations_by_steps = pd.DataFrame(correlations_by_steps, columns=[measure, "correlation"]) return correlations_by_steps def correlate_models(df, name="", save=True): # calculate correlation between models on how hard each phenomenon is if name: name = name + "_" correlations_by_steps = calc_correlation_by_step(df, measure="steps") ax = sns.lineplot(x="steps", y="correlation", data=correlations_by_steps) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Correlation") # plt.legend(loc="best") # plt.title("average spearman correlation of challenges rank as a function of steps") if save: plt.savefig(os.path.join(graphs_path, f"{name}correlation_by_steps.png")) plt.clf() def plot_fields(df): # plot per challenge together (on line per challenge) group = df.groupby(["steps", "challenge"]).mean() group = group["accuracy"].unstack() for field in df["field"].unique(): field_df = df[df["field"] == field] for challenge in field_df["challenge"].unique(): ax = sns.lineplot(data=group[challenge], 
label=challenge.capitalize()) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Accuracy") # plt.title("Averaged") plt.legend(loc="best").remove() # Shrink current axis by 20% ax = plt.gca() box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.7, box.height]) # Put a legend to the right of the current axis l = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig(os.path.join(graphs_path, f"{field.replace(os.sep, '+')}.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() def plot_categories(df): # plot per challenge together (on line per challenge) group = df.groupby(["steps", "challenge"]).mean() group = group["accuracy"].unstack() for category in BLIMP_SUPER_CAT: for challenge in df["challenge"].unique(): if challenge in BLIMP_SUPER_CAT[category]: ax = sns.lineplot(data=group[challenge], label=challenge.capitalize()) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Accuracy") # plt.title("averaged") plt.legend(loc="best").remove() # Shrink current axis by 20% ax = plt.gca() box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.7, box.height]) # Put a legend to the right of the current axis l = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig(os.path.join(graphs_path, f"{category.replace(os.sep, '+')}.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() def all_challenges(df): # plot per challenge together (on line per challenge) group = df.groupby(["steps", "challenge"]).mean() group = group["accuracy"].unstack() for challenge in df["challenge"].unique(): ax = sns.lineplot(data=group[challenge], label=challenge.capitalize()) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Accuracy") # plt.title("averaged") plt.legend(loc="best").remove() # Shrink current axis by 20% ax = plt.gca() box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.7, box.height]) # Put a legend to the right of the current axis l = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 4}) plt.savefig(os.path.join(graphs_path, f"aaggregation_steps.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() def rename_models(legend): for name in legend.get_texts(): new_name = name.get_text() if new_name == "gpt2": new_name = "GPT2$_{small}$" elif "seed" in new_name and "gpt" in new_name: new_name = "GPT2$_{tiny}^" + new_name[-1] + "$" if "gpt2Small" in new_name: new_name = "GPT$_{tiny" + new_name[-1] + "}$" new_name = new_name.replace("gpt", "GPT") new_name = new_name.replace("xl", "TransformerXL") new_name = new_name.replace("TransformerXLSmallTransformerXL", "XL$_{Small}") name.set_text(new_name) def average_accuracy(df, plot_steps=True, plot_perplexity=True, max_perp=30): out_path = os.path.join(graphs_path) os.makedirs(out_path, exist_ok=True) # df = df.groupby("challenge").mean() # for challenge in df["challenge"].unique(): measure = "Steps" if plot_steps else "Perplexity" for model in sorted(df["model"].unique()): line = df[df["model"] == model].groupby([measure.lower()]).mean() ax = sns.lineplot(x=line.index, y="accuracy", data=line, label=model.capitalize()) # plt.title(challenge) l = plt.legend(loc="best", bbox_to_anchor=(1, 0.5)) rename_models(l) if measure == "Steps": ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) 
plt.xlabel("10K Steps") else: plt.xlabel(measure) plt.ylabel("Accuracy") plt.savefig(os.path.join(out_path, f"average_{measure.lower()}.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() def per_challenge(df, plot_steps=True, plot_perplexity=True, max_perp=30): # Plot line per model (each challenge on a separate plot) out_path = os.path.join(graphs_path, "per_challenge") os.makedirs(out_path, exist_ok=True) for challenge in df["challenge"].unique(): if plot_steps: for model in sorted(df["model"].unique()): ax = sns.lineplot(x="steps", y="accuracy", data=df[(df["model"] == model) & (df["challenge"] == challenge)], label=model) # plt.title(challenge) l = plt.legend(loc="center left", bbox_to_anchor=(1, 0.5)) rename_models(l) ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x / 10000), ','))) plt.xlabel("10K Steps") plt.ylabel("Accuracy") plt.savefig(os.path.join(out_path, f"{challenge}_steps.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() if plot_perplexity: df_perp = df[df["perplexity"] < max_perp] # Initialize figure and ax fig, ax = plt.subplots() ax.set(xscale="log") for i, model in enumerate(sorted(df_perp["model"].unique())): sns.lineplot(x="perplexity", y="accuracy", ax=ax, data=df_perp[(df_perp["model"] == model) & (df_perp["challenge"] == challenge)], label=model) ax.invert_xaxis() # plt.title(challenge.capitalize()) ax.grid(False) plt.xlabel("Perplexity") plt.ylabel("Accuracy") l = plt.legend(loc="center left", bbox_to_anchor=(1, 0.5)) rename_models(l) plt.savefig(os.path.join(out_path, f"{challenge}_perplexity.png"), bbox_extra_artists=(l,), bbox_inches='tight') plt.clf() def correct_from_file(file): res = [] with open(file) as fl: for i, line in enumerate(fl): lm_loss = float(line.strip().strip("[]")) if i % 2 == 1: # lm_loss is like perplexity (need e^ [loss * token num]), lower is better bad_loss = lm_loss if bad_loss > good_loss: res.append(1) else: res.append(0) else: good_loss = lm_loss if len(res) > 1000: print(f"Wrong number of lines, assuming to many time written {len(res)} {file}") res = res[:1000] assert len(res) == 1000, f"{len(res)} {file}" return res def find_first(array, value): for i, arr_val in enumerate(array): if arr_val - value > 0: return arr_val return arr_val def find_nearest(array, value): n = [abs(i - value) for i in array] idx = n.index(min(n)) return array[idx] def calculate_outer_agreement(models_df, base_df): # Plot line per model (each challenge on a separate plot) kappas = [] base_perplexities = base_df["perplexity"].unique() perplexities_range = np.linspace(base_perplexities.min(), base_perplexities.max(), 10) print("Calculating outer agreement...") for challenge in models_df["challenge"].unique(): for perplexity in perplexities_range: sub_df = models_df[(models_df["challenge"] == challenge)] corrects = [] for model in models_df["model"].unique(): close_perp = find_nearest(sub_df[sub_df["model"] == model]["perplexity"].unique(), perplexity) correct = sub_df[(sub_df["model"] == model) & (sub_df["perplexity"] == close_perp)]["correct"].tolist() if correct: corrects.append(ast.literal_eval(correct[0])) correct = [1 if x > 0.5 else 0 for x in np.mean(corrects, axis=0)] close_perp = find_nearest(base_df["perplexity"].unique(), perplexity) base_correct = base_df[(base_df["perplexity"] == close_perp)]["correct"].tolist() base_correct = ast.literal_eval(base_correct[0]) # raters = aggregate_raters(np.array(corrects).T, 2)[0] kappas.append((challenge, perplexity, cohen_kappa_score(base_correct, correct))) df 
=
pd.DataFrame(kappas, columns=["challenge", "perplexity", "kappa"])
pandas.DataFrame
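The prompt above ends at `df =`, and the expected completion is `pd.DataFrame(kappas, columns=["challenge", "perplexity", "kappa"])`, turning the accumulated (challenge, perplexity, kappa) tuples into a frame. A self-contained sketch of that pattern follows; the challenge names are taken from the row's BLIMP_SUPER_CAT dictionary, while the perplexity and kappa values are toy numbers standing in for real cohen_kappa_score outputs.

```python
import pandas as pd

# Toy stand-ins for the (challenge, perplexity, kappa) tuples collected by
# calculate_outer_agreement; the kappa values here are invented.
kappas = [
    ("anaphor_gender_agreement", 12.5, 0.41),
    ("anaphor_gender_agreement", 25.0, 0.28),
    ("adjunct_island", 12.5, 0.10),
]
df = pd.DataFrame(kappas, columns=["challenge", "perplexity", "kappa"])
print(df.groupby("challenge")["kappa"].mean())  # average agreement per challenge
```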
import pandas as pd import numpy as np import os import shutil import scipy import tensorflow as tf from sklearn.preprocessing import StandardScaler, OneHotEncoder, QuantileTransformer from sklearn.compose import ColumnTransformer from math import ceil from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, Flatten, concatenate, LeakyReLU, ReLU, Embedding, Activation from IPython.display import clear_output, display, Image, Video import matplotlib.pyplot as plt from tqdm.auto import tqdm import tensorflow_probability as tfp tfd = tfp.distributions class TabGAN: """ Class for creating a tabular GAN that can also generate counterfactual explanations through a post-processing step. """ def __init__(self, data, batch_size=500, n_hidden_layers=2, n_hidden_generator_layers=None, n_hidden_critic_layers=None, dim_hidden=256, dim_hidden_generator=None, dim_hidden_critic=None, dim_latent=128, gumbel_temperature=0.5, n_critic=5, wgan_lambda=10, quantile_transformation_int=True, quantile_rand_transformation=True, n_quantiles_int=1000, qtr_spread=0.4, qtr_lbound_apply=0.05, adam_amsgrad=False, optimizer="adam", opt_lr=0.0002, adam_beta1=0, adam_beta2=0.999, sgd_momentum=0.0, sgd_nesterov=False, rmsprop_rho=0.9, rmsprop_momentum=0, rmsprop_centered=False, ckpt_dir=None, ckpt_every=None, ckpt_max_to_keep=None, ckpt_name="ckpt_epoch", noise_discrete_unif_max=0, use_query=True, tf_make_train_step_graph=True, jit_compile_train_step=False): # Create variable defaults if needed if n_hidden_generator_layers is None: n_hidden_generator_layers = n_hidden_layers if n_hidden_critic_layers is None: n_hidden_critic_layers = n_hidden_layers if dim_hidden_generator is None: dim_hidden_generator = dim_hidden if dim_hidden_critic is None: dim_hidden_critic = dim_hidden if isinstance(dim_hidden_generator, list): assert len(dim_hidden_generator) == n_hidden_generator_layers else: dim_hidden_generator = [dim_hidden_generator] * n_hidden_generator_layers if isinstance(dim_hidden_critic, list): assert len(dim_hidden_critic) == n_hidden_critic_layers else: dim_hidden_critic = [dim_hidden_critic] * n_hidden_critic_layers # Initialize variables self.data = data self.columns = data.columns self.n_columns = len(self.columns) self.nrow = data.shape[0] self.batch_size = batch_size self.n_hidden_generator_layers = n_hidden_generator_layers self.n_hidden_critic_layers = n_hidden_critic_layers self.dim_latent = dim_latent self.dim_hidden_generator = dim_hidden_generator self.dim_hidden_critic = dim_hidden_critic self.gumbel_temperature = gumbel_temperature self.optimizer = optimizer self.n_critic = n_critic self.wgan_lambda = wgan_lambda self.opt_lr = opt_lr self.adam_beta1 = adam_beta1 self.adam_beta2 = adam_beta2 self.adam_amsgrad = adam_amsgrad self.sgd_momentum = sgd_momentum self.sgd_nesterov = sgd_nesterov self.rmsprop_rho = rmsprop_rho self.rmsprop_momentum = rmsprop_momentum self.rmsprop_centered = rmsprop_centered self.ckpt_dir = ckpt_dir self.ckpt_every = ckpt_every self.ckpt_max_to_keep = ckpt_max_to_keep self.ckpt_name = ckpt_name self.ckpt_prefix = os.path.join(self.ckpt_dir, self.ckpt_name) if not self.ckpt_dir is None else None self.noise_discrete_unif_max = noise_discrete_unif_max self.quantile_transformation_int = quantile_transformation_int self.quantile_rand_transformation = quantile_rand_transformation self.n_quantiles_int = n_quantiles_int self.initialized_gan = False self.qtr_spread = qtr_spread self.qtr_lbound_apply = qtr_lbound_apply self.use_query = use_query 
self.tf_make_train_step_graph = tf_make_train_step_graph self.jit_compile_train_step = jit_compile_train_step # Separate numeric data, fit numeric scaler and scale numeric data. Store numeric column names. self.data_num = data.select_dtypes(include=np.number) self.columns_num = self.data_num.columns self.n_columns_num = len(self.data_num.columns) self.columns_num_int_mask = self.data_num.dtypes.astype(str).str.contains("int") self.columns_int = self.columns_num[self.columns_num_int_mask] self.columns_num_int_pos = np.arange(len(self.columns_num))[self.columns_num_int_mask] self.columns_float = self.columns_num[np.logical_not(self.columns_num_int_mask)] if self.quantile_transformation_int: self.scaler_num = ColumnTransformer(transformers=[("float", StandardScaler(), self.columns_float), ( "int", QuantileTransformer(n_quantiles=n_quantiles_int, output_distribution='normal'), self.columns_int)]) self.data_num_scaled = self.scaler_num.fit_transform(self.data_num) if self.quantile_rand_transformation: self.data_num_scaled = self.randomize_quantile_transformation(self.data_num_scaled) else: self.scaler_num = StandardScaler() self.data_num_scaled = self.scaler_num.fit_transform(self.data_num) self.columns_num_int_mask = None self.columns_int = None self.columns_float = None self.columns_num_int_pos = None # Separate discrete data, fit one-hot-encoder, perform one hot encoding. Store discrete column names # and store the number of categories for each discrete variable self.data_discrete = data.select_dtypes(exclude=np.number) self.columns_discrete = self.data_discrete.columns self.n_columns_discrete = len(self.columns_discrete) self.oh_encoder = OneHotEncoder(sparse=False) self.data_discrete_oh = self.oh_encoder.fit_transform(self.data_discrete) self.n_columns_discrete_oh = self.data_discrete_oh.shape[1] if (self.noise_discrete_unif_max > 0): noise_discrete = np.random.uniform(low = 0, high = self.noise_discrete_unif_max, size = self.data_discrete_oh.shape) self.data_discrete_oh += noise_discrete * np.where(self.data_discrete_oh > 0.5, -1, 1) self.categories_len = [len(i) for i in self.oh_encoder.categories_] # Create Gumbel-activation function tf.keras.utils.get_custom_objects().update({'gumbel_softmax': Activation(self.gumbel_softmax)}) # Create generator and critic objects as well as critic and generator optimizer self.initialize_gan() # If needed create checkpoint manager if (self.ckpt_dir != None): self.initialize_cptk() def initialize_gan(self, tf_make_train_step_graph=True): """ Internal function used for initializing the GAN architecture """ # Create generator and critic objects self.generator = self.create_generator() self.critic = self.create_critic() # Create optimizers for generator and critic if self.optimizer.lower() == "adam": self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=self.opt_lr, beta_1=self.adam_beta1, beta_2=self.adam_beta2, amsgrad=self.adam_amsgrad) self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=self.opt_lr, beta_1=self.adam_beta1, beta_2=self.adam_beta2, amsgrad=self.adam_amsgrad) elif self.optimizer.lower() == "sgd": self.generator_optimizer = tf.keras.optimizers.SGD(learning_rate=self.opt_lr, momentum=self.sgd_momentum, nesterov=self.sgd_nesterov) self.critic_optimizer = tf.keras.optimizers.SGD(learning_rate=self.opt_lr, momentum=self.sgd_momentum, nesterov=self.sgd_nesterov) elif self.optimizer.lower() == "rmsprop": self.generator_optimizer = self.tf.keras.optimizers.RMSprop(learning_rate=self.opt_lr, rho=self.rmsprop_rho, 
momentum=self.rmsprop_rho, centered=self.rmsprop_centered) self.critic_optimizer = self.tf.keras.optimizers.RMSprop(learning_rate=self.opt_lr, rho=self.rmsprop_rho, momentum=self.rmsprop_rho, centered=self.rmsprop_centered) else: raise ValueError("Optimizer name not recognized. Currently only implemented optimizers: adam, sgd and rmsprop") self.start_epoch = 0 if tf_make_train_step_graph: if tf.__version__ < "2.5": jit_compile_args = {"experimental_compile" : self.jit_compile_train_step} else: jit_compile_args = {"jit_compile" : self.jit_compile_train_step} self.train_step = tf.function(self.train_step_func, **jit_compile_args) else: self.train_step = self.train_step_func self.initialized_gan = True def randomize_quantile_transformation(self, data): """ Internal function for performing the randomized quantile transformation """ qt_transformer = self.scaler_num.named_transformers_["int"] references = np.copy(qt_transformer.references_) quantiles = np.copy(qt_transformer.quantiles_) lower_bound_references = 1e-7 references[[0, -1]] = [lower_bound_references, 1 - lower_bound_references] for i, col in enumerate(self.columns_num_int_pos): quantiles_curr = quantiles[:, i] quantiles_unique_integer = np.unique(quantiles_curr) quantiles_unique_integer = quantiles_unique_integer[np.isclose(np.mod(quantiles_unique_integer, 1), 0)] for integer in quantiles_unique_integer: curr_references = references[np.isclose(quantiles_curr, integer)] n_curr_references = curr_references.shape[0] if (n_curr_references >= self.qtr_lbound_apply * self.n_quantiles_int): mask = self.data_num[self.columns_int[i]] == integer n_obs_curr = np.sum(mask) curr_reference_range = curr_references[-1] - curr_references[0] low = curr_references[0] + curr_reference_range * (0.5 - self.qtr_spread / 2) high = curr_references[0] + curr_reference_range * (0.5 + self.qtr_spread / 2) data[mask, col] = scipy.stats.norm.ppf(np.random.uniform(low=low, high=high, size=n_obs_curr)) return data def initialize_cptk(self): """ Internal function for initializing checkpoint manager used to save the progress of the model. """ os.makedirs(self.ckpt_dir, exist_ok=True) self.ckpt = tf.train.Checkpoint(epoch=tf.Variable(0), generator_opt=self.generator_optimizer, critic_opt=self.critic_optimizer, generator=self.generator, critic=self.critic) self.ckpt_manager = tf.train.CheckpointManager(self.ckpt, self.ckpt_dir, max_to_keep=self.ckpt_max_to_keep, checkpoint_name=self.ckpt_name) def inv_data_transform(self, data_num_scaled, data_discrete_oh): """ Internal function used for inverting the data transformation done in preprocessing """ data_discrete = pd.DataFrame(self.oh_encoder.inverse_transform(data_discrete_oh), columns=self.columns_discrete) if (self.quantile_transformation_int): if (len(self.columns_float) > 0): data_float = pd.DataFrame(self.scaler_num.named_transformers_["float"].inverse_transform( data_num_scaled[:, np.logical_not(self.columns_num_int_mask)]), columns=self.columns_float) else: data_float = None if (len(self.columns_int) > 0): data_int_scaled = pd.DataFrame(data_num_scaled[:, self.columns_num_int_mask], columns=self.columns_int) data_int = pd.DataFrame(self.scaler_num.named_transformers_["int"].inverse_transform(data_int_scaled), columns=self.columns_int) else: data_int = None else: data_float = pd.DataFrame(self.scaler_num.inverse_transform(data_num_scaled), columns=self.columns_num) data_int = None return (
pd.concat([data_float, data_int, data_discrete], axis=1)
pandas.concat
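This row's target call, `pd.concat([data_float, data_int, data_discrete], axis=1)`, reassembles the inverse-transformed numeric and discrete blocks column-wise. In the surrounding inv_data_transform, data_float or data_int may be None, and pandas.concat silently drops None entries from the list, which is what lets the single call cover every case. A toy illustration:

```python
import pandas as pd

data_float = pd.DataFrame({"height": [1.73, 1.81]})
data_int = None  # e.g. no integer columns after the inverse quantile transform
data_discrete = pd.DataFrame({"color": ["red", "blue"]})

# pd.concat silently drops None entries in the list, so the same call works
# whether or not every block (float / int / discrete) is present.
full = pd.concat([data_float, data_int, data_discrete], axis=1)
print(full)
```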
import atexit import gc import json import os import shutil import tempfile from os.path import exists from random import randrange from tempfile import mkdtemp import pandas as pd import yaml from rlrd.util import partial, save_json, partial_to_dict, partial_from_dict, load_json, dump, load, git_info from rlrd.training import Training import rlrd.sac import rlrd.sac_models_rd import rlrd.dcac import rlrd.dcac_models import rlrd.envs def iterate_episodes(run_cls: type = Training, checkpoint_path: str = None): """Generator [1] yielding episode statistics (list of pd.Series) while running and checkpointing - run_cls: can by any callable that outputs an appropriate run object (e.g. has a 'run_epoch' method) [1] https://docs.python.org/3/howto/functional.html#generators """ checkpoint_path = checkpoint_path or tempfile.mktemp("_remove_on_exit") try: if not exists(checkpoint_path): print("=== specification ".ljust(70, "=")) print(yaml.dump(partial_to_dict(run_cls), indent=3, default_flow_style=False, sort_keys=False), end="") run_instance = run_cls() dump(run_instance, checkpoint_path) print("") else: print("\ncontinuing...\n") run_instance = load(checkpoint_path) while run_instance.epoch < run_instance.epochs: # time.sleep(1) # on network file systems writing files is asynchronous and we need to wait for sync yield run_instance.run_epoch() # yield stats data frame (this makes this function a generator) print("") dump(run_instance, checkpoint_path) # we delete and reload the run_instance from disk to ensure the exact same code runs regardless of interruptions del run_instance gc.collect() run_instance = load(checkpoint_path) finally: if checkpoint_path.endswith("_remove_on_exit") and exists(checkpoint_path): os.remove(checkpoint_path) def log_environment_variables(): """add certain relevant environment variables to our config usage: `LOG_VARIABLES='HOME JOBID' python ...` """ return {k: os.environ.get(k, '') for k in os.environ.get('LOG_VARIABLES', '').strip().split()} def run(run_cls: type = Training, checkpoint_path: str = None): list(iterate_episodes(run_cls, checkpoint_path)) def run_wandb(entity, project, run_id, run_cls: type = Training, checkpoint_path: str = None): """run and save config and stats to https://wandb.com""" wandb_dir = mkdtemp() # prevent wandb from polluting the home directory atexit.register(shutil.rmtree, wandb_dir, ignore_errors=True) # clean up after wandb atexit handler finishes import wandb config = partial_to_dict(run_cls) config['seed'] = config['seed'] or randrange(1, 1000000) # if seed == 0 replace with random config['environ'] = log_environment_variables() config['git'] = git_info() resume = checkpoint_path and exists(checkpoint_path) wandb.init(dir=wandb_dir, entity=entity, project=project, id=run_id, resume=resume, config=config) for stats in iterate_episodes(run_cls, checkpoint_path): [wandb.log(json.loads(s.to_json())) for s in stats] def run_fs(path: str, run_cls: type = Training): """run and save config and stats to `path` (with pickle)""" if not exists(path): os.mkdir(path) save_json(partial_to_dict(run_cls), path + '/spec.json') if not exists(path + '/stats'): dump(
pd.DataFrame()
pandas.DataFrame
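Here the completion is simply `pd.DataFrame()`: run_fs seeds the run directory with an empty stats frame before any episode statistics exist. The sketch below shows the general accumulate-into-an-empty-frame idea with made-up epoch data; how rlrd actually appends its per-episode pd.Series objects is not shown in this excerpt.

```python
import pandas as pd

stats = pd.DataFrame()  # empty frame standing in for the freshly dumped stats file
for epoch in range(3):
    # invented per-episode statistics; rlrd would yield real pd.Series objects here
    episode = pd.Series({"epoch": epoch, "return": 10.0 * epoch})
    stats = pd.concat([stats, episode.to_frame().T], ignore_index=True)
print(stats)
```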
import numpy as np import pytest from pandas import Categorical, Series import pandas._testing as tm @pytest.mark.parametrize( "keep, expected", [ ("first", Series([False, False, False, False, True, True, False])), ("last", Series([False, True, True, False, False, False, False])), (False, Series([False, True, True, False, True, True, False])), ], ) def test_drop_duplicates(any_numpy_dtype, keep, expected): tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype)) if tc.dtype == "bool": pytest.skip("tested separately in test_drop_duplicates_bool") tm.assert_series_equal(tc.duplicated(keep=keep), expected) tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) sc = tc.copy() return_value = sc.drop_duplicates(keep=keep, inplace=True) assert return_value is None tm.assert_series_equal(sc, tc[~expected]) @pytest.mark.parametrize( "keep, expected", [ ("first", Series([False, False, True, True])), ("last", Series([True, True, False, False])), (False, Series([True, True, True, True])), ], ) def test_drop_duplicates_bool(keep, expected): tc = Series([True, False, True, False]) tm.assert_series_equal(tc.duplicated(keep=keep), expected) tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) sc = tc.copy() return_value = sc.drop_duplicates(keep=keep, inplace=True) tm.assert_series_equal(sc, tc[~expected]) assert return_value is None @pytest.mark.parametrize("values", [[], list(range(5))]) def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values): tc = Series(values, dtype=np.dtype(any_numpy_dtype)) expected = Series([False] * len(tc), dtype="bool") if tc.dtype == "bool": # 0 -> False and 1-> True # any other value would be duplicated tc = tc[:2] expected = expected[:2] tm.assert_series_equal(tc.duplicated(keep=keep), expected) result_dropped = tc.drop_duplicates(keep=keep) tm.assert_series_equal(result_dropped, tc) # validate shallow copy assert result_dropped is not tc class TestSeriesDropDuplicates: @pytest.fixture( params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"] ) def dtype(self, request): return request.param @pytest.fixture def cat_series1(self, dtype, ordered): # Test case 1 cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype)) input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype)) cat = Categorical(input1, categories=cat_array, ordered=ordered) tc1 = Series(cat) return tc1 def test_drop_duplicates_categorical_non_bool(self, cat_series1): tc1 = cat_series1 expected = Series([False, False, False, True]) result = tc1.duplicated() tm.assert_series_equal(result, expected) result = tc1.drop_duplicates() tm.assert_series_equal(result, tc1[~expected]) sc = tc1.copy() return_value = sc.drop_duplicates(inplace=True) assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
pandas._testing.assert_series_equal
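The completion `tm.assert_series_equal(sc, tc1[~expected])` closes the categorical drop-duplicates test: after an in-place drop_duplicates, the Series must equal the original with the duplicated positions masked out. A condensed, self-contained version of that check on a plain integer Series (pandas._testing is the same private helper module the test itself imports):

```python
import pandas as pd
import pandas._testing as tm

tc = pd.Series([1, 0, 3, 5, 3, 0, 4])
expected = tc.duplicated(keep="first")      # boolean mask marking later duplicates

sc = tc.copy()
assert sc.drop_duplicates(keep="first", inplace=True) is None  # in-place drop returns None
tm.assert_series_equal(sc, tc[~expected])   # raises AssertionError if they differ
```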
# Type: module # String form: <module 'WindPy' from '/opt/conda/lib/python3.6/WindPy.py'> # File: /opt/conda/lib/python3.6/WindPy.py # Source: from ctypes import * import threading import traceback from datetime import datetime, date, time, timedelta import time as t import re from WindData import * from WindBktData import * from XMLParser import XMLReader import pandas as pd import logging import getpass r = XMLReader("/wind/serverapi/wsq_decode.xml") # import speedtcpclient as client expolib = None speedlib = None TDB_lib = None c_lib = None # For test use! Should be replaced with a real userID # userID = "1214779" api_retry = 1 interval = 2 userName = getpass.getuser() authDataPath = "/home/" + userName + "/.wind/authData" authString = readFile(authDataPath) # userID = str(getJsonTag(authString, 'accountID')) # if userID == '': # userID = "1214779" wind_log_path = "/usr/local/log/" def DemoWSQCallback(out): print("DemoWSQCallback") print(out) wsq_items = [] def g_wsq_callback(reqID, indata): out = WindData() out.set(indata, 3) out.RequestID = reqID id2rtField = {} for item in wsq_items: id2rtField[item['id']] = item['funname'].upper() tmp = [id2rtField[str(val)] for val in out.Fields] out.Fields = tmp out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S') try: g_wsq_callback.callback_funcs[reqID](out) except: print(out) SPDCBTYPE = CFUNCTYPE(None, c_int, POINTER(c_apiout)) spdcb = SPDCBTYPE(g_wsq_callback) g_wsq_callback.callback_funcs = {} REQUEST_ID_CANCELALL = 0 REQUEST_ID_SYNC = 1 REQUEST_ID_MAX_RESQUEST = 9999 REQUEST_ID_MIN_RESQUEST = 3 g_requestID = REQUEST_ID_MIN_RESQUEST # The minimum id of NONE BLOCKING MODE def retry(func): def wrapper(*args, **kargs): out = func(*args, **kargs) if not out: return out error_code = type_check(out) if error_code == -10: for i in range(api_retry): out = func(*args, **kargs) error_code = type_check(out) if error_code != -10: break return out # 判断out类型,若带usedf参数则为tuple def type_check(out): if isinstance(out, tuple): error_code = out[0] else: error_code = out.ErrorCode return error_code return wrapper class WindQnt: b_start = False def __static_var(var_name, inital_value): def _set_var(obj): setattr(obj, var_name, inital_value) return obj return _set_var def __stringify(arg): if arg is None: tmp = [""] elif arg == "": tmp = [""] elif isinstance(arg, str): a_l = arg.strip().split(',') arg = ','.join([a.strip() for a in a_l]) tmp = [arg] elif isinstance(arg, list): tmp = [str(x) for x in arg] elif isinstance(arg, tuple): tmp = [str(x) for x in arg] elif isinstance(arg, float) or isinstance(arg, int): tmp = [str(arg)] elif str(type(arg)) == "<type 'unicode'>": tmp = [arg] else: tmp = None if tmp is None: return None else: return ";".join(tmp) def __parseoptions(self, arga=None, argb=None): options = WindQnt._WindQnt__stringify(self) if options is None: return None if isinstance(arga, tuple): for i in range(len(arga)): v = WindQnt._WindQnt__stringify(arga[i]) if v is None: continue else: if options == "": options = v else: options = options + ";" + v if isinstance(argb, dict): keys = argb.keys() for key in keys: v = WindQnt._WindQnt__stringify(argb[key]) if v is None: continue else: if options == "": options = str(key) + "=" + v else: options = options + ";" + str(key) + "=" + v return options @staticmethod def format_option(options): if options is None: return None option_f = options.replace(';', '&&') return option_f # with_time param means you can format hours:minutes:seconds, but not must be def __parsedate(self, with_time=False): d = self if d is 
None: d = datetime.today().strftime("%Y-%m-%d") return d elif isinstance(d, date): d = d.strftime("%Y-%m-%d") return d elif isinstance(d, datetime): d = d.strftime("%Y-%m-%d") return d elif isinstance(d, str): try: d = pure_num = ''.join(list(filter(str.isdigit, d))) if len(d) != 8 and len(d) != 14: return None if len(pure_num) == 14: d = pure_num[:8] + ' ' + pure_num[8:] if int(d[9:11]) > 24 or int(d[9:11]) < 0 or \ int(d[11:13]) > 60 or int(d[11:13]) < 0 or \ int(d[13:15]) > 60 or int(d[13:15]) < 0: return None if int(d[:4]) < 1000 or int(d[:4]) > 9999 or \ int(d[4:6]) < 1 or int(d[4:6]) > 12 or \ int(d[6:8]) < 1 or int(d[6:8]) > 31: return None date_time = d.split(' ') YMD = date_time[0][:4] + '-' + date_time[0][4:6] + '-' + date_time[0][6:8] HMS = '' if with_time and len(date_time) == 2: HMS = ' ' + date_time[1][:2] + ':' + date_time[1][2:4] + ':' + date_time[1][4:6] d = YMD + HMS return d except: return None return d # def __parsedate(d): # if d is None: # d = datetime.today().strftime("%Y-%m-%d") # return d # elif isinstance(d, date): # d = d.strftime("%Y-%m-%d") # return d # elif isinstance(d, str): # try: # #Try to get datetime object from the user input string. # #We will go to the except block, given an invalid format. # if re.match(r'^(?:(?!0000)[0-9]{4}-(?:(?:0[1-9]|1[0-2])-(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])-(?:29|30)|(?:0[13578]|1[02])-31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)-02-29)$',d, re.I|re.M): # d = datetime.strptime(d, "%Y-%m-%d") # return d.strftime("%Y-%m-%d") # elif re.match(r'^(?:(?!0000)[0-9]{4}(?:(?:0[1-9]|1[0-2])(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])(?:29|30)|(?:0[13578]|1[02])31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)0229)$', d, re.I|re.M): # d = datetime.strptime(d, "%Y%m%d") # return d.strftime("%Y-%m-%d") # else: # return None # except: # return None # else: # return None # # return d def use_debug_file(self, debug_expo='/wind/serverapi/libExpoWrapperDebug.so', debug_speed='/wind/serverapi/libSpeedWrapperDebug.so'): WindQnt.debug_expo = debug_expo WindQnt.debug_speed = debug_speed @staticmethod def format_wind_data(error_codes, msg): out = WindData() out.ErrorCode = error_codes out.Codes = ['ErrorReport'] out.Fields = ['OUT MESSAGE'] out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S') out.Data = [[msg]] return out @staticmethod def to_dataframe(out): if out.ErrorCode != 0: return pd.DataFrame([out.ErrorCode], columns=['ErrorCode']) col = out.Times if len(out.Codes) == len(out.Fields) == 1: idx = out.Fields elif len(out.Codes) > 1 and len(out.Fields) == 1: idx = out.Codes elif len(out.Codes) == 1 and len(out.Fields) > 1: idx = out.Fields else: idx = None df = pd.DataFrame(out.Data, columns=col) if idx: df.index = idx return df.T.infer_objects() def isconnected(self): return 0 class __start: def __init__(self): self.restype = c_int32 self.argtypes = [c_wchar_p, c_wchar_p, c_int32] self.lastCall = 0 def __call__(self, show_welcome=True, retry=1): global expolib global speedlib global TDB_lib global c_lib global api_retry if t.time() - self.lastCall > interval: if WindQnt.b_start: return WindQnt.b_start = True self.lastCall = t.time() TDB_lib = CDLL("/wind/serverapi/libtdb.so") c_lib = CDLL("/wind/serverapi/libtradeapi.so") c_lib.tLogon.restype = POINTER(c_variant) c_lib.tQuery.restype = POINTER(c_variant) c_lib.tLogout.restype = POINTER(c_variant) c_lib.tSendOrder.restype = POINTER(c_variant) c_lib.tCancelOrder.restype = POINTER(c_variant) if hasattr(WindQnt, 
"debug_expo"): expolib = CDLL(WindQnt.debug_expo) else: expolib = CDLL("/wind/serverapi/libExpoWrapper.so") expolib.SendMsg2Expo.restype = POINTER(c_apiout) if hasattr(WindQnt, "debug_speed"): speedlib = CDLL(WindQnt.debug_speed) else: speedlib = CDLL("/wind/serverapi/libSpeedWrapper.so") speedlib.SendMsg2SpeedAsyc.restype = POINTER(c_apiout) api_retry = int(retry) if int(retry) < 6 else 5 if show_welcome: print("COPYRIGHT (C) 2017 Wind Information Co., Ltd. ALL RIGHTS RESERVED.\n" "IN NO CIRCUMSTANCE SHALL WIND BE RESPONSIBLE FOR ANY DAMAGES OR LOSSES\n" "CAUSED BY USING WIND QUANT API FOR PYTHON.") return else: # print ("wait a while to start!") return ERR_WAIT def __str__(self): return ("Start the Wind Quant API") start = __start() class __wses: def __init__(self): self.restype = POINTER(c_apiout) self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p,c_wchar_p] self.lastCall = 0 @retry def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, *arga, **argb): # write_log('call wsd') s = int(t.time()*1000) if expolib is None: return WindQnt.format_wind_data(-103, '') if t.time() - self.lastCall < interval: t.sleep(interval) if isinstance(endTime, str): # 判断是否为日期宏,若不是,则调用parsedate方法 endTime_compile = re.findall('\d\d\d\d\d\d\d\d', endTime.replace('-', '')) if endTime_compile: endTime = WindQnt._WindQnt__parsedate(endTime) else: # 处理datetime类型日期 endTime = WindQnt._WindQnt__parsedate(endTime) if endTime == None: print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01") return if isinstance(beginTime, str): beginTime_compile = re.findall('\d\d\d\d\d\d\d\d', beginTime.replace('-', '')) if beginTime_compile: beginTime = WindQnt._WindQnt__parsedate(beginTime) else: beginTime = WindQnt._WindQnt__parsedate(beginTime) if beginTime == None: print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 
2016-01-01") return if(endTime==None): endTime = datetime.today().strftime("%Y-%m-%d") if(beginTime==None): beginTime = endTime # chech if the endTime is before than the beginTime # endD = datetime.strptime(endTime, "%Y-%m-%d") # beginD = datetime.strptime(beginTime, "%Y-%m-%d") # if (endD-beginD).days < 0: # print("The endTime should be later than or equal to the beginTime!") # return codes = WindQnt._WindQnt__stringify(codes) fields = WindQnt._WindQnt__stringify(fields) options = WindQnt._WindQnt__parseoptions(options, arga, argb) if codes == None or fields == None or options == None: print("Insufficient arguments!") return userID = str(getJsonTag(authString, 'accountID')) if userID == '': userID = "1214779" tmp = "wses|"+codes+"|"+fields+"|"+beginTime+"|"+endTime+"|"+options+"|"+userID tmp = tmp.encode("utf16") + b"\x00\x00" apiOut = expolib.SendMsg2Expo(tmp, len(tmp)) self.lastCall = t.time() if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010: msg = 'Request Timeout' e = int(t.time()*1000) write_log(str(e-s) + ' call wses') return WindQnt.format_wind_data(-40521010, msg) else: out = WindData() out.set(apiOut, 1, asdate = True) if 'usedf' in argb.keys(): usedf = argb['usedf'] if usedf: if not isinstance(usedf, bool): print('the sixth parameter is usedf which should be the Boolean type!') return try: if out.ErrorCode != 0: df = pd.DataFrame(out.Data, index=out.Fields) df.columns = [x for x in range(df.columns.size)] return out.ErrorCode, df.T.infer_objects() col = out.Times if len(out.Codes) == len(out.Fields) == 1: idx = out.Fields elif len(out.Codes) > 1 and len(out.Fields) == 1: idx = out.Codes elif len(out.Codes) == 1 and len(out.Fields) > 1: idx = out.Fields else: idx = None df = pd.DataFrame(out.Data, columns=col) if idx: df.index = idx e = int(t.time()*1000) write_log(str(e-s) + ' call wsd') return out.ErrorCode, df.T.infer_objects() except Exception: print(traceback.format_exc()) return if out.ErrorCode != 0: if len(out.Data) != 0 and len(out.Data[0]) > 100: if len(out.Data) > 1: print(str(out.Data)[:10] + '...]...]') else: print(str(out.Data)[:10] + '...]]') else: print(out.Data) e = int(t.time()*1000) write_log(str(e-s) + ' call wses') return out def __str__(self): return ("WSES") wses = __wses() class __wsee: def __init__(self): self.restype = POINTER(c_apiout) self.argtypes = [c_wchar_p,c_wchar_p,c_wchar_p] #codes,fields,options self.lastCall = 0 @retry def __call__(self, codes, fields, options=None, *arga, **argb): # write_log('call wsee') s = int(t.time()*1000) if expolib is None: return WindQnt.format_wind_data(-103, '') if t.time() - self.lastCall < interval: t.sleep(interval) codes = WindQnt._WindQnt__stringify(codes) fields = WindQnt._WindQnt__stringify(fields) options = WindQnt._WindQnt__parseoptions(options, arga, argb) if fields == None or options == None: print("Insufficient arguments!") return userID = str(getJsonTag(authString, 'accountID')) if userID == '': userID = "1214779" tmp = "wsee|"+codes+"|"+fields+"|"+options+"|"+userID tmp = tmp.encode("utf16") + b"\x00\x00" apiOut = expolib.SendMsg2Expo(tmp, len(tmp)) self.lastCall = t.time() if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010: msg = 'Request Timeout' e = int(t.time()*1000) write_log(str(e-s) + ' call wsee') return WindQnt.format_wind_data(-40521010, msg) else: out = WindData() out.set(apiOut, 1, asdate=True) #将winddata类型数据改为dataframe格式 if 'usedf' in argb.keys(): usedf = argb['usedf'] if usedf: if not isinstance(usedf, bool): print('the fourth 
parameter is usedf which should be the Boolean type!') return try: if out.ErrorCode != 0: df = pd.DataFrame(out.Data, index=out.Fields) df.columns = [x for x in range(df.columns.size)] return out.ErrorCode, df.T.infer_objects() if out.Codes == 1 or out.Fields == 1: return out.ErrorCode, WindQnt.to_dataframe(out) else: df = pd.DataFrame(out.Data, columns=out.Codes, index=out.Fields) e = int(t.time()*1000) write_log(str(e-s) + ' call wsee') return out.ErrorCode, df.T.infer_objects() except Exception as e: print(traceback.format_exc()) return if out.ErrorCode != 0: if len(out.Data) != 0 and len(out.Data[0]) > 100: if len(out.Data) > 1: print(str(out.Data)[:10] + '...]...]') else: print(str(out.Data)[:10] + '...]]') else: print(out.Data) e = int(t.time()*1000) write_log(str(e-s) + ' call wsee') return out def __str__(self): return ("wsee") wsee = __wsee() class __wsi: def __init__(self): self.restype = POINTER(c_apiout) self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p] self.lastCall = 0 @retry def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb): # write_log('call wsi') s = int(t.time() * 1000) if expolib is None: return WindQnt.format_wind_data(-103, '') if t.time() - self.lastCall < interval: t.sleep(interval) # endTime = WindQnt._WindQnt__parsedate(endTime) # if endTime is None: # print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01") # return # # beginTime = WindQnt._WindQnt__parsedate(beginTime) # if beginTime is None: # print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01") # return if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d") if (beginTime is None): beginTime = endTime # chech if the endTime is before than the beginTime # endD = datetime.strptime(endTime, "%Y-%m-%d") # beginD = datetime.strptime(beginTime, "%Y-%m-%d") # if (endD-beginD).days < 0: # print("The endTime should be later than or equal to the beginTime!") # return codes = WindQnt._WindQnt__stringify(codes) fields = WindQnt._WindQnt__stringify(fields) options = WindQnt._WindQnt__parseoptions(options, arga, argb) if codes is None or fields is None or options is None: print("Insufficient arguments!") return userID = str(getJsonTag(authString, 'accountID')) if userID == '': userID = "1214779" tmp = "wsi|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID tmp = tmp.encode("utf16") + b"\x00\x00" apiOut = expolib.SendMsg2Expo(tmp, len(tmp)) self.lastCall = t.time() if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010: msg = 'Request Timeout' e = int(t.time() * 1000) write_log(str(e - s) + ' call wsi') return WindQnt.format_wind_data(-40521010, msg) else: out = WindData() out.set(apiOut, 1, asdate=False) if usedf: if not isinstance(usedf, bool): print('the sixth parameter is usedf which should be the Boolean type!') return try: if out.ErrorCode != 0: df = pd.DataFrame(out.Data, index=out.Fields) df.columns = [x for x in range(df.columns.size)] return out.ErrorCode, df.T.infer_objects() col = out.Times if len(out.Codes) == len(out.Fields) == 1: idx = out.Fields elif len(out.Codes) > 1 and len(out.Fields) == 1: idx = out.Codes elif len(out.Codes) == 1 and len(out.Fields) > 1: idx = out.Fields else: idx = None df = pd.DataFrame(out.Data, columns=col) if idx: df.index = idx e = int(t.time() * 1000) write_log(str(e - s) + ' call wsi') return out.ErrorCode, df.T.infer_objects() except Exception: 
print(traceback.format_exc()) return if out.ErrorCode != 0: if len(out.Data) != 0 and len(out.Data[0]) > 100: if len(out.Data) > 1: print(str(out.Data)[:10] + '...]...]') else: print(str(out.Data)[:10] + '...]]') else: print(out.Data) e = int(t.time() * 1000) write_log(str(e - s) + ' call wsi') return out def __str__(self): return ("WSI") wsi = __wsi() class __wsd: def __init__(self): self.restype = POINTER(c_apiout) self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p] self.lastCall = 0 @retry def __call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb): # write_log('call wsd') s = int(t.time() * 1000) if expolib is None: return WindQnt.format_wind_data(-103, '') if t.time() - self.lastCall < interval: t.sleep(interval) # endTime = WindQnt._WindQnt__parsedate(endTime) # if endTime is None: # print("Invalid date format of endTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01") # return # # beginTime = WindQnt._WindQnt__parsedate(beginTime) # if beginTime is None: # print("Invalid date format of beginTime! Please use the '%Y-%m-%d' format! E.g. 2016-01-01") # return if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d") if (beginTime is None): beginTime = endTime # chech if the endTime is before than the beginTime # endD = datetime.strptime(endTime, "%Y-%m-%d") # beginD = datetime.strptime(beginTime, "%Y-%m-%d") # if (endD-beginD).days < 0: # print("The endTime should be later than or equal to the beginTime!") # return codes = WindQnt._WindQnt__stringify(codes) fields = WindQnt._WindQnt__stringify(fields) options = WindQnt._WindQnt__parseoptions(options, arga, argb) if codes is None or fields is None or options is None: print("Insufficient arguments!") return userID = str(getJsonTag(authString, 'accountID')) if userID == '': userID = "1214779" tmp = "wsd|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID tmp = tmp.encode("utf16") + b"\x00\x00" apiOut = expolib.SendMsg2Expo(tmp, len(tmp)) self.lastCall = t.time() if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010: msg = 'Request Timeout' e = int(t.time() * 1000) write_log(str(e - s) + ' call wsd') return WindQnt.format_wind_data(-40521010, msg) else: out = WindData() out.set(apiOut, 1, asdate=True) if usedf: if not isinstance(usedf, bool): print('the sixth parameter is usedf which should be the Boolean type!') return try: if out.ErrorCode != 0: df = pd.DataFrame(out.Data, index=out.Fields) df.columns = [x for x in range(df.columns.size)] return out.ErrorCode, df.T.infer_objects() col = out.Times if len(out.Codes) == len(out.Fields) == 1: idx = out.Fields elif len(out.Codes) > 1 and len(out.Fields) == 1: idx = out.Codes elif len(out.Codes) == 1 and len(out.Fields) > 1: idx = out.Fields else: idx = None df = pd.DataFrame(out.Data, columns=col) if idx: df.index = idx e = int(t.time() * 1000) write_log(str(e - s) + ' call wsd') return out.ErrorCode, df.T.infer_objects() except Exception: print(traceback.format_exc()) return if out.ErrorCode != 0: if len(out.Data) != 0 and len(out.Data[0]) > 100: if len(out.Data) > 1: print(str(out.Data)[:10] + '...]...]') else: print(str(out.Data)[:10] + '...]]') else: print(out.Data) e = int(t.time() * 1000) write_log(str(e - s) + ' call wsd') return out def __str__(self): return ("WSD") wsd = __wsd() class __wst: def __init__(self): self.restype = POINTER(c_apiout) self.argtypes = [c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p, c_wchar_p] self.lastCall = 0 @retry def 
__call__(self, codes, fields, beginTime=None, endTime=None, options=None, usedf=False, *arga, **argb): # write_log('call wst') s = int(t.time() * 1000) if expolib is None: return WindQnt.format_wind_data(-103, '') if t.time() - self.lastCall < interval: t.sleep(interval) if (endTime is None): endTime = datetime.today().strftime("%Y-%m-%d") if (beginTime is None): beginTime = endTime codes = WindQnt._WindQnt__stringify(codes) fields = WindQnt._WindQnt__stringify(fields) options = WindQnt._WindQnt__parseoptions(options, arga, argb) if codes is None or fields is None or options is None: print("Insufficient arguments!") return userID = str(getJsonTag(authString, 'accountID')) if userID == '': userID = "1214779" tmp = "wst|" + codes + "|" + fields + "|" + beginTime + "|" + endTime + "|" + options + "|" + userID tmp = tmp.encode("utf16") + b"\x00\x00" apiOut = expolib.SendMsg2Expo(tmp, len(tmp)) self.lastCall = t.time() if apiOut.contents.ErrorCode == -1 or apiOut.contents.ErrorCode == -40521010: msg = 'Request Timeout' e = int(t.time() * 1000) write_log(str(e - s) + ' call wst') return WindQnt.format_wind_data(-40521010, msg) else: out = WindData() out.set(apiOut, 1, asdate=False) if usedf: if not isinstance(usedf, bool): print('the sixth parameter is usedf which should be the Boolean type!') return try: if out.ErrorCode != 0: df = pd.DataFrame(out.Data, index=out.Fields) df.columns = [x for x in range(df.columns.size)] return out.ErrorCode, df.T.infer_objects() col = out.Times if len(out.Codes) == len(out.Fields) == 1: idx = out.Fields elif len(out.Codes) > 1 and len(out.Fields) == 1: idx = out.Codes elif len(out.Codes) == 1 and len(out.Fields) > 1: idx = out.Fields else: idx = None df =
pd.DataFrame(out.Data, columns=col)
pandas.DataFrame
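Throughout these WindPy wrappers the response is reshaped with `pd.DataFrame(out.Data, columns=col)`, where out.Data holds one list of values per requested field and col carries the timestamps, followed by setting the index and transposing. The sketch below reconstructs that shape with invented quotes in place of a real WindData response.

```python
import pandas as pd

# Invented stand-ins for out.Data, out.Times and out.Fields of a WindData response.
data = [[10.2, 10.4, 10.1],                        # one list of values per field
        [1200, 1500, 900]]
col = ["2016-01-04", "2016-01-05", "2016-01-06"]   # out.Times
idx = ["close", "volume"]                          # out.Fields

df = pd.DataFrame(data, columns=col)
if idx:
    df.index = idx
print(df.T.infer_objects())  # dates as rows, fields as columns, dtypes inferred
```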
import numpy as np import pandas as pd import shap import glmnet import xgboost as xgb from sklearn.preprocessing import StandardScaler from slickml.formatting import Color from slickml.utilities import df_to_csr from slickml.plotting import ( plot_xgb_cv_results, plot_xgb_feature_importance, plot_shap_summary, plot_shap_waterfall, plot_glmnet_cv_results, plot_glmnet_coeff_path, ) class XGBoostRegressor: """XGBoost Regressor. This is wrapper using XGBoost regressor to train a XGBoost model with using number of boosting rounds from the inputs. This function is pretty useful when feature selection is done and you want to train a model on the whole data and test on a separate validation set. Main reference is XGBoost Python API: (https://xgboost.readthedocs.io/en/latest/python/python_api.html) Parameters ---------- num_boost_round: int, optional (default=200) Number of boosting round to train the model metrics: str or tuple[str], optional (default=("rmse")) Metric used for evaluation at cross-validation using xgboost.cv(). Please note that this is different than eval_metric that needs to be passed to params dict. Possible values are "rmse", "rmsle", "mae" sparse_matrix: bool, optional (default=False) Flag to convert data to sparse matrix with csr format. This would increase the speed of feature selection for relatively large datasets scale_mean: bool, optional (default=False) Flag to center the data before scaling. This flag should be False when using sparse_matrix=True, since it centering the data would decrease the sparsity and in practice it does not make any sense to use sparse matrix method and it would make it worse. scale_std: bool, optional (default=False) Flag to scale the data to unit variance (or equivalently, unit standard deviation) importance_type: str, optional (default="total_gain") Importance type of xgboost.train() with possible values "weight", "gain", "total_gain", "cover", "total_cover" params: dict, optional Set of parameters for evaluation of xboost.train() (default={"eval_metric" : "rmse", "tree_method": "hist", "objective" : "reg:squarederror", "learning_rate" : 0.05, "max_depth": 2, "min_child_weight" : 1, "gamma" : 0.0, "reg_alpha" : 0.0, "reg_lambda" : 1.0, "subsample" : 0.9, "max_delta_step": 1, "verbosity" : 0, "nthread" : 4}) Other options for objective: "reg:logistic", "reg:squaredlogerror" Attributes ---------- feature_importance_: dict() Returns a dict() of all feature importance based on importance_type at each fold of each iteration during selection process scaler_: StandardScaler object Returns the scaler object if any of scale_mean or scale_std was passed True. X_train_: pandas.DataFrame Returns scaled training data set that passed if if any of scale_mean or scale_std was passed as True, else X_train. X_test_: pandas.DataFrame Returns transformed testing data set using scaler_ object if if any of scale_mean or scale_std was passed as True, else X_train. d_train_: xgboost.DMatrix object Returns the xgboost.DMatrix(X_train_, y_train) d_test_: xgboost.DMatrix object Returns the xgboost.DMatrix(X_test_, y_test) shap_values_train_: numpy.array SHAP values from treeExplainer using X_train shap_values_test_: numpy.array SHAP values from treeExplainer using X_test fit(X_train, y_train): instance method Returns None and applies the training process using the (X_train, y_train) set using xgboost.train() predict(X_test, y_test): instance method Return the predicted target values. 
get_params(): instance method Returns params dict get_feature_importance(): instance method Returns feature importance based on importance_type plot_feature_importance(): instance method Plots feature importance plot_shap_summary(): instance method Plot shap values summary """ def __init__( self, num_boost_round=None, metrics=None, sparse_matrix=False, scale_mean=False, scale_std=False, importance_type=None, params=None, ): if num_boost_round is None: self.num_boost_round = 200 else: if not isinstance(num_boost_round, int): raise TypeError("The input num_boost_round must have integer dtype.") else: self.num_boost_round = num_boost_round if metrics is None: self.metrics = "rmse" else: if not isinstance(metrics, str): raise TypeError("The input metric must be a str dtype.") else: # TODO: update metric in next API update # mape, mphe should be added if metrics in ["rmse", "rmsle", "mae"]: self.metrics = metrics else: raise ValueError("The input metric value is not valid.") if not isinstance(sparse_matrix, bool): raise TypeError("The input sparse_matrix must have bool dtype.") else: self.sparse_matrix = sparse_matrix if not isinstance(scale_mean, bool): raise TypeError("The input scale_mean must have bool dtype.") else: self.scale_mean = scale_mean if not isinstance(scale_std, bool): raise TypeError("The input scale_std must have bool dtype.") else: self.scale_std = scale_std if importance_type is None: self.importance_type = "total_gain" else: if not isinstance(importance_type, str): raise TypeError("The input importance_type must have str dtype.") else: if importance_type in [ "weight", "gain", "total_gain", "cover", "total_cover", ]: self.importance_type = importance_type else: raise ValueError("The input importance_type value is not valid.") params_ = { "eval_metric": "rmse", "tree_method": "hist", "objective": "reg:squarederror", "learning_rate": 0.05, "max_depth": 2, "min_child_weight": 1, "gamma": 0.0, "reg_alpha": 0.0, "reg_lambda": 1.0, "subsample": 0.9, "max_delta_step": 1, "verbosity": 0, "nthread": 4, } if params is None: self.params = params_ else: if not isinstance(params, dict): raise TypeError("The input params must have dict dtype.") else: self.params = params_ for key, val in params.items(): self.params[key] = val def fit(self, X_train, y_train): """ Function to run xgboost.train() method based on the given number of boosting round from the inputs using (X_train, y_train) set and returns it. Parameters ---------- X_train: numpy.array or pandas.DataFrame Training features data y_train: numpy.array[int] or list[int] List of training ground truth binary values [0, 1] """ # creating dtrain self.dtrain_ = self._dtrain(X_train, y_train) # train model self.model_ = self._model() # feature importance self.feature_importance_ = self._imp_to_df() return None def get_params(self): """ Function to return the train parameters for XGBoost. """ return self.params def get_feature_importance(self): """ Function to return the feature importance of the best model at each fold of each iteration of feature selection. """ return self.feature_importance_ def predict(self, X_test, y_test=None): """ Function to return the prediction of target values. 
Parameters ---------- X_test: numpy.array or pandas.DataFrame Validation features data y_test: numpy.array[int] or list[int], optional (default=None) List of validation ground truth binary values [0, 1] """ self.dtest_ = self._dtest(X_test, y_test) self.y_pred_ = self.model_.predict(self.dtest_, output_margin=False) return self.y_pred_ def plot_feature_importance( self, figsize=None, color=None, marker=None, markersize=None, markeredgecolor=None, markerfacecolor=None, markeredgewidth=None, fontsize=None, save_path=None, ): """Function to plot XGBoost feature importance. This function is a helper function based on the feature_importance_ attribute of the XGBoostRegressor class. Parameters ---------- feature importance: Pandas DataFrame Feature frequency figsize: tuple, optional, (default=(8, 5)) Figure size color: str, optional, (default="#87CEEB") Color of the vertical lines of lollipops marker: str, optional, (default="o") Market style of the lollipops. Complete valid marker styke can be found at: (https://matplotlib.org/2.1.1/api/markers_api.html#module-matplotlib.markers) markersize: int or float, optional, (default=10) Markersize markeredgecolor: str, optional, (default="1F77B4") Marker edge color markerfacecolor: str, optional, (default="1F77B4") Marker face color markeredgewidth: int or float, optional, (default=1) Marker edge width fontsize: int or float, optional, (default=12) Fontsize for xlabel and ylabel, and ticks parameters save_path: str, optional (default=None) The full or relative path to save the plot including the image format. For example "myplot.png" or "../../myplot.pdf" """ plot_xgb_feature_importance( feature_importance=self.feature_importance_, figsize=figsize, color=color, marker=marker, markersize=markersize, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor, markeredgewidth=markeredgewidth, fontsize=fontsize, save_path=save_path, ) def plot_shap_summary( self, validation=True, plot_type=None, figsize=None, color=None, max_display=None, feature_names=None, title=None, show=True, sort=True, color_bar=True, layered_violin_max_num_bins=None, class_names=None, class_inds=None, color_bar_label=None, save_path=None, ): """Function to plot shap summary plot. This function is a helper function to plot the shap summary plot based on all types of shap explainers including tree, linear, and dnn. Please note that this function should be ran after the predict_proba to make sure the X_test is being instansiated. Parameters ---------- validation: bool, optional, (default=True) Flag to calculate SHAP values of X_test if it is True. If validation=False, it calculates the SHAP values of X_train and plots the summary plot. plot_type: str, optional (single-output default="dot", multi-output default="bar") The type of summar plot. Options are "bar", "dot", "violin", "layered_violin", and "compact_dot" which is recommended for SHAP interactions figsize: tuple, optional, (default="auto") Figure size color: str, optional, (default= "#D0AAF3" for "bar") Color of violin and layered violin plots are "RdBl" cmap Color of the horizontal lines when plot_type="bar" is "#D0AAF3" max_display: int, optional, (default=20) Limit to show the number of features in the plot feature_names: str, optional, (default=None) List of feature names to pass. 
It should follow the order of fatures title: str, optional, (default=None) Title of the plot show: bool, optional, (default=True) Flag to show the plot in inteactive environment sort: bool, optional, (default=True) Flag to plot sorted shap vlues in descending order color_bar: bool, optional, (default=True) Flag to show color_bar when plot_type is "dot" or "violin" layered_violin_max_num_bins: int, optional, (default=10) The number of bins for calculating the violin plots ranges and outliers class_names: list, optional, (default=None) List of class names for multi-output problems class_inds: list, optional, (default=True) List of class indices for multi-output problems color_bar_label: str, optional, (default="Feature Value") Label for color bar save_path: str, optional (default=None) The full or relative path to save the plot including the image format. For example "myplot.png" or "../../myplot.pdf" """ # define tree explainer self.explainer_ = shap.TreeExplainer(self.model_) self.shap_values_test_ = self.explainer_.shap_values(self.X_test_) self.shap_values_train_ = self.explainer_.shap_values(self.X_train_) # check the validation flag if validation: # define shap values for X_test shap_values = self.shap_values_test_ features = self.X_test_ else: # define shap values for X_train shap_values = self.shap_values_train_ features = self.X_train_ plot_shap_summary( shap_values=shap_values, features=features, plot_type=plot_type, figsize=figsize, color=color, max_display=max_display, feature_names=feature_names, title=title, show=show, sort=sort, color_bar=color_bar, layered_violin_max_num_bins=layered_violin_max_num_bins, class_names=class_names, class_inds=class_inds, color_bar_label=color_bar_label, save_path=save_path, ) def plot_shap_waterfall( self, validation=True, figsize=None, bar_color=None, bar_thickness=None, line_color=None, marker=None, markersize=None, markeredgecolor=None, markerfacecolor=None, markeredgewidth=None, max_display=None, title=None, fontsize=None, save_path=None, ): """Function to plot shap waterfall plot. This function is a helper function to plot the shap waterfall plot based on all types of shap explainers including tree, linear, and dnn. This would show the cumulitative/composite ratios of shap values per feature. Therefore, it can be easily seen with each feature how much explainability we can acheieve. Please note that this function should be ran after the predict_proba to make sure the X_test is being instansiated. Parameters ---------- validation: bool, optional, (default=True) Flag to calculate SHAP values of X_test if it is True. If validation=False, it calculates the SHAP values of X_train and plots the summary plot. 
figsize: tuple, optional, (default=(8, 5)) Figure size bar_color: str, optional, (default="#B3C3F3") Color of the horizontal bar lines bar_thickness: float, optional, (default=0.5) Thickness (hight) of the horizontal bar lines line_color: str, optional, (default="purple") Color of the line plot marker: str, optional, (default="o") Marker style marker style can be found at: (https://matplotlib.org/2.1.1/api/markers_api.html#module-matplotlib.markers) markersize: int or float, optional, (default=7) Markersize markeredgecolor: str, optional, (default="purple") Marker edge color markerfacecolor: str, optional, (default="purple") Marker face color markeredgewidth: int or float, optional, (default=1) Marker edge width max_display: int, optional, (default=20) Limit to show the number of features in the plot title: str, optional, (default=None) Title of the plot fontsize: int or float, optional, (default=12) Fontsize for xlabel and ylabel, and ticks parameters save_path: str, optional (default=None) The full or relative path to save the plot including the image format. For example "myplot.png" or "../../myplot.pdf" """ # define tree explainer self.explainer_ = shap.TreeExplainer(self.model_) self.shap_values_test_ = self.explainer_.shap_values(self.X_test_) self.shap_values_train_ = self.explainer_.shap_values(self.X_train_) # check the validation flag if validation: # define shap values for X_test shap_values = self.shap_values_test_ features = self.X_test_ else: # define shap values for X_train shap_values = self.shap_values_train_ features = self.X_train_ plot_shap_waterfall( shap_values, features, figsize=figsize, bar_color=bar_color, bar_thickness=bar_thickness, line_color=line_color, marker=marker, markersize=markersize, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor, markeredgewidth=markeredgewidth, max_display=max_display, title=title, fontsize=fontsize, save_path=save_path, ) def _dtrain(self, X_train, y_train): """ Function to return dtrain matrix based on input parameters including sparse_matrix, and scaled using both numpy array and pandas DataFrame. Parameters ---------- X_train: numpy.array or pandas.DataFrame Training features data y_train: numpy.array[int] or list[int] List of training ground truth binary values [0, 1] """ if isinstance(X_train, np.ndarray): self.X_train = pd.DataFrame( X_train, columns=[f"F_{i}" for i in range(X_train.shape[1])] ) elif isinstance(X_train, pd.DataFrame): self.X_train = X_train else: raise TypeError( "The input X_train must be numpy array or pandas DataFrame." ) if isinstance(y_train, np.ndarray) or isinstance(y_train, list): self.y_train = y_train else: raise TypeError("The input y_train must be numpy array or list.") if self.sparse_matrix and self.scale_mean: raise ValueError( "The scale_mean should be False in conjuction of using sparse_matrix=True." 
) if self.scale_mean or self.scale_std: self.scaler_ = StandardScaler( with_mean=self.scale_mean, with_std=self.scale_std ) self.X_train_ = pd.DataFrame( self.scaler_.fit_transform(self.X_train), columns=self.X_train.columns.tolist(), ) else: self.X_train_ = self.X_train.copy() if not self.sparse_matrix: dtrain = xgb.DMatrix(data=self.X_train_, label=self.y_train) else: dtrain = xgb.DMatrix( data=df_to_csr(self.X_train_, fillna=0.0, verbose=False), label=self.y_train, feature_names=self.X_train_.columns.tolist(), ) return dtrain def _dtest(self, X_test, y_test=None): """ Function to return dtest matrix based on input X_test, y_test including sparse_matrix, and scaled using both numpy array and pandas DataFrame. It does apply scaler transformation in case it was used. Please note that y_test is optional since it might not be available while validating the model. Parameters ---------- X_test: numpy.array or pandas.DataFrame Testing/validation features data y_test: numpy.array[int] or list[int], optional (default=None) List of testing/validation ground truth binary values [0, 1] """ if isinstance(X_test, np.ndarray): self.X_test = pd.DataFrame( X_test, columns=[f"F_{i}" for i in range(X_test.shape[1])] ) elif isinstance(X_test, pd.DataFrame): self.X_test = X_test else: raise TypeError("The input X_test must be numpy array or pandas DataFrame.") if y_test is None: self.y_test = None elif isinstance(y_test, np.ndarray) or isinstance(y_test, list): self.y_test = y_test else: raise TypeError("The input y_test must be numpy array or list.") if self.scale_mean or self.scale_std: self.X_test_ = pd.DataFrame( self.scaler_.transform(self.X_test), columns=self.X_test.columns.tolist(), ) else: self.X_test_ = self.X_test.copy() if not self.sparse_matrix: dtest = xgb.DMatrix(data=self.X_test_, label=self.y_test) else: dtest = xgb.DMatrix( data=df_to_csr(self.X_test_, fillna=0.0, verbose=False), label=self.y_test, feature_names=self.X_test_.columns.tolist(), ) return dtest def _model(self): """ Function to train XGBoost model based on the given number of boosting round. """ model = xgb.train( params=self.params, dtrain=self.dtrain_, num_boost_round=self.num_boost_round - 1, ) return model def _imp_to_df(self): """ Function to build convert feature importance to df. """ data = {"feature": [], f"{self.importance_type}": []} features_gain = self.model_.get_score(importance_type=self.importance_type) for key, val in features_gain.items(): data["feature"].append(key) data[f"{self.importance_type}"].append(val) df = (
pd.DataFrame(data)
pandas.DataFrame
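A minimal sketch of the pd.DataFrame(data) call that closes the _imp_to_df() helper above. The importance scores are invented, and the trailing sort is only a plausible finishing step for the wrapped expression, not necessarily what the original chain does.

import pandas as pd

# Hypothetical feature-importance scores, shaped like the dict built in _imp_to_df().
data = {"feature": ["F_0", "F_1", "F_2"],
        "total_gain": [12.4, 3.7, 8.9]}

# One row per feature; sorting by the importance column is a common final step.
df = (
    pd.DataFrame(data)
    .sort_values(by="total_gain", ascending=False)
    .reset_index(drop=True)
)
print(df)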
#!/usr/bin/env python # -*- coding: utf-8 -*- # parallel weak scaling import sys import numpy as np import subprocess import datetime import time from cycler import cycler import os import pandas_utility input_filename = "logs/log.csv" list_columns = False show_plots = False # parse arguments for arg in sys.argv[1:]: if ".csv" in arg: input_filename = arg else: if "l" in arg: list_columns = True if "p" in arg: show_plots = True if len(sys.argv) == 1: print("usage: {} [-l] [-p] [<input_filename>]".format(sys.argv[0])) print(" -l: list column names") print(" -p: show plot window (else create pdf plot)") print(" <input_filename> log file, default: logs/log.csv") print("") # load matplotlib import matplotlib if not matplotlib.is_interactive() or not show_plots: matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as pd import numpy as np # define global plotting parameters plt.rcParams.update({'font.size': 16}) plt.rcParams['lines.linewidth'] = 2 def remove_duplicates(seq): seen = set() seen_add = seen.add return [x for x in seq if not (x in seen or seen_add(x))] # determine columns to load from the log file with open(input_filename) as f: line = f.readline() if "~nDofs" in line: pos = line.find("~nDofs") line = line[0:pos] column_names = list(line.split(";")) # rename "n" columns for i, column_name in enumerate(column_names): if column_name == "n": column_names[i] = "{}_n".format(column_names[i-1]) while "" in column_names: column_names.remove("") seen = set() column_names2 = [] for x in list(column_names): if x not in seen: seen.add(x) column_names2.append(x) else: print("Note: column \"{}\" appears multiple times".format(x)) while x in seen: x = "{}_2".format(x) seen.add(x) column_names2.append(x) column_names = column_names2 n_columns = len(column_names) if list_columns: print("File {} contains {} colums: {}".format(input_filename, n_columns, column_names)) # load data frame #df = pd.read_csv(input_filename, sep=';', error_bad_lines=False, warn_bad_lines=True, comment="#", header=None, names=column_names, usecols=range(n_columns), mangle_dupe_cols=True) df = pandas_utility.load_df(input_filename) # filter data #df = df.loc[df['endTime'] == 1] if not list_columns: print("File {} contains {} rows and {} colums.".format(input_filename, len(df.index), n_columns)) # parse timestamp df['# timestamp'] =
pd.to_datetime(df['# timestamp'])
pandas.to_datetime
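A self-contained version of the pandas.to_datetime completion above, using a few fabricated log rows; errors="coerce" is added here so malformed timestamps become NaT instead of raising.

import pandas as pd

# Hypothetical rows with a "# timestamp" column, as in the scaling log above.
df = pd.DataFrame({
    "# timestamp": ["2021-03-01 12:00:05", "2021-03-01 12:07:41", "not-a-date"],
    "duration": [4.2, 3.9, 4.1],
})

# Convert the string column to datetime64; bad entries are coerced to NaT.
df["# timestamp"] = pd.to_datetime(df["# timestamp"], errors="coerce")
print(df.dtypes)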
import pandas as pd import numpy as np import sqlite3 import click import os from .data_handling import check_sqlite_table from .report import plot_scores def export_tsv(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue): con = sqlite3.connect(infile) ipf_present = False if ipf: ipf_present = check_sqlite_table(con, "SCORE_IPF") # Main query for peptidoform IPF if ipf_present and ipf=='peptidoform': idx_query = ''' CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID); CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID); CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID); CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID); CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID); ''' if check_sqlite_table(con, "FEATURE_MS1"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);" if check_sqlite_table(con, "FEATURE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);" if check_sqlite_table(con, "SCORE_MS1"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms1_feature_id ON SCORE_MS1 (FEATURE_ID);" score_ms1_pep = "SCORE_MS1.PEP" link_ms1 = "LEFT JOIN SCORE_MS1 ON SCORE_MS1.FEATURE_ID = FEATURE.ID" else: score_ms1_pep = "NULL" link_ms1 = "" if check_sqlite_table(con, "SCORE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);" if check_sqlite_table(con, "SCORE_IPF"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_feature_id ON SCORE_IPF (FEATURE_ID);" idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_peptide_id ON SCORE_IPF (PEPTIDE_ID);" query = ''' SELECT RUN.ID AS id_run, PEPTIDE.ID AS id_peptide, PEPTIDE_IPF.MODIFIED_SEQUENCE || '_' || PRECURSOR.ID AS transition_group_id, PRECURSOR.DECOY AS decoy, RUN.ID AS run_id, RUN.FILENAME AS filename, FEATURE.EXP_RT AS RT, FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt, FEATURE.DELTA_RT AS delta_rt, FEATURE.NORM_RT AS iRT, PRECURSOR.LIBRARY_RT AS assay_iRT, FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT, FEATURE.ID AS id, PEPTIDE_IPF.UNMODIFIED_SEQUENCE AS Sequence, PEPTIDE_IPF.MODIFIED_SEQUENCE AS FullPeptideName, PRECURSOR.CHARGE AS Charge, PRECURSOR.PRECURSOR_MZ AS mz, FEATURE_MS2.AREA_INTENSITY AS Intensity, FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area, FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex, FEATURE.LEFT_WIDTH AS leftWidth, FEATURE.RIGHT_WIDTH AS rightWidth, %s AS ms1_pep, SCORE_MS2.PEP AS ms2_pep, SCORE_IPF.PRECURSOR_PEAKGROUP_PEP AS precursor_pep, SCORE_IPF.PEP AS ipf_pep, SCORE_MS2.RANK AS peak_group_rank, SCORE_MS2.SCORE AS d_score, SCORE_MS2.QVALUE AS ms2_m_score, SCORE_IPF.QVALUE AS m_score FROM PRECURSOR INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID LEFT JOIN FEATURE_MS2 ON 
FEATURE_MS2.FEATURE_ID = FEATURE.ID %s LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID LEFT JOIN SCORE_IPF ON SCORE_IPF.FEATURE_ID = FEATURE.ID INNER JOIN PEPTIDE AS PEPTIDE_IPF ON SCORE_IPF.PEPTIDE_ID = PEPTIDE_IPF.ID WHERE SCORE_MS2.QVALUE < %s AND SCORE_IPF.PEP < %s ORDER BY transition_group_id, peak_group_rank; ''' % (score_ms1_pep, link_ms1, max_rs_peakgroup_qvalue, ipf_max_peptidoform_pep) # Main query for augmented IPF elif ipf_present and ipf=='augmented': idx_query = ''' CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID); CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID); CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID); CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID); CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID); ''' if check_sqlite_table(con, "FEATURE_MS1"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);" if check_sqlite_table(con, "FEATURE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);" if check_sqlite_table(con, "SCORE_MS1"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms1_feature_id ON SCORE_MS1 (FEATURE_ID);" score_ms1_pep = "SCORE_MS1.PEP" link_ms1 = "LEFT JOIN SCORE_MS1 ON SCORE_MS1.FEATURE_ID = FEATURE.ID" else: score_ms1_pep = "NULL" link_ms1 = "" if check_sqlite_table(con, "SCORE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);" if check_sqlite_table(con, "SCORE_IPF"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_feature_id ON SCORE_IPF (FEATURE_ID);" idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_peptide_id ON SCORE_IPF (PEPTIDE_ID);" query = ''' SELECT RUN.ID AS id_run, PEPTIDE.ID AS id_peptide, PRECURSOR.ID AS transition_group_id, PRECURSOR.DECOY AS decoy, RUN.ID AS run_id, RUN.FILENAME AS filename, FEATURE.EXP_RT AS RT, FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt, FEATURE.DELTA_RT AS delta_rt, FEATURE.NORM_RT AS iRT, PRECURSOR.LIBRARY_RT AS assay_iRT, FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT, FEATURE.ID AS id, PEPTIDE.UNMODIFIED_SEQUENCE AS Sequence, PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName, PRECURSOR.CHARGE AS Charge, PRECURSOR.PRECURSOR_MZ AS mz, FEATURE_MS2.AREA_INTENSITY AS Intensity, FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area, FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex, FEATURE.LEFT_WIDTH AS leftWidth, FEATURE.RIGHT_WIDTH AS rightWidth, SCORE_MS2.RANK AS peak_group_rank, SCORE_MS2.SCORE AS d_score, SCORE_MS2.QVALUE AS m_score, %s AS ms1_pep, SCORE_MS2.PEP AS ms2_pep FROM PRECURSOR INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID %s LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID WHERE SCORE_MS2.QVALUE < %s ORDER BY transition_group_id, peak_group_rank; ''' % (score_ms1_pep, link_ms1, max_rs_peakgroup_qvalue) 
query_augmented = ''' SELECT FEATURE_ID AS id, MODIFIED_SEQUENCE AS ipf_FullUniModPeptideName, PRECURSOR_PEAKGROUP_PEP AS ipf_precursor_peakgroup_pep, PEP AS ipf_peptidoform_pep, QVALUE AS ipf_peptidoform_m_score FROM SCORE_IPF INNER JOIN PEPTIDE ON SCORE_IPF.PEPTIDE_ID = PEPTIDE.ID WHERE SCORE_IPF.PEP < %s; ''' % ipf_max_peptidoform_pep # Main query for standard OpenSWATH else: idx_query = ''' CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID); CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID); CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID); CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID); CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID); CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID); ''' if check_sqlite_table(con, "FEATURE_MS1"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);" if check_sqlite_table(con, "FEATURE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);" if check_sqlite_table(con, "SCORE_MS2"): idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);" query = ''' SELECT RUN.ID AS id_run, PEPTIDE.ID AS id_peptide, PRECURSOR.ID AS transition_group_id, PRECURSOR.DECOY AS decoy, RUN.ID AS run_id, RUN.FILENAME AS filename, FEATURE.EXP_RT AS RT, FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt, FEATURE.DELTA_RT AS delta_rt, FEATURE.NORM_RT AS iRT, PRECURSOR.LIBRARY_RT AS assay_iRT, FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT, FEATURE.ID AS id, PEPTIDE.UNMODIFIED_SEQUENCE AS Sequence, PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName, PRECURSOR.CHARGE AS Charge, PRECURSOR.PRECURSOR_MZ AS mz, FEATURE_MS2.AREA_INTENSITY AS Intensity, FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area, FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex, FEATURE.LEFT_WIDTH AS leftWidth, FEATURE.RIGHT_WIDTH AS rightWidth, SCORE_MS2.RANK AS peak_group_rank, SCORE_MS2.SCORE AS d_score, SCORE_MS2.QVALUE AS m_score FROM PRECURSOR INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID WHERE SCORE_MS2.QVALUE < %s ORDER BY transition_group_id, peak_group_rank; ''' % max_rs_peakgroup_qvalue # Execute main SQLite query click.echo("Info: Reading peak group-level results.") con.executescript(idx_query) # Add indices data = pd.read_sql_query(query, con) # Augment OpenSWATH results with IPF scores if ipf_present and ipf=='augmented': data_augmented = pd.read_sql_query(query_augmented, con) data_augmented = data_augmented.groupby('id').apply(lambda x: pd.Series({'ipf_FullUniModPeptideName': ";".join(x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_FullUniModPeptideName']), 'ipf_precursor_peakgroup_pep': x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_precursor_peakgroup_pep'].values[0], 'ipf_peptidoform_pep': 
x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_peptidoform_pep'].values[0], 'ipf_peptidoform_m_score': x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_peptidoform_m_score'].values[0]})).reset_index(level='id') data = pd.merge(data, data_augmented, how='left', on='id') # Append transition-level quantities if transition_quantification: if check_sqlite_table(con, "SCORE_TRANSITION"): idx_transition_query = ''' CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID); CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID); CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id_feature_id ON FEATURE_TRANSITION (TRANSITION_ID, FEATURE_ID); CREATE INDEX IF NOT EXISTS idx_score_transition_transition_id_feature_id ON SCORE_TRANSITION (TRANSITION_ID, FEATURE_ID); CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID); ''' transition_query = ''' SELECT FEATURE_TRANSITION.FEATURE_ID AS id, GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area, GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex, GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation FROM FEATURE_TRANSITION INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID INNER JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID AND FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID WHERE TRANSITION.DECOY == 0 AND SCORE_TRANSITION.PEP < %s GROUP BY FEATURE_TRANSITION.FEATURE_ID ''' % max_transition_pep else: idx_transition_query = ''' CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID); CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID); CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID); ''' transition_query = ''' SELECT FEATURE_ID AS id, GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area, GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex, GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation FROM FEATURE_TRANSITION INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID GROUP BY FEATURE_ID ''' click.echo("Info: Reading transition-level results.") con.executescript(idx_transition_query) # Add indices data_transition = pd.read_sql_query(transition_query, con) data = pd.merge(data, data_transition, how='left', on=['id']) # Append concatenated protein identifier click.echo("Info: Reading protein identifiers.") con.executescript(''' CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_protein_id ON PEPTIDE_PROTEIN_MAPPING (PROTEIN_ID); CREATE INDEX IF NOT EXISTS idx_protein_protein_id ON PROTEIN (ID); CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_peptide_id ON PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID); ''') data_protein = pd.read_sql_query(''' SELECT PEPTIDE_ID AS id_peptide, GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,';') AS ProteinName FROM PEPTIDE_PROTEIN_MAPPING INNER JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID GROUP BY PEPTIDE_ID; ''', con) data = pd.merge(data, data_protein, how='inner', on=['id_peptide']) # Append peptide error-rate control peptide_present = False if peptide: peptide_present = check_sqlite_table(con, "SCORE_PEPTIDE") if peptide_present and peptide: click.echo("Info: Reading peptide-level 
results.") data_peptide_run = pd.read_sql_query(''' SELECT RUN_ID AS id_run, PEPTIDE_ID AS id_peptide, QVALUE AS m_score_peptide_run_specific FROM SCORE_PEPTIDE WHERE CONTEXT == 'run-specific'; ''', con) if len(data_peptide_run.index) > 0: data =
pd.merge(data, data_peptide_run, how='inner', on=['id_run','id_peptide'])
pandas.merge
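The pandas.merge completion above joins run-specific peptide scores onto the peak-group table; the sketch below reproduces that inner join on the (id_run, id_peptide) key with tiny made-up frames.

import pandas as pd

# Minimal stand-ins for the peak-group results and the run-specific peptide scores.
data = pd.DataFrame({
    "id_run": [1, 1, 2],
    "id_peptide": [10, 11, 10],
    "m_score": [0.001, 0.020, 0.005],
})
data_peptide_run = pd.DataFrame({
    "id_run": [1, 2],
    "id_peptide": [10, 10],
    "m_score_peptide_run_specific": [0.002, 0.004],
})

# Inner join on the composite key; rows without a peptide-level score are dropped.
merged = pd.merge(data, data_peptide_run, how="inner", on=["id_run", "id_peptide"])
print(merged)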
#!/usr/bin/env python """ collection of functions for the final case study solution """ import os import re import numpy as np import pandas as pd import json import glob import logging from collections import defaultdict import shutil #logging = logging.getlogging(__name__) logging.basicConfig(level=logging.INFO) import time import warnings warnings.filterwarnings("ignore") def fetch_data(path): """fetch json data from a path """ correct_cols_name =sorted(['country', 'customer_id', 'day', 'invoice', 'month', 'price', 'stream_id', 'times_viewed', 'year']) all_json_data = [] logging.info(f'start loading data...') for i in glob.glob(os.path.join(path,'*.json')): with open(i) as f: data = json.load(f) unstandardized_element = [] for idx,item in enumerate(data): if 'total_price' in item.keys(): data[idx]['price'] = data[idx].pop('total_price') logging.debug(f'key_name total_price is not standardized and has been changed to price in file {i}') if 'StreamID' in item.keys(): data[idx]['stream_id'] = data[idx].pop('StreamID') if 'TimesViewed' in item.keys(): data[idx]['times_viewed'] = data[idx].pop('TimesViewed') if sorted(list(item.keys())) != correct_cols_name: logging.warning(f"key name of element {idx} in file {i} is {item.keys().__str__()}" + "and it has been removed from file") unstandardized_element.append(idx) data = list( data[i] for i in range(len(data)) if i not in unstandardized_element) all_json_data.extend(data) return pd.DataFrame(all_json_data) def convert_to_ts(df_all,country ='United Kingdom'): try: df= df_all[df_all.country==country] df['date'] = pd.to_datetime(df[['year', 'month', 'day']],format='%Y%m%d') df_agg = df.groupby('date',as_index=False)[['price','times_viewed']].agg({'price':['sum','count'],'times_viewed':'sum'}) df_agg.columns = ['date','revenue','purchases','total_views'] df_agg['unique_streams'] = df.groupby('date',as_index=False)['stream_id'].transform(lambda x:x.nunique()) df_agg['unique_invoices'] = df.groupby('date',as_index=False)['invoice'].transform(lambda x:x.nunique()) # fill empty date df_agg = df_agg.set_index(df_agg['date']) df_agg.index =
pd.DatetimeIndex(df_agg.index)
pandas.DatetimeIndex
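A compact sketch of the pandas.DatetimeIndex completion above, with invented daily revenue; the reindex step reflects the "# fill empty date" comment in the snippet and is an assumption about what follows the completion.

import pandas as pd

# Hypothetical daily revenue with one missing calendar day, similar to df_agg above.
df_agg = pd.DataFrame(
    {"revenue": [120.0, 80.5, 95.0]},
    index=["2019-11-01", "2019-11-02", "2019-11-04"],
)

# Promote the string index to a DatetimeIndex, then reindex over the full range
# so the gap on 2019-11-03 appears explicitly (filled with 0 here).
df_agg.index = pd.DatetimeIndex(df_agg.index)
full_range = pd.date_range(df_agg.index.min(), df_agg.index.max(), freq="D")
df_agg = df_agg.reindex(full_range, fill_value=0)
print(df_agg)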
import numpy as np import numpy.testing as npt import pandas as pd import pandas.testing as pdt import pytest import datetime from pandas.api.types import is_numeric_dtype import timeserio.ini as ini from timeserio.data.mock import mock_fit_data from timeserio.preprocessing import PandasDateTimeFeaturizer from timeserio.preprocessing.datetime import ( get_fractional_day_from_series, get_fractional_hour_from_series, get_fractional_year_from_series, truncate_series, get_zero_indexed_month_from_series, get_time_is_in_interval_from_series, get_is_holiday_from_series ) datetime_column = ini.Columns.datetime seq_column = f'seq_{ini.Columns.datetime}' usage_column = ini.Columns.target @pytest.fixture def df(): return mock_fit_data(start_date=datetime.datetime(2017, 1, 1, 1, 0)) @pytest.fixture def featurizer(): return PandasDateTimeFeaturizer() def test_get_fractional_hour_from_series(): series = pd.Series( pd.date_range(start='2000-01-01', freq='0.5H', periods=48) ) fractionalhour = get_fractional_hour_from_series(series) expected = pd.Series(np.linspace(0, 23.5, 48)) pdt.assert_series_equal(fractionalhour, expected) def test_get_fractional_day_from_series(): series = pd.Series(
pd.date_range(start='2000-01-01', freq='6H', periods=5)
pandas.date_range
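The pandas.date_range completion above feeds the fractional-day test; the sketch below rebuilds that series and computes a plain-pandas stand-in for the feature (not timeserio's actual get_fractional_day_from_series).

import numpy as np
import pandas as pd

# A 6-hourly datetime series like the one built in the test above.
series = pd.Series(pd.date_range(start="2000-01-01", freq="6H", periods=5))

# Plain-pandas sketch of a "fractional day" feature: hours since midnight
# divided by 24, so 00:00 -> 0.0, 06:00 -> 0.25, and so on.
fractional_day = (series.dt.hour + series.dt.minute / 60) / 24
expected = pd.Series([0.0, 0.25, 0.5, 0.75, 0.0])
pd.testing.assert_series_equal(fractional_day, expected)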
# -*- coding: utf-8 -*- ############################################################### # Author: <EMAIL> (<NAME>) # # Created: 10/12/2020 # # Python : 3.x # ############################################################### # The future package will provide support for running your code on Python 2.6, 2.7, and 3.3+ mostly unchanged. # http://python-future.org/quickstart.html from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import * ##### Basic packages ##### import datetime import sys, os import pandas as pd import math import re ##### CMD packages ##### from tqdm import tqdm #from tabulate import tabulate ##### GUI packages ##### from gooey import Gooey, GooeyParser from colored import stylize, attr, fg # 417574686f723a205061747269636520506f6e6368616e74 ########################################################## # Main code # ########################################################## # this needs to be *before* the @Gooey decorator! # (this code allows to only use Gooey when no arguments are passed to the script) if len(sys.argv) >= 2: if not '--ignore-gooey' in sys.argv: sys.argv.append('--ignore-gooey') cmd = True else: cmd = False # GUI Configuration # Preparing your script for packaging https://chriskiehl.com/article/packaging-gooey-with-pyinstaller # Prevent stdout buffering # https://github.com/chriskiehl/Gooey/issues/289 @Gooey( program_name='Rename tool for sensors using the spreadsheet generated by splsensors', progress_regex=r"^progress: (?P<current>\d+)/(?P<total>\d+)$", progress_expr="current / total * 100", hide_progress_msg=True, richtext_controls=True, #richtext_controls=True, terminal_font_family = 'Courier New', # for tabulate table nice formatation #dump_build_config=True, #load_build_config="gooey_config.json", default_size=(800, 750), timing_options={ 'show_time_remaining':True, 'hide_time_remaining_on_complete':True }, tabbed_groups=True, navigation='Tabbed', header_bg_color = '#95ACC8', #body_bg_color = '#95ACC8', menu=[{ 'name': 'File', 'items': [{ 'type': 'AboutDialog', 'menuTitle': 'About', 'name': 'renamensensorsspl', 'description': 'Rename tool for sensors using the spreadsheet generated by splsensors', 'version': '0.2.0', 'copyright': '2020', 'website': 'https://github.com/Shadoward/renamesensors-spl', 'developer': '<EMAIL>', 'license': 'MIT' }] },{ 'name': 'Help', 'items': [{ 'type': 'Link', 'menuTitle': 'Documentation', 'url': '' }] }] ) def main(): desc = "Rename tool for sensors using the spreadsheet generated by splsensors" parser = GooeyParser(description=desc) mainopt = parser.add_argument_group('Full Rename Options', gooey_options={'columns': 1}) lnopt = parser.add_argument_group('LineName Rename Options', gooey_options={'columns': 1}) revertopt = parser.add_argument_group('Reverse Renaming Options', description='This option is to be used in csae or you need to rename back the renamed files', gooey_options={'columns': 1}) # Full Rename Arguments mainopt.add_argument( '-i', '--xlsxFile', dest='xlsxFile', metavar='sheets_combined.xlsx File Path', help='This is the merge file with all the Final spreadsheet generated by the splsensors tool.\n Please be sure that you have QC the spreadsheet!', widget='FileChooser', gooey_options={'wildcard': "Excel Files(.xlsx)|*.xlsx"}) mainopt.add_argument( '-n', '--filename', dest='filename', metavar='Filename', widget='TextField', type=str, help='File name to be use to rename the file.\nYou can use the following wildcard to automate the linename:\n[V] = vessel;\n[LN] = 
Linename from SPL;\n[ST] = Sensor Type;\n[SD] = Start Date from the sensor (yyyymmdd_hhmmss);\n[N] = sequence number if the sensor have split.\ne.g: [V]_[LN]_[SD]_ASOW.') mainopt.add_argument( '-s', '--seqnumber', dest='seqnumber', metavar='Sequence Number Format', widget='TextField', #default='FugroBrasilis-CRP-Position', help='Sequence number format for split files. e.g.: 000 or 00') mainopt.add_argument( '-t', '--timeFormat', dest='timeFormat', metavar='Timestamp Format', widget='TextField', default='%Y%m%d_%H%M', help='Timestamp format to be use in the file name.\ne.g.: %Y%m%d_%H%M%S --> 20201224_152432') # LineName Rename Arguments lnopt.add_argument( '-I', '--xlsxFile2', dest='xlsxFile2', metavar='sheets_combined.xlsx File Path', help='This is the merge file with all the Final spreadsheet generated by the splsensors tool.\n Please be sure that you have QC the spreadsheet!', widget='FileChooser', gooey_options={'wildcard': "Excel Files(.xlsx)|*.xlsx"}) # Additional Arguments revertopt.add_argument( '-r', '--reverseFile', dest='reverseFile', metavar='reverse_rename.csv File Path', help='This is the file generate by this tool after you have rename the files.\nThe file can be edited to remove what you do not need to reverse back the name.', widget='FileChooser', gooey_options={'wildcard': "Comma separated file (*.csv)|*reverse*.csv"}) # Use to create help readme.md. TO BE COMMENT WHEN DONE # if len(sys.argv)==1: # parser.print_help() # sys.exit(1) args = parser.parse_args() process(args, cmd) def process(args, cmd): """ Uses this if called as __main__. """ xlsxFile = args.xlsxFile xlsxFile2 = args.xlsxFile2 filename = args.filename reverseFile = args.reverseFile seqnumber = args.seqnumber if args.seqnumber is not None else "000" timeFormat = args.timeFormat Fseqnumber = len(seqnumber) ########################################################## # Checking before continuing # ########################################################## # Check if Final merge spreadsheet is selected if not xlsxFile and not xlsxFile2: print ('') sys.exit(stylize('Final spreadsheet was not selected. Please select the Final spreadsheet created by splsensors tool, quitting', fg('red'))) if xlsxFile: try: xl = pd.read_excel(xlsxFile, sheet_name=None, engine='openpyxl') sheets = xl.keys() except IOError: print('') sys.exit(stylize(f'The following file is lock ({xlsxFile}). Please close the files, quitting.', fg('red'))) if xlsxFile2: try: xl = pd.read_excel(xlsxFile2, sheet_name=None, engine='openpyxl') sheets = xl.keys() except IOError: print('') sys.exit(stylize(f'The following file is lock ({xlsxFile}). Please close the files, quitting.', fg('red'))) if not any(key in list(sheets) for key in ['Full_List', 'Rename_LN']): print ('') sys.exit(stylize('Correct Final spreadsheet was not selected. Please select a correct Final spreadsheet created by splsensors tool, quitting', fg('red'))) # Check if filename is defined if xlsxFile and not filename: print ('') sys.exit(stylize('Filename empty. Please define the new file name, quitting', fg('red'))) ########################################################## # Reverse Naming # ########################################################## if args.reverseFile is not None: print('', flush = True) print('##################################################', flush = True) print('RENAMING BACK THE FILES. 
PLEASE WAIT....', flush = True) print('##################################################', flush = True) now = datetime.datetime.now() # record time of the subprocess dfreverse = pd.read_csv(reverseFile, usecols=["OldName","NewName"]) pbar = tqdm(total=len(dfreverse)) if cmd else print(f"Renaming the files.\nNote: Output show file counting every {math.ceil(len(dfreverse)/10)}") #cmd vs GUI for index, row in dfreverse.iterrows(): oldname = row['OldName'] newname = row['NewName'] if os.path.exists(newname): os.rename(newname, oldname) progressBar(cmd, pbar, index, dfreverse) print('', flush = True) print('##################################################', flush = True) print('SUMMARY', flush = True) print('##################################################', flush = True) print('', flush = True) print(f'A total of {len(dfreverse)} files were renamed back.\n', flush = True) print("Subprocess Duration: ", (datetime.datetime.now() - now), flush = True) sys.exit() # Remove old reverse log if xlsxFile: xlsxFilePath = os.path.dirname(os.path.abspath(xlsxFile)) else: xlsxFilePath = os.path.dirname(os.path.abspath(xlsxFile2)) if os.path.exists(xlsxFilePath + '\\reverse_rename.csv'): try: os.remove(xlsxFilePath + '\\reverse_rename.csv') except IOError: print('') sys.exit(stylize(f'The reverse_rename.csv file is lock. Please close the files, quitting.', fg('red'))) ########################################################## # Listing the files # ########################################################## print('', flush = True) print('##################################################', flush = True) print('READING THE SPREADSHEET AND RENAMING THE FILES.', flush = True) print('PLEASE WAIT....', flush = True) print('##################################################', flush = True) if args.xlsxFile2 is not None: dfreverse = lnrename(xlsxFile2) else: dfreverse = fullrename(xlsxFile, timeFormat, Fseqnumber, filename) dfreverse.to_csv(xlsxFilePath + '\\reverse_rename.csv', index=True) print('', flush = True) print('##################################################', flush = True) print('SUMMARY', flush = True) print('##################################################', flush = True) print('', flush = True) print(f'A total of {len(dfreverse)} files were renamed.\n', flush = True) print('') print(f'Reverse Log can be found in {xlsxFilePath}.\n', flush = True) ########################################################## # Functions # ########################################################## def lnrename(xlsxFile2): dfRename = pd.read_excel(xlsxFile2, sheet_name='Rename_LN', engine='openpyxl') dfreverse = pd.DataFrame(columns = ["OldName", "NewName"]) pbar = tqdm(total=len(dfRename)) if cmd else print(f"Renaming the files.\nNote: Output show file counting every {math.ceil(len(dfRename)/10)}") #cmd vs GUI for index, row in dfRename.iterrows(): FilePath = row['FilePath'] path = os.path.dirname(os.path.abspath(FilePath)) ext = os.path.splitext(os.path.basename(FilePath))[1] newname = row['New LineName'] # Renaming if os.path.exists(FilePath): os.rename(FilePath, os.path.join(path, newname + ext)) # Generate log reverse dfreverse = dfreverse.append(pd.Series([FilePath, os.path.join(path, newname + ext)], index=dfreverse.columns), ignore_index=True) progressBar(cmd, pbar, index, dfRename) return dfreverse def fullrename(xlsxFile, timeFormat, Fseqnumber, filename): dfRename = pd.read_excel(xlsxFile, sheet_name='Full_List', engine='openpyxl') coldrop = ['Summary', 'Difference Start [s]', 'Session Start', 'Session 
End', 'Session Name', 'Session MaxGap', 'Sensor FileName'] dfRename.drop(columns=coldrop, inplace=True) dfRename.dropna(subset=['SPL LineName'], inplace = True) dfRename['Incremental'] = None # https://stackoverflow.com/questions/59875334/add-incremental-value-for-duplicates # https://stackoverflow.com/questions/56137222/pandas-group-by-then-apply-throwing-a-warning # Try using .loc[row_indexer,col_indexer] = value instead dftmp = dfRename[dfRename.duplicated(subset='SPL LineName', keep=False)] dfRename.loc[dftmp.index, 'Incremental'] = dftmp.groupby(['SPL LineName']).cumcount() + 1 dfRename.update(dftmp) dfreverse =
pd.DataFrame(columns = ["OldName", "NewName", "Incremental", "Sensor Type", "Vessel Name"])
pandas.DataFrame
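A short sketch around the pandas.DataFrame completion above. Note that the snippet later relies on DataFrame.append, which was removed in pandas 2.0, so the example collects rows first and builds the frame in one go; the file names are invented.

import pandas as pd

# The rename log above starts from an empty frame with fixed columns:
dfreverse = pd.DataFrame(columns=["OldName", "NewName"])

# Accumulate the renames as a list of dicts, then construct the frame once.
rows = [
    {"OldName": "line_001.raw", "NewName": "VES_LN01_20201224_1524_ASOW.raw"},
    {"OldName": "line_002.raw", "NewName": "VES_LN02_20201224_1610_ASOW.raw"},
]
dfreverse = pd.DataFrame(rows, columns=dfreverse.columns)
print(dfreverse)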
''' 特征工程部分: 1.各特征转化为数值型特征 2.异常值检测 3.缺失值填充 4.特征变换 5.特征选择 最终数据集:包含做完特征变换后的特征样本值,以及未变换前的房屋总价和房屋每平米价 ''' import pymysql import pandas as pd import numpy as np import category_encoders as ce import re import cn2an import datetime from scipy.stats import skew from scipy import stats from sklearn.preprocessing import PowerTransformer conn = pymysql.connect( host = '192.168.127.12', user = 'root', passwd = '<PASSWORD>', db = 'house', port=3306, charset = 'utf8' ) cursor01 = conn.cursor() cursor01.execute( "select column_name, column_comment from information_schema.columns where table_schema ='house' and table_name = 'allhouses'") all_info = list(cursor01.fetchall()) print(all_info) df = pd.read_sql('select * from allhouses',conn) column_names = [] for i in all_info: column_names.append(i[1]) df.columns = column_names df1 = df.copy() #有几十个房子由于页面特殊爬取错乱 df1 = df1.dropna(subset=['装修情况']) #某些特征无用,删除 df1 = df1.drop(columns=['小区详情url','房屋年限','房子ID'],axis=1) df1.to_excel('初始数据集.xlsx',index=None) ''' (1)各特征转化为数值特征 1.去除单位 2.特征组合 3.特征编码 ''' #物业费用有些是1.2至100元/平米/月 有些是1.5元/平米/月的类型,对于前者取中值,对于后者只去除单位 df1.loc[df1['小区物业费用']=='暂无信息','小区物业费用'] = np.nan #对于暂无信息的值记作空值 df1.loc[df1['小区物业费用'].notnull(),'小区物业费用']=\ df1.loc[df1['小区物业费用'].notnull(),'小区物业费用'].apply(lambda x:x[:-6]) df1.loc[df1['小区物业费用'].notnull(),'小区物业费用']=\ df1.loc[df1['小区物业费用'].notnull(),'小区物业费用'].apply(lambda x: (np.double(x.split('至')[0])+np.double(x.split('至')[1]))/2 if '至' in x else x) #对于上次交易时间与挂牌时间,做衍生指标处理,为挂牌时间-上次交易时间,单位天数 df1.loc[df['上次交易']=='暂无数据','上次交易'] = np.nan #对于暂无信息的值记作空值 today = str(datetime.datetime.now().year)+'-'+str(datetime.datetime.now().month)+'-'+str(datetime.datetime.now().day) df1['当前时间'] = today df1['当前时间'] = pd.to_datetime(df1['当前时间'],errors = 'coerce') df1['上次交易'] = pd.to_datetime(df1['上次交易'],errors = 'coerce') df1['挂牌时间'] = pd.to_datetime(df1['挂牌时间'],errors = 'coerce') df1['挂牌时间-上次交易时间'] = ((df1['挂牌时间'] - df1['上次交易']).values/np.timedelta64(1, 'h'))/24 df1['当前时间-挂牌时间']=((df1['当前时间'] - df1['挂牌时间']).values/np.timedelta64(1, 'h'))/24 #删除原指标 df1 = df1.drop(columns = ['上次交易','挂牌时间','当前时间']) #房屋用途,标签编码 yongtu_mapping = { '别墅':5, '商业':4, '商住两用':3, '普通住宅':2, '平房':1, } df1['房屋用途'] = df1['房屋用途'].map(yongtu_mapping) #抵押信息,将信息改为有无抵押,暂无信息则改为NaN,后进行编码处理 df1.loc[(df1['抵押信息']!='无抵押')&(df1['抵押信息'].notnull()),'抵押信息']=\ df1.loc[(df1['抵押信息']!='无抵押')&(df1['抵押信息'].notnull()),'抵押信息'].apply(lambda x:x[:3]) df1.loc[df1['抵押信息']=='暂无数','抵押信息']=np.nan #前3个特征为有序特征,如产权共有的房子会更受消费者的青睐,因此均作二值化处理 df1.loc[df1['产权所属']=='共有','产权所属']=1 df1.loc[df1['产权所属']=='非共有','产权所属']=0 df1.loc[df1['抵押信息']=='无抵押','抵押信息']=1 df1.loc[df1['抵押信息']=='有抵押','抵押信息']=0 df1.loc[df1['房本备件']=='已上传房本照片','房本备件']=1 df1.loc[df1['房本备件']=='未上传房本照片','房本备件']=0 #对于交易权属,使用频数编码 count_enc = ce.CountEncoder() #Transform the features, rename the columns with the _count suffix, and join to dataframe df1['交易权属'] = count_enc.fit_transform(df1['交易权属']) #建筑时间 df1.loc[df1['建楼时间'].notnull(),'建楼时间']=df1.loc[df1['建楼时间'].notnull(),'建楼时间'].apply(lambda x:x[:-2]) df1.loc[df1['建楼时间']=='未知','建楼时间']=np.nan df1['建楼时间-小区建筑年代']=np.nan df1['建楼时间-小区建筑年代']=df1.loc[df1['建楼时间'].notnull(),'建楼时间'].astype(float)-df1.loc[df1['小区建筑年代'].notnull(),'小区建筑年代'].astype(float) df1.loc[df1['建楼时间'].notnull(),'建楼时间']=\ float(datetime.datetime.now().year)-(df1.loc[df1['建楼时间'].notnull(),'建楼时间'].astype(float)) df1.loc[df1['小区建筑年代'].notnull(),'小区建筑年代']=\ float(datetime.datetime.now().year)-(df1.loc[df1['小区建筑年代'].notnull(),'小区建筑年代'].astype(float)) df1.rename(columns={"建楼时间":"建楼距今时长", "小区建筑年代":"小区建成距今时长"},inplace=True) #面积特征 
df1.loc[df1['套内面积']=='暂无数据','套内面积']=np.nan df1.loc[df1['建筑面积'].notnull(),'建筑面积']=df1.loc[df1['建筑面积'].notnull(),'建筑面积'].apply(lambda x:x[:-1]).astype(float) df1.loc[df1['套内面积'].notnull(),'套内面积']=df1.loc[df1['套内面积'].notnull(),'套内面积'].apply(lambda x:x[:-1]).astype(float) #衍生特征,建筑面积-套内面积=公摊面积 df1['公摊面积']=np.nan df1['公摊面积']=df1.loc[df1['建筑面积'].notnull(),'建筑面积']-df1.loc[df1['套内面积'].notnull(),'套内面积'] #梯户比例:衍生特征户数/梯数 re2 = re.compile('(.+)梯(.+)户') def cn_extraction(x): if x==np.nan: return ['一','零'] ret = re2.findall(x) if ret: return (re2.findall(x))[0] else: return ['一','零'] def calculate_ratio(x): h = cn2an.cn2an(x[1],'smart') t = cn2an.cn2an(x[0],'smart') r = h/t return r df1.loc[df1['梯户比例'].notnull(),'梯户比例']=df1.loc[df1['梯户比例'].notnull(),'梯户比例'].apply(cn_extraction).apply(calculate_ratio) #房屋户型:衍生特征:卧室数量,客厅数量,厨房数量,卫生间数量 re1 = re.compile('\d+') temp = df1.loc[:,'房屋户型'].apply(re1.findall) df1['卧室数量']=df1['客厅数量']=df1['厨房数量']=df1['卫生间数量']=np.nan df1['卧室数量'] = temp.apply(lambda x:x[0]) df1['客厅数量'] = temp.apply(lambda x:x[1]) df1['厨房数量'] = temp.apply(lambda x:x[2]) df1['卫生间数量'] = temp.apply(lambda x:x[3]) df1 = df1.drop(columns = ['房屋户型']) #所在楼层衍生特征总楼层数目 re3 = re.compile('.+共(\d+)层.+') df1['总层数'] = df1.loc[:,'所在楼层'].apply(lambda x:re3.findall(x)[0]) # 户型分间衍生以下特征 # 1.客厅面积占比 # 2.卧室面积占比 # 3.实际使用面积 # 4.落地窗数量 # 5.有无储物间 # 6.有无入室花园 # 7.窗户数量 # 8.平均卧室面积 # 9.客厅、卧室、阳台朝向,采光等级 #朝向以等级分级 #南>东南=西南>东=西>东北=西北>北 rank_direction = { '南':5, '东南':4, '西南':4, '东':3, '西':3, '东北':2, '西北':2, '北':1, '无':None } def change_direct(x): for k,j in rank_direction.items(): if k in x: return j df1.loc[df1['户型分间']=='{}','户型分间']=np.nan list1=[] list2=[] list3=[] list4=[] list5=[] list6=[] list7=[] list8=[] list9=[] list10=[] list11=[] for item in df1.loc[df1['户型分间'].notnull(), '户型分间']: item = eval(item) # 转换为字典 df2 =
pd.DataFrame(item)
pandas.DataFrame
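The pandas.DataFrame completion above converts the eval'd 户型分间 dict into a frame; the sketch below uses a made-up layout string (the real schema may differ) and ast.literal_eval as a safer parse than eval.

import ast
import pandas as pd

# A made-up room-layout string in the spirit of the 户型分间 field.
raw = "{'室内门': ['卧室A', '客厅'], '面积': ['11.2平米', '25.6平米'], '朝向': ['南', '北']}"
item = ast.literal_eval(raw)

# A dict of equal-length lists becomes one column per key.
df2 = pd.DataFrame(item)
print(df2)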
import glob import os import sys import tkinter as tk from tkinter import messagebox from xml.etree import ElementTree import cv2 import numpy as np import pandas as pd from PIL import Image, ImageTk from utils import annotator FOLDER_PATH = 'purifier/folders.pkl' def get_folders(): """ read all trial folders and return a dataframe :return: dataframe """ # get the list of all folders folders_path = sorted(glob.glob("data/Original-data/belvedere/*")) # Create a dic to hold number of invalid images per folder f_dic = {} for path in folders_path: f_dic[path] = 0 # Get the invalid images # invalid_images = glob.glob("data/Original-data/*/*/*.jpg_") # loop over all invalid images and +1 to the folder # for img in invalid_images: # t = img.split("/") # f_path = '/'.join(t[:-1]) # f_dic[f_path] += 1 # make a data frame from dic # f_list = [[k, v] for k, v in f_dic.items()] folder_df = pd.DataFrame(data=folders_path, columns=["folder"]) folder_df["checked"] = False # # folder_df = folder_df.sort_values(["invalids"], ascending=False) # folder_df.reset_index(inplace=True) folder_df.to_pickle(FOLDER_PATH) for i, row in folder_df.iterrows(): print(row.folder) return folder_df def get_dataframe(_path): """ get a path and read images and labels (xmls) from current directory :param _path: directory path :return: a dataframe """ all_images = sorted(glob.glob(_path + "/*.bmp")) all_xmls = sorted(glob.glob(_path + "/*.xml")) data = [] for i, img in enumerate(all_images): vals = read_xml(all_xmls[i]) # add image number to sort the dataframe based on it name = img.split("/")[-1] num = name.split(".")[0] num = int(num[:-2]) data.append([img, vals[0], vals[1], vals[2], vals[3], vals[4], num]) df = pd.DataFrame(data=data, columns=["path", "xt", "yt", "wt", "ht", "angt", "num"]) df = df.sort_values(["num"]) df.reset_index(inplace=True) df["status"] = 0 return df def read_xml(xml_path): e = ElementTree.parse(xml_path).getroot() x = np.float32(e[0].text) y = np.float32(e[1].text) w = np.float32(e[2].text) h = np.float32(e[3].text) a = np.float32(e[4].text) return [x, y, w, h, a] def numpy2pil(np_array: np.ndarray) -> Image: """ convert an HxWx3 numpy array into an RGB Image :param np_array: input numpy array :return: A PIL Image object """ assert_mfg = "input shall be a HxWx3 ndarray" assert isinstance(np_array, np.ndarray), assert_mfg assert np.ndim(np_array) == 3, assert_mfg assert np_array.shape[2] == 3, assert_mfg img = Image.fromarray(np_array, 'RGB') return img class inspector_gui: def __init__(self, master, data): self.frame = tk.Frame(master) self.frame.pack_propagate(0) self.frame.pack(fill=tk.BOTH, expand=1) # Folder index self.f_idx = 0 self.folder_df = data self.n_folders = len(data) self.current_df = None self.n_img = 0 self.current_df_dirty = False # folder navigation self.prev_folder_btn = tk.Button(self.frame, text="previous Folder", command=lambda: self.change_folder(-1)) self.prev_folder_btn.place(width=140, height=30, x=20, y=5) self.path_lbl = tk.Label(self.frame, text="Image path: ", anchor=tk.CENTER) self.path_lbl.place(width=380, height=20, x=200, y=5) self.next_folder_btn = tk.Button(self.frame, text="next Folder", command=lambda: self.change_folder(1)) self.next_folder_btn.place(width=140, height=30, x=640, y=5) # big labeled image self.canvas = tk.Canvas(self.frame, width=576, height=576, bg="yellow") self.canvas.place(width=576, height=576, x=12, y=40) img = Image.open("0in.jpg") self.photo = ImageTk.PhotoImage(img) self.image_ref = self.canvas.create_image((288, 288), image=self.photo) # 
thumbsnail image self.canvas_s = tk.Canvas(self.frame, width=192, height=192) self.canvas_s.place(width=192, height=192, x=596, y=40) self.photo_s = ImageTk.PhotoImage(img) self.image_refs = self.canvas_s.create_image((96, 96), image=self.photo_s) self.pager_lbl = tk.Label(self.frame, text="0/1234", anchor=tk.CENTER) self.pager_lbl.place(width=192, height=20, x=596, y=225) self.status_lbl = tk.Label(self.frame, text="0", anchor=tk.CENTER, font=("Courier", 34)) self.status_lbl.place(width=192, height=40, x=596, y=255) # true false buttons self.incorrect_btn = tk.Button(self.frame, text="Incorrect (i)", bg="red", command=lambda: self.updateDF(2)) self.incorrect_btn.place(width=80, height=40, x=610, y=410) self.correct_btn = tk.Button(self.frame, text="correct (c)", bg="green", command=lambda: self.updateDF(1)) self.correct_btn.place(width=80, height=40, x=700, y=410) # back and forward buttons for images self.backButton = tk.Button(self.frame, text="<- back", command=lambda: self.updateIndex(-1)) self.backButton.place(width=80, height=30, x=610, y=470) self.nextButton = tk.Button(self.frame, text="next ->", command=lambda: self.updateIndex(1)) self.nextButton.place(width=80, height=30, x=700, y=470) # capture image and save dataframe buttons self.capture_btn = tk.Button(self.frame, text="Capture (p)", command=self.capture) self.capture_btn.place(width=80, height=30, x=610, y=530) self.save_btn = tk.Button(self.frame, text="save", command=self.saveDF) self.save_btn.place(width=80, height=30, x=700, y=530) # export and rename buttons self.export_btn = tk.Button(self.frame, text="export path", command=self.exportPath) self.export_btn.place(width=80, height=30, x=700, y=580) self.rename_btn = tk.Button(self.frame, text="rename path", command=self.file_renamer) self.rename_btn.place(width=80, height=30, x=610, y=580) # bind events with keyboard master.bind('<Left>', self.leftKey) master.bind('<Right>', self.rightKey) master.bind('i', self.enterKey) master.bind('c', self.spaceKey) master.bind('p', self.captureKey) # select the first folder as start point self.goto_folder(0) def rightKey(self, event): self.updateIndex(1) def leftKey(self, event): self.updateIndex(-1) def spaceKey(self, event): self.updateDF(1) def enterKey(self, event): self.updateDF(2) def captureKey(self, event): self.capture() def findNextIndex(self): """ loop over dataframe and return an index with status 0 if not found, alert and return index= 0 :return: """ status_0 = self.current_df.index[self.current_df["status"] == 0].tolist() status_0 = sorted(status_0) if len(status_0) == 0: status_1 = self.current_df.index[self.current_df["status"] == 1].tolist() status_1 = sorted(status_1) if len(status_1) == 0: return self.goto_folder(1) else: return status_1[0] else: return status_0[0] def capture(self): row = self.current_df.iloc[self.img_index] img = cv2.imread(row.path, cv2.IMREAD_GRAYSCALE) truth = [row.xt, row.yt, row.wt, row.ht, row.angt] # Update the labeled image img = annotator((0, 250, 0), img, *truth) # Green save_path = row.path.replace("/", "-") cv2.imwrite("purifier/" + save_path, img) def change_folder(self, val): """ update the folder index and clip it between 0 and n_folders :param val: +1 go next, -1 go previous :return: updated folder_idx """ if self.current_df_dirty: res = messagebox.askquestion("Save Data", "Did you save the data?", icon='warning') if res == 'no': return self.f_idx += val self.f_idx = np.clip(self.f_idx, 0, self.n_folders - 1) self.goto_folder(self.f_idx) def goto_folder(self, idx): """ Get the 
path from folder data frame. We should check if the upcomming folder has already a dataframe for its images. if not, create one. :param idx: index of current folder to be shown """ # get the row of current path row = self.folder_df.iloc[idx] # check if dataframe is already exist df_name = row.folder.replace("/", "_") df_path = "purifier/" + df_name + ".pkl" if os.path.exists(df_path): self.current_df = pd.read_pickle(df_path) else: # read all images and labels in current directory self.current_df = get_dataframe(row.folder) # reset the image index self.img_index = self.findNextIndex() self.n_img = len(self.current_df) # update the folder name label new_text = "{0}".format(row.folder) self.path_lbl.configure(text=new_text) self.current_df_dirty = False # finally update GUI with new data self.updateGUI() def exportPath(self): """ export path of images which flaged as incorrect :return: """ incorrects = self.current_df[self.current_df.status == 2] path_txt = [] # loop over rows and extract the paths for i, row in incorrects.iterrows(): path_txt.append(row.path + "\n") # save file f_row = self.folder_df.iloc[self.f_idx] export_path = f_row.folder + "/incorrects.txt" open(export_path, mode='w').writelines(path_txt) # corrects = self.df[self.df.status == 1] # with open(CHECKED_PATH, mode='a') as f: # for i, row in corrects.iterrows(): # path = row.trial + "/" + row.img_id + "\n" # f.writelines(path) messagebox.showinfo("Export path", "incorrect paths exported successfuly at {}".format(export_path)) def updateDF(self, val): """ update the status of current row and go to next image :return: """ self.current_df.at[self.img_index, "status"] = val r = self.current_df.iloc[self.img_index] print("{0} has been marked as {1}".format(r.path, r.status)) self.current_df_dirty = True self.updateIndex(1) def saveDF(self): """ save incorrect labeled images into a file :return: """ # get the row of current path row = self.folder_df.iloc[self.f_idx] # check if dataframe is already exist df_name = row.folder.replace("/", "_") df_path = "purifier/"+df_name+".pkl" try: self.current_df.to_pickle(df_path) except IOError: print("IO Error") except RuntimeError: print("RuntimeError") except EOFError: print("EOFError") except OSError: print("OSError") except: print("Unexpected error:", sys.exc_info()[0]) self.current_df_dirty = False messagebox.showinfo("save data", "Data saved successfuly at {}".format(df_path)) def updateIndex(self, val): """ update the image index and clip between 0, len(n_img). 
finally update the GUI :param val: :return: """ self.img_index += val self.img_index = np.clip(self.img_index, 0, self.n_img - 1) self.updateGUI() def updateGUI(self): """ update the GUI based on img_index :return: """ row = self.current_df.iloc[self.img_index] # update pager new_text = "{0}/{1}".format(self.img_index + 1, self.n_img) self.pager_lbl.configure(text=new_text) # update status self.status_lbl.configure(text=str(row.status)) # update image holder # load image file = row.path.split(".")[0] file = file + ".bmp" if row.status == 2: img = cv2.imread(file, cv2.IMREAD_GRAYSCALE) else: img = cv2.imread(file, cv2.IMREAD_GRAYSCALE) # update thumbnails before manipulation s_img = np.asarray(img, dtype=np.uint8) s_img = Image.fromarray(s_img, 'L') self.photo_s = ImageTk.PhotoImage(image=s_img) self.canvas_s.itemconfig(self.image_refs, image=self.photo_s) # resize image 3x and put label on it img = cv2.resize(img, (576, 576)) truth = [row.xt * 3, row.yt * 3, row.wt * 3, row.ht * 3, row.angt] img = annotator((0, 250, 0), img, *truth) # Green img = numpy2pil(img) self.photo = ImageTk.PhotoImage(image=img) self.canvas.itemconfig(self.image_ref, image=self.photo) def file_renamer(self): """ get the file path of miss labeled data, and read the paths inside the file, and rename the extension part to jpg_ and xml_ :param file_path: list of bad-labeled images :return: """ f_row = self.folder_df.iloc[self.f_idx] export_path = f_row.folder + "/incorrects.txt" counter = 0 with open(export_path, mode='r') as f: for line in f: line = line.strip() root = line.split(".")[0] os.rename(root + ".jpg", root + ".jpg_") xml = root.replace("in.", "gt.") os.rename(xml + ".xml", xml + ".xml_") counter += 1 messagebox.showinfo("rename", " {} images has been renamed".format(counter)) if __name__ == '__main__': top = tk.Tk() top.title('Label inspector') top.geometry("800x620") top.resizable(0, 0) # check if folder dataframe already saved on disk if os.path.exists(FOLDER_PATH): fdf =
pd.read_pickle(FOLDER_PATH)
pandas.read_pickle
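# A minimal, self-contained sketch of pandas.read_pickle, the API completed in the
# sample above. The file name and frame contents are illustrative, not taken from
# the original label-inspector project.
import pandas as pd

example_df = pd.DataFrame({"folder": ["a", "b"], "checked": [False, False]})
example_df.to_pickle("folders_example.pkl")       # round-trip: write first...
restored = pd.read_pickle("folders_example.pkl")  # ...then read the frame back
print(restored.equals(example_df))                # True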
from gensim.models import FastText from fse.models import Average from fse import IndexedList import pandas as pd from sklearn.manifold import TSNE from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.mixture import GaussianMixture from sklearn.cluster import AgglomerativeClustering import json import re from ast import literal_eval sentences = [] sentences2 = [] short_input_texts = [] short_bot_texts = [] short_entities = [] short_actions = [] df = pd.read_csv('data/ExpectedConversation_27_04.csv') for index, value in df.iterrows(): text = str(value['input_text']) if "scontent.xx.fbcdn.net" in str(value['input_text']): text = re.sub('\[|\]|\'|\n|\{|\}', '', str(value['cv_outputs'])) text = text.split(',')[0].replace('object_type:','') sentence = text sentences.append(sentence.split()) sentences2.append(sentence) short_input_text = text short_bot_text = str(value['bot_text']) if "scontent.xx.fbcdn.net" not in str(value['bot_text']) else "url" if len(short_input_text) > 50: n = int(len(short_input_text) / 50) short_input_text = " ".join([short_input_text[50 * x:50 * (x + 1)] + "-" + "<br>" for x in range(n)]) if len(short_bot_text) > 50: n = int(len(short_bot_text) / 50) short_bot_text = " ".join([short_bot_text[50 * x:50 * (x + 1)] + "-" + "<br>" for x in range(n)]) short_entity = str(value['entities']) if "scontent.xx.fbcdn.net" not in str(value['entities']) else "url" short_actions = str(value['action_1']) if "scontent.xx.fbcdn.net" not in str(value['action_1']) else "url" short_input_texts.append(short_input_text) short_bot_texts.append(short_bot_text) short_entities.append(short_entity) ft = FastText(sentences, min_count=1, size=10) model = Average(ft) model.train(IndexedList(sentences)) vectors_list = model.sv.vectors.tolist() # 10 dimensions vectors tsne = TSNE(n_components=2) # tsne = TSNE(n_components=3) tsne_vectors = tsne.fit_transform(vectors_list) scores = [] # for k in range(2,20): # x = k # kmeans = KMeans(n_clusters=x, random_state=0) # kmeans = kmeans.fit(tsne_vectors) # labels = kmeans.labels_ # score = silhouette_score(tsne_vectors, labels) # inertia = kmeans.inertia_ # scores.append((k, score,inertia)) # # scores_df = pd.DataFrame(scores, columns=['k', 'silhouette_score', 'inertia']) # scores_df.to_csv("data/scores_input_text_only.csv", index=False) gm = GaussianMixture(n_components=6, n_init=10, covariance_type="full").fit(tsne_vectors) hc = AgglomerativeClustering(n_clusters=6, affinity='euclidean', linkage='ward').fit_predict(tsne_vectors) for k in range(2,20): x = k kmeans = KMeans(n_clusters=x, random_state=0) kmeans = kmeans.fit(tsne_vectors) labels = kmeans.labels_ score = silhouette_score(tsne_vectors, gm.predict(tsne_vectors)) inertia = kmeans.inertia_ scores.append((k, score,inertia)) kmeans = KMeans(n_clusters=6, random_state=0).fit(tsne_vectors) scores_df =
pd.DataFrame(scores, columns=['k', 'silhouette_score','inertia'])
pandas.DataFrame
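# A minimal sketch of the pandas.DataFrame completion above: turning a list of
# (k, silhouette_score, inertia) tuples into a table. The numbers are placeholders,
# not results from the clustering script.
import pandas as pd

scores = [(2, 0.41, 1523.0), (3, 0.38, 1101.5)]
scores_df = pd.DataFrame(scores, columns=["k", "silhouette_score", "inertia"])
scores_df.to_csv("scores_example.csv", index=False)  # persist, mirroring the script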
import numpy as np import pandas as pd import matplotlib.pyplot as plt import os.path as osp import statistics import random import torch from torch_geometric.datasets import TUDataset import torch_geometric.transforms as T import torch.nn.functional as F from torch_geometric.data import DataLoader, Dataset from optimal_R import option, all_possible_concatenation from graph_property import G_property, binning from model.aug_GNN import augGNN #from model.StrucFea_GNN import StrucFeaGNN from model.StrucFea_GNN_cit import StrucFeaGNN from utils import max_len_arr def reserve(task, dn, loader, folds): for f in range(folds): t = 0 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) for load in loader[f]: G = [] # construct graph for p1 in range(np.array(load.edge_index).shape[1]): G.append((int(load.edge_index[0][p1]),int(load.edge_index[1][p1]))) # calculate graph properties constant = G_property(G, constant_bool=1) degrees, graph = G_property(G, degree_bool=1, bin_bool=0) clustering, graph = G_property(G, clustering_bool=1, bin_bool=0) pagerank, graph = G_property(G, pagerank_bool=1, bin_bool=0) avg_path_len_G, graph = G_property(G, avg_path_length_bool=1, bin_bool=0) matrix = torch.cat((constant,degrees),1) matrix = torch.cat((matrix,clustering),1) matrix = torch.cat((matrix,pagerank),1) matrix = torch.cat((matrix,avg_path_len_G),1) matrix = matrix.numpy() matrix = pd.DataFrame(matrix,columns = ['Constant_feature','Degree','Clustering_coefficient','Pagerank','Aver_path_len']) name = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/{}/{}_property{}{}_fold{}.txt'.format(dn, dn, t, task, f) matrix.to_csv(name, sep = '\t', index=False) t+=1 def train(i, dn, model, task, optimizer, train_loader, device, folds): ### for example, the folds that need to be trained are 0-8, the valid fold then is 9 ### if total number of folds is equal to 10 model.train() correct_arr = [] tot_loss = [] length = [] for f in folds: t= 0 correct = 0 total_loss = 0 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) for load in train_loader[f]: name = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/{}/{}_property{}{}_fold{}.txt'.format(dn, dn, t, task, f) property_file = pd.read_csv(name, sep = '\t') propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]] array = np.array(propert_i) load.x = torch.cat((load.x, torch.tensor(array).float()), dim = 1) load = load.to(device) optimizer.zero_grad() out = model(load) loss = F.nll_loss(out,load.y) loss.backward() optimizer.step() total_loss += loss.item() * len(load.y) with torch.no_grad(): load = load.to(device) pred = model(load).max(dim=1)[1] correct += pred.eq(load.y).sum().item() t+=1 correct_arr.append(correct) length.append(len(train_loader[f].dataset)) tot_loss.append(total_loss) return sum(correct_arr)/sum(length), sum(tot_loss)/sum(length) def valid(i, dn, model, task, train_loader, device, fold): model.eval() correct = 0 t = 0 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) for load in train_loader[fold]: name = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/{}/{}_property{}{}_fold{}.txt'.format(dn, dn, t, task, fold) property_file = pd.read_csv(name, sep = '\t') propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]] array = np.array(propert_i) load.x = torch.cat((load.x, torch.tensor(array).float()), dim = 1) with torch.no_grad(): load = load.to(device) pred = model(load).max(dim=1)[1] correct += 
pred.eq(load.y).sum().item() t+=1 valid_acc = correct / len(train_loader[fold].dataset) return valid_acc def test(i, dn, model, task, test_loader, device, fold): correct = 0 model.eval() total_num_nodes = 0 t = 0 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) for load in test_loader[fold]: name = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/{}/{}_property{}{}_fold{}.txt'.format(dn, dn, t, task, fold) property_file = pd.read_csv(name, sep = '\t') propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]] array = np.array(propert_i) load.x = torch.cat((load.x, torch.tensor(array).float()), dim = 1) with torch.no_grad(): load = load.to(device) pred = model(load).max(dim=1)[1] correct += pred.eq(load.y).sum().item() t+=1 test_acc = correct / len(test_loader[fold].dataset) return test_acc if __name__ == '__main__': o = option() o.multiple_dataset = True folds = 10 # k-fold cross-validation saved = [] device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') datasets = [] datasets.append(o.dataset) plt.figure() for dataset in datasets: ans = all_possible_concatenation(o) min_ans_len, max_ans_len = max_len_arr(ans) c_index = 0 path = osp.join('/home/jiaqing/桌面/Fea2Fea/data/') data_set = TUDataset(root = path + o.dataset, name = o.dataset, use_node_attr = False) train_split = 0.9 test_split = 0.1 num_train_graphs = int(len(data_set) * train_split) num_test_graphs = int(len(data_set) * test_split) # fix shuffle seeds random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) perm = torch.randperm(len(data_set)) train_idx = perm[:num_train_graphs] test_idx = perm[num_train_graphs:] num_each_fold = int(num_train_graphs / folds) batchsize = 64 if dataset != 'ENZYMES' else 128 train_loader = [] test_loader = [] test_loader.append(DataLoader(data_set[test_idx], batch_size = batchsize , shuffle = False)) #### batch size 32 for NCI1 # split cross-validation sets for i in range(folds): # fix shuffle seeds random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) train_loader.append(DataLoader(data_set[train_idx[(i*num_each_fold):((i+1)*num_each_fold)]], batch_size = batchsize , shuffle=True, worker_init_fn=random.seed(12345))) input_dim = 0 first = 0 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) ans = [(3,4)] for i in train_loader[1:]: for load in i: input_dim = load.x.shape[1] first = load.x.shape[0] break input_dim+= len(ans[0]) print('input dim: {}'.format(input_dim)) num_classes = {'ENZYMES':6, 'PROTEINS':2, 'NCI1':2} # reserve train_loader and test_loader ''' reserve('train', dataset, train_loader, folds) reserve('test', dataset, test_loader, 1) ''' #ans = [(0,2,4)] ans = [(3,4)] folds_arr = [i for i in range(folds)] folds_arr = np.array(folds_arr) for value in ans: # for each combination entry: mean_test_acc = [] mean_valid_acc = [] for fo in range(folds): model = StrucFeaGNN(concat_fea=False, concat_fea_num = 2, embed_method = 'GIN', input_dim = input_dim, output_dim = num_classes[o.dataset], depth = 3).to(device) #model = StrucFeaGNN(concat_fea=True, concat_fea_num = 2, embed_method = 'GIN', input_dim = input_dim, output_dim = num_classes[o.dataset], depth = 2).to(device) #model = StrucFeaGNN(concat_fea=True, concat_fea_num = 2, embed_method = 'GIN', input_dim = input_dim, output_dim = num_classes[o.dataset], depth = 2, cat_method = 'Bilinear').to(device) #model = StrucFeaGNN(concat_fea=True, concat_fea_num = 2, embed_method = 'GIN', input_dim 
= input_dim, output_dim = num_classes[o.dataset], depth = 2, cat_method = 'NTN').to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.002, weight_decay=1e-4) best_epoch = 0 best_valid_acc = 0 best_test_acc = 0 op_iters = 0 if o.dataset == 'NCI1': if o.aim_feature == 2: break # for train for epoch in range(1, 800): tr_acc, t_loss = train(value, o.dataset, model, 'train', optimizer, train_loader, device, folds_arr[folds_arr!=fo]) # for valid v_acc = valid(value, o.dataset, model, 'train', train_loader, device, fo) # for test t_acc = test(value, o.dataset, model, 'test', test_loader, device, 0) #print(v_acc) if v_acc > best_valid_acc and t_acc > best_test_acc: best_valid_acc = v_acc best_test_acc = t_acc best_epoch = epoch #torch.save(model, '/home/jiaqing/桌面/Fea2Fea/src/model_pkl/best_model_{}.pkl'.format(o.dataset)) op_iters=0 op_iters+=1 if op_iters > 100: break print('added_features: {}, validation fold:{}, best valid acc: {:.4f}, best test acc: {:.4f}'.format(ans, fo, best_valid_acc, best_test_acc)) mean_test_acc.append(best_test_acc) mean_valid_acc.append(best_valid_acc) print('average test acc: {:.4f}, std: {:.4f}'.format(sum(mean_test_acc)/len(mean_test_acc), np.std(mean_test_acc))) print('average valid acc: {:.4f}, std: {:.4f}'.format(sum(mean_valid_acc)/len(mean_valid_acc), np.std(mean_valid_acc))) break c_index+=1 break # save mean acc and std acc saved =
pd.DataFrame(saved)
pandas.DataFrame
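# A hedged sketch of the pandas.DataFrame completion above: collecting per-fold
# accuracies into a frame before saving. The column names and values are assumptions
# for illustration; the original script builds the frame from a plain list.
import pandas as pd

saved = [[0.71, 0.69], [0.74, 0.72]]  # e.g. [test_acc, valid_acc] per run
saved_df = pd.DataFrame(saved, columns=["test_acc", "valid_acc"])
print(saved_df.mean())
print(saved_df.std())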
# coding: utf-8 # Author: <NAME> <<EMAIL>> # License: BSD 3 clause import numpy as np import pandas as pd import pickle from sklearn.preprocessing import LabelEncoder import os import warnings import time os.system("ipcluster start --profile=home &") import ipyparallel as ipp def convert_list(serie): """Converts lists in a pandas serie into a dataframe where which element of a list is a column Parameters ---------- serie : pandas Serie The serie you want to cast into a dataframe Returns ------- pandas DataFrame The converted dataframe """ import numpy import pandas if(serie.apply(lambda x: type(x)==list).sum()>0): serie = serie.apply(lambda x: [x] if type(x)!=list else x) cut = int(numpy.percentile(serie.apply(len),90)) #a tester serie = serie.apply(lambda x: x[:cut]) return pandas.DataFrame(serie.tolist(),index=serie.index,columns=[serie.name+"_item"+str(i+1) for i in range(cut)]) else: return serie def convert_float_and_dates(serie, date_strategy): """Converts into float if possible and converts dates Parameters ---------- serie : pandas Serie The serie you want to convert date_strategy : str, defaut = "complete" The strategy to encode dates : - complete : creates timestamp from 01/01/2017, month, day and day_of_week - to_timestamp : creates timestamp from 01/01/2017 Returns ------- pandas DataFrame The converted dataframe """ import pandas ### dtype is already a date ### if (serie.dtype == 'datetime64[ns]'): df = pandas.DataFrame([], index=serie.index) df[serie.name + "_TIMESTAMP"] = (pandas.DatetimeIndex(serie) - pandas.datetime(2017, 1, 1)).total_seconds() if (date_strategy == "complete"): df[serie.name + "_MONTH"] = pandas.DatetimeIndex(serie).month.astype( float) # be careful with nan ! object or float ?? df[serie.name + "_DAY"] = pandas.DatetimeIndex(serie).day.astype( float) # be careful with nan ! object or float ?? df[serie.name + "_DAYOFWEEK"] = pandas.DatetimeIndex(serie).dayofweek.astype( float) # be careful with nan ! object or float ?? return df else: ### convert float ### try: serie = serie.apply(float) except: pass ### cleaning/converting dates ### if (serie.dtype != 'object'): return serie else: # trying to cast into date df = pandas.DataFrame([], index=serie.index) try: df[serie.name + "_TIMESTAMP"] = ( pandas.DatetimeIndex(pandas.to_datetime(serie)) - pandas.datetime(2017, 1, 1)).total_seconds() if (date_strategy == "complete"): df[serie.name + "_MONTH"] = pandas.DatetimeIndex(pandas.to_datetime(serie)).month.astype( float) # be careful with nan ! object or float ?? df[serie.name + "_DAY"] = pandas.DatetimeIndex(pandas.to_datetime(serie)).day.astype( float) # be careful with nan ! object or float ?? df[serie.name + "_DAYOFWEEK"] = pandas.DatetimeIndex(pandas.to_datetime(serie)).dayofweek.astype( float) # be careful with nan ! object or float ?? return df except: return serie class Reader(): """Reads and cleans data Parameters ---------- sep : str, defaut = None Delimiter to use when reading a csv file. header : int or None, defaut = 0. If header=0, the first line is considered as a header. Otherwise, there is no header. Useful for csv and xls files. 
to_hdf5 : bool, defaut = True If True, dumps each file to hdf5 format to_path : str, defaut = "save" Name of the folder where files and encoders are saved verbose : bool, defaut = True Verbose mode """ def __init__(self, sep = None, header = 0, to_hdf5 = False, to_path = "save", verbose = True): self.sep = sep self.header = header self.to_hdf5 = to_hdf5 self.to_path = to_path self.verbose = verbose self.__client = ipp.Client(profile='home') self.__dview = self.__client.direct_view() def clean(self, path, date_strategy = "complete", drop_duplicate = False): """Reads and cleans data (accepted formats : csv, xls, json and h5): - del Unnamed columns - casts lists into variables - try to cast variables into float - cleans dates - drop duplicates (if drop_duplicate=True) Parameters ---------- path : str The path to the dataset. date_strategy : str, defaut = "complete" The strategy to encode dates : - complete : creates timestamp from 01/01/2017, month, day and day_of_week - to_timestamp : creates timestamp from 01/01/2017 drop_duplicate : bool, defaut = False If True, drop duplicates when reading each file. Returns ------- pandas dataframe Cleaned dataset. """ ########################################################################### ################################# reading ################################# ########################################################################### start_time = time.time() if (path == None): raise ValueError("You must specify the path to load the data") else: type_doc = path.split(".")[-1] if (type_doc == 'csv'): if (self.sep == None): raise ValueError("You must specify the separator for a csv file") else: if (self.verbose): print("") print("reading csv : " + path.split("/")[-1] + " ...") df = pd.read_csv(path, sep=self.sep, header=self.header, engine='c', error_bad_lines=False) elif (type_doc == 'xls'): if (self.verbose): print("") print("reading xls : " + path.split("/")[-1] + " ...") df =
pd.read_excel(path, header=self.header)
pandas.read_excel
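# A minimal round-trip sketch of pandas.read_excel with an explicit header row, as
# completed above. Requires an Excel engine such as openpyxl; the file name is
# illustrative, not part of the Reader class.
import pandas as pd

pd.DataFrame({"a": [1, 2], "b": [3, 4]}).to_excel("example.xlsx", index=False)
df = pd.read_excel("example.xlsx", header=0)  # header=0: first row is the header
print(df.dtypes)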
from datetime import datetime
from pycoingecko import CoinGeckoAPI
import pandas as pd

today = datetime.today()
startDate = datetime.timestamp(datetime(2019, 4, 1))
endDate = datetime.timestamp(today)

cg = CoinGeckoAPI()

try:
    # Bitcoin-price
    coinInfosCG = cg.get_coin_market_chart_range_by_id(id='bitcoin', vs_currency='eur', from_timestamp=startDate, to_timestamp=endDate)
    coinInfosCGUSD = cg.get_coin_market_chart_range_by_id(id='bitcoin', vs_currency='usd', from_timestamp=startDate, to_timestamp=endDate)
    temp = pd.DataFrame(coinInfosCG['prices'])
    temp['BTCPriceUSD'] = pd.DataFrame(coinInfosCGUSD['prices'])[1]
    temp = temp.rename(columns={0: 'Date', 1: 'BTCPriceEUR', 2: 'BTCPriceUSD'})
    temp['Date'] = pd.to_datetime(temp['Date'], unit='ms').dt.date
    temp.set_index('Date', inplace=True)
    coinPricesDF = temp

    # Ethereum-price
    coinInfosCGEUR = cg.get_coin_market_chart_range_by_id(id='ethereum', vs_currency='eur', from_timestamp=startDate, to_timestamp=endDate)
    coinInfosCGUSD = cg.get_coin_market_chart_range_by_id(id='ethereum', vs_currency='usd', from_timestamp=startDate, to_timestamp=endDate)
    temp =
pd.DataFrame(coinInfosCGEUR['prices'])
pandas.DataFrame
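# A sketch of the pandas.DataFrame call above: the CoinGecko client returns 'prices'
# as [timestamp_ms, price] pairs, mocked here with two made-up rows instead of a
# live API call.
import pandas as pd

prices = [[1554076800000, 146.2], [1554163200000, 149.9]]
temp = pd.DataFrame(prices)
temp = temp.rename(columns={0: "Date", 1: "ETHPriceEUR"})
temp["Date"] = pd.to_datetime(temp["Date"], unit="ms").dt.date
print(temp)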
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler exogenous_features = ["High_mean_lag3", "High_std_lag3", "Low_mean_lag3", "Low_std_lag3", "Volume_mean_lag3", "Volume_std_lag3", "Turnover_mean_lag3", "Turnover_std_lag3", "Trades_mean_lag3", "Trades_std_lag3", "High_mean_lag7", "High_std_lag7", "Low_mean_lag7", "Low_std_lag7", "Volume_mean_lag7", "Volume_std_lag7", "Turnover_mean_lag7", "Turnover_std_lag7", "Trades_mean_lag7", "Trades_std_lag7", "High_mean_lag30", "High_std_lag30", "Low_mean_lag30", "Low_std_lag30", "Volume_mean_lag30", "Volume_std_lag30", "Turnover_mean_lag30", "Turnover_std_lag30", "Trades_mean_lag30", "Trades_std_lag30", "month", "week", "day", "day_of_week"] def prepare_features(df): df.reset_index(drop=True, inplace=True) lag_features = ["High", "Low", "Volume", "Turnover", "Trades"] window1 = 3 window2 = 7 window3 = 30 df_rolled_3d = df[lag_features].rolling(window=window1, min_periods=0) df_rolled_7d = df[lag_features].rolling(window=window2, min_periods=0) df_rolled_30d = df[lag_features].rolling(window=window3, min_periods=0) df_mean_3d = df_rolled_3d.mean().shift(1).reset_index().astype(np.float32) df_mean_7d = df_rolled_7d.mean().shift(1).reset_index().astype(np.float32) df_mean_30d = df_rolled_30d.mean().shift(1).reset_index().astype(np.float32) df_std_3d = df_rolled_3d.std().shift(1).reset_index().astype(np.float32) df_std_7d = df_rolled_7d.std().shift(1).reset_index().astype(np.float32) df_std_30d = df_rolled_30d.std().shift(1).reset_index().astype(np.float32) for feature in lag_features: df[f"{feature}_mean_lag{window1}"] = df_mean_3d[feature] df[f"{feature}_mean_lag{window2}"] = df_mean_7d[feature] df[f"{feature}_mean_lag{window3}"] = df_mean_30d[feature] df[f"{feature}_std_lag{window1}"] = df_std_3d[feature] df[f"{feature}_std_lag{window2}"] = df_std_7d[feature] df[f"{feature}_std_lag{window3}"] = df_std_30d[feature] df.fillna(df.mean(), inplace=True) df.set_index("Date", drop=False, inplace=True) df.Date =
pd.to_datetime(df.Date, format="%Y-%m-%d")
pandas.to_datetime
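# A sketch of pandas.to_datetime with an explicit format string, as completed above.
# The two-row frame is a stand-in for the real OHLCV data.
import pandas as pd

df = pd.DataFrame({"Date": ["2021-01-01", "2021-01-02"], "High": [10.0, 11.5]})
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df = df.set_index("Date")
print(df.index.dtype)  # datetime64[ns]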
import json
import pandas as pd
from datetime import datetime
from src.func import tweet_utils
from src.func import regex


def load_tweets(geotweet_path):
    with open(geotweet_path, 'r') as f:
        tweets = json.load(f)
    return remove_duplicates(tweets)


def remove_duplicates(tweets):
    df = pd.DataFrame.from_records(tweets)
    df.drop_duplicates(subset='id_str', inplace=True)
    tweets = df.to_dict('records')
    return tweets


def bad_tweet_filter(tweet, hashtag_list):
    if pd.isnull(tweet['pure_text']):
        return True
    tweet_ngrams = dict(regex.get_ngrams(tweet['pure_text'], path='ngrams.bin'))
    if tweet_utils.is_retweet(tweet):
        return True
    # check Date
    ts = tweet['tweet_created_at']
    day = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S')
    if day > datetime(2015, 4, 27):
        return True
    if any(x in tweet_ngrams.keys() for x in hashtag_list):
        return True
    return False


def user_control_split(tweets, hashtag_list=["#job"]):
    park_users = {}
    # print("Building list of park users")
    for tweet in tweets:
        if
pd.isnull(tweet['ParkID'])
pandas.isnull
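# A sketch of pandas.isnull on a single scalar field, as in the filter above; the
# tweet dict is a stand-in, not real data.
import numpy as np
import pandas as pd

tweet = {"id_str": "1", "pure_text": None, "ParkID": np.nan}
if pd.isnull(tweet["ParkID"]):
    print("missing ParkID -> treat as a bad tweet")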
import argparse from ast import parse from os import P_ALL, error, path import sys import math from numpy.core.fromnumeric import repeat from numpy.core.numeric import full import pandas as pd from pandas import plotting as pdplot import numpy as np from pandas.core.frame import DataFrame from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.statespace import mlemodel from statsmodels.iolib import summary from statsmodels.tsa.stattools import adfuller, kpss from statsmodels.regression import linear_model from sklearn.metrics import mean_squared_error from math import sqrt from copy import deepcopy from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from pmdarima.arima import auto_arima from pmdarima import arima as pmd from pmdarima import model_selection from pmdarima.arima.utils import ndiffs from matplotlib import pyplot as plt from streamlit.state.session_state import Value from src.utils import * from src.data import TimeSeriesData from src.import_data import ExogeneousDataImporter, EndogeneousDataImporter, EndogeneousDataFormats, ExogeneousDataFormats import enum # display options pd.set_option("display.max_columns", 999) pd.set_option("display.max_rows", 999) '''time_series.py Tools for performing a time series regression. This file contains various models that may be used to perform a time series regression, as listed in TimeSeriesRegressionModels. SARIMARegression is the base class and implements all public methods. Its subclasses provide alternate models that may be used for regression. Typical usage examples: foo = SARIMARegression() foo.fit(y, X) bar = BruteForceARIMARegression(y, X, order_arg_ranges) bar.predict(future_X) ''' class TimeSeriesRegressionModels(enum.Enum): '''Represents models for time series regression.''' # SARIMARegression = SARIMARegression # AutoARIMARegression = AutoARIMARegression # BruteForceARIMARegression = BruteForceARIMARegression SARIMARegression = 'SARIMARegression' AutoARIMARegression = 'AutoARIMARegression' BruteForceARIMARegression = 'BruteForceARIMARegression' def __str__(self): return self.value class OverspecifiedError(Exception): ''' Error for models which are overspecified, that is, the number of regressors is greater than or equal to the number of observations. ''' class SARIMARegression(): ''' Represents an immutable SARIMA time series regression model. The order of the model is fixed at initialization and cannot be altered. The model provides various methods to invoke on endogeneous and exogeneous data, but the underlying model parameters remain the same. Attributes: p_d_q_order: the order of the model in the form (p,d,q) P_D_Q_order: the seasonal order of the model in the form (P,D,Q,m) ''' # Abstraction function: # AF(p_d_q_order, P_D_Q_order) = A SARIMA regression model of order p_d_q_order and seasonal # order P_D_Q_order # Rep invariant: # - p_d_q_order is a length three iterable of whole numbers (Z+) # - P_D_Q_order is a length four iterable of whole numbers (Z+) # - P_D_Q_order[3] is a natural number >=2 # - total_order= sum(p_d_q_order) + sum(P_D_Q_order[0:3]) # Safety from rep exposure: # - p_d_q order and P_D_Q_order are immutable types # - No mutator methods # - Getter methods for attributes use the @property decorator so no reassignments can be made max_iter = 100 def __init__(self, p_d_q_order : tuple=(0,0,0), P_D_Q_order : tuple=(0,0,0,4)): ''' Instantiates a new SARIMARegression object of the given order. 
Keyword arguments: p_d_q_order -- the order of the regression of the form (p, d, q). Requires p,d, and q to be >=0. P_D_Q_order -- the order of the regression of the form (P, D, Q, m). Requires P,D, and Q to be >=0. Requires m>=2 Raises: ValueError if the model contains any seasonal and ordinary AR or MA lag of the same order. ''' # use construction to test for valueerror repeat_lags = self._find_repeat_lags(p_d_q_order, P_D_Q_order) if len(repeat_lags) >0: raise ValueError('Repeat ordinary and seasonal lags of orders %s.' %repeat_lags) self._p_d_q_order = p_d_q_order self._P_D_Q_order = P_D_Q_order self._total_order = p_d_q_order[0] + p_d_q_order[2] + P_D_Q_order[0] + P_D_Q_order[2] # TODO refactor to instantiate model here self._checkrep() def _checkrep(self): '''Asserts the rep invariant''' # Length 3 tuple assert len(self._p_d_q_order) == 3 assert len(self._P_D_Q_order) == 4 # whole numbers for i in self._p_d_q_order: assert i >= 0 for j in self._P_D_Q_order: assert j >= 0 # natural number assert self._P_D_Q_order[3] >=2 # sum assert self._total_order == self._p_d_q_order[0] + self._p_d_q_order[2] + self._P_D_Q_order[0] + self._P_D_Q_order[2] @property def p_d_q_order(self) -> tuple: '''Order of the model of the form (p,d,q)''' return tuple(self._p_d_q_order) @property def P_D_Q_order(self) -> tuple: '''Order of the model of the form (P,D,Q,m)''' return tuple(self._P_D_Q_order) def _check_overspecified(self, y : pd.Series, X : pd.DataFrame) -> bool: ''' Checks whether the model is overspecified. The model is overspecified if the total number of autoregressive and moving average lags (both normal and seasonal) plus the number of X variables plus the added variable for the time trend is greater than or equal to the number of observations in the model. Keyword arguments: y -- the endogeneous data. X -- the exogeneous data. Requires len(X) == len(y) Returns: True if the model would be overspecified when fit for y on X, false otherwise. ''' num_exog_vars = 0 if X is None else X.shape[1] # TODO refactor as constant for class trend_order =1 # x vars + AR + MA + trend if not num_exog_vars + self._total_order +trend_order < len(y): return True return False def _find_repeat_lags(self, p_d_q_order : tuple, P_D_Q_order : tuple) -> set: ''' Finds repeat AR or MA lags in the ordinary or seasonal components. Keyword arguments: p_d_q_order -- the order of the model in (p,d,q) format. P_D_Q_order -- the seasonal order of the model in (P,D,Q,m) format. Returns: The set of all repeat lags found in the model. ''' # AR lags ar_lags = set([*range(1,p_d_q_order[0]+1)]) seasonal_ar_lags = set(np.array([*range(1, P_D_Q_order[0]+1)]) * P_D_Q_order[3]) duplicate_ar_lags = ar_lags.intersection(seasonal_ar_lags) # MA lags ma_lags = set([*range(1,p_d_q_order[2]+1)]) seasonal_ma_lags = set(np.array([*range(1, P_D_Q_order[2]+1)]) * P_D_Q_order[3]) duplicate_ma_lags = ma_lags.intersection(seasonal_ma_lags) return duplicate_ar_lags.union(duplicate_ma_lags) def _fit_model(self, y : pd.Series, X : pd.DataFrame) -> mlemodel.MLEResults: ''' Fits the SARIMAX model to the data. Keyword arguments: y --The regressor. Requires len(y) > 1. X -- The regressand (or None if no regressand). If provided, requires len(y) == len(X) and X must not be empty (no columns). Returns: An ARIMA model fit object. 
''' # TODO change to pmdarima ar = SARIMAX(endog=y, exog=X, order=self.p_d_q_order, seasonal_order=self.P_D_Q_order, trend='ct', measurement_error=False, enforce_stationarity=True, enforce_invertibility=True) model_fit = ar.fit(cov_type='robust', maxiter=self.max_iter, full_output=False, disp=False) return model_fit def fit(self, y : pd.Series, X : pd.DataFrame) -> summary.Summary: ''' Fits the model. Keyword arguments: y -- the endogeneous data Series in float64 format with a PeriodIndex. Requires len(y) >= 0. X - the exogeneous DataFrame in float64 format with a PeriodIndex. Requires len(y) == len(X) If no exogeneous data, pass an empty DataFrame containing the required index. Returns: A summary of the results with a string representation. Raises: OverspecifiedError if the model is overspecified, that is, if the number of provided exogeneous variables plus the number of lags (seasonal and otherwise) and trend order are greater than or equal to the number of exogeneous variables. ''' assert len(y) == len(X), 'y and X must be of the same length' if self._check_overspecified(y, X): raise OverspecifiedError('Model is overspecified. Remove exogeneous vars or reduce order') # _fit does not accept empty X if len(X.columns) == 0: X = None else: X = X # length of y already greater than 1 since if it were equal to 1, # model would have failed overspecified test since trend order is 1 fit = self._fit_model(y, X) self._checkrep() return fit.summary() def _predict(self, y : pd.Series, X : pd.DataFrame, future_X : pd.DataFrame, n : int) -> linear_model.PredictionResults: ''' Fits the model for y on X and predicts n future observations for y. Keyword arguments: y --The regressor. Requires len(y) > 1. X -- The regressand (or None if no regressand). Length must be equal to the length of endogeneous_data and must not be empty (no columns). future_X -- future exogeneous data observations or None if and only if X is None. Requires len(future_X) == n. n -- The number of future periods to predict. Requires n>0. Returns: Results of the prediction. ''' assert not ((X is None) ^ (future_X is None)), 'X and future_X must be both provided or niether provided' fit = self._fit_model(y, X) return fit.forecast(n, exog=future_X) def predict(self, n : int, y : pd.Series, X : pd.DataFrame, future_X : pd.DataFrame=None) -> pd.Series: ''' Predicts n future values of y using future values of X, future_X. Uses a model fit for y on X and then predicts using that model. Keyword arguments: n -- the number of future periods to predict. Requires n>=0. y -- the endogeneous data Series in float64 format with a PeriodIndex. X - the exogeneous DataFrame in float64 format with a PeriodIndex. Requires len(y) == len(X) If no exogeneous data, pass an empty DataFrame containing the required index. future_X -- Dataframe containing the future values of the exogeneous variables X in float64 format, or None if X has no columns (X is an empty dataframe). Requires the same set of columns in the same order as X, and with a PeriodIndex matching in frequency and containing subsequent consecutive periods to the last periods in X, with no gaps. If given, must be of length >=n. Returns: A Pandas Series indexed by PeriodIndex in float64 format containing n future predictions for y. Raises: OverspecifiedError if the model is overspecified, that is, if the number of provided exogeneous variables plus the number of lags (seasonal and otherwise) and trend are greater than or equal to the number of exogeneous variables. 
ValueError if n is larger than the number of X observations provided. ''' # TODO should we change spec to require provision of future_X? # - more SFB - we can check that length = n # - more SFB - we can use this index as same for return value # - fail fast - catch error if n wrong when getting data, not when plugging in here assert n >= 0, 'Number of periods to predict must be >=0' if self._check_overspecified(y, X): raise OverspecifiedError('Model is overspecified. Remove exogeneous vars or reduce order') if len(X.columns) == 0: X = None else: # X provided so future_X must be also X = X if n > len(future_X): raise ValueError('Not enough future exogeneous data provided for horizon %s prediction' %n) future_X = future_X.iloc[:n] # TODO make util start_date = y.index.max() + y.index.freq index =
pd.period_range(start=start_date, periods=n, freq=y.index.freq)
pandas.period_range
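# A sketch of pandas.period_range as used above to index n future forecasts. The
# quarterly frequency, n=4, and the `+ 1` step to the first out-of-sample period are
# assumptions for illustration (the original advances by y.index.freq).
import pandas as pd

y = pd.Series([1.0, 2.0, 3.0], index=pd.period_range("2020Q1", periods=3, freq="Q"))
start = y.index.max() + 1  # first period after the observed sample
future_index = pd.period_range(start=start, periods=4, freq=y.index.freq)
print(future_index)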
from flask import render_template, request, redirect, url_for, session from app import app from model import * from model.main import * import json import pandas as pd import numpy as np class DataStore(): model=None model_month=None sale_model=None data = DataStore() @app.route('/', methods=["GET"]) def home(): percent=percentageMethod() total_month=totalMonth() file1=pd.read_json('total_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['total']) with open('percent.json') as f: file2 = json.load(f) labels=file2['index'] data=file2['data'] if "username" in session: return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent, month_index=month_index, month_data=month_data) else: return render_template('login.html') # Register new user @app.route('/register', methods=["GET", "POST"]) def register(): if request.method == "GET": return render_template("register.html") elif request.method == "POST": registerUser() return redirect(url_for("login")) #Check if email already exists in the registratiion page @app.route('/checkusername', methods=["POST"]) def check(): return checkusername() # Everything Login (Routes to renderpage, check if username exist and also verifypassword through Jquery AJAX request) @app.route('/login', methods=["GET"]) def login(): if request.method == "GET": if "username" not in session: return render_template("login.html") else: return redirect(url_for("home")) @app.route('/checkloginusername', methods=["POST"]) def checkUserlogin(): return checkloginusername() @app.route('/checkloginpassword', methods=["POST"]) def checkUserpassword(): return checkloginpassword() #The admin logout @app.route('/logout', methods=["GET"]) # URL for logout def logout(): # logout function session.pop('username', None) # remove user session return redirect(url_for("home")) # redirect to home page with message #Forgot Password @app.route('/forgot-password', methods=["GET"]) def forgotpassword(): return render_template('forgot-password.html') #404 Page @app.route('/404', methods=["GET"]) def errorpage(): return render_template("404.html") #Blank Page @app.route('/blank', methods=["GET"]) def blank(): return render_template('blank.html') @app.route('/totalyear', methods=["GET"]) def total_year(): total_year=totalYear() file1=pd.read_json('total_year.json',orient='index') year_index=np.array(file1['year']) year_data=np.array(file1['total']) return render_template("total_year.html",year_index=year_index, year_data=year_data) @app.route('/totalmonth', methods=["GET"]) def total_month(): total_month=totalMonth() file1=pd.read_json('total_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['total']) num=6 # Fit model model=fit_model() data.model_month=model predict_rs, fitted_data=predict(model,6) pred_index=np.array(predict_rs['month_year']) pred_data=np.array(predict_rs['total']) #Test model test_rs= test(pred_data[0], fitted_data) return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num) def check_stationary(): total_month=totalMonth() data1=total_month[['month_year','total']] data1.set_index('month_year', inplace=True) result=stationary(data1) return result def fit_model(): total_month=totalMonth() data1=total_month[['month_year','total']] data1.set_index('month_year', inplace=True) data=data1['total'] 
stationary=check_stationary() p=stationary[1] if (p<0.05): result1 = fit_model_stationary(data) else: result1 = fit_model_non_stationary(data) return result1 def predict(model,num_predict): if num_predict==0: num_predict=6 fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True) df2=df[['total', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data=total_day[['date','total']] data.set_index('date', inplace=True) date = pd.date_range(data.index[-1], periods=num_predict, freq='MS') fitted_seri_month =
pd.Series(fitted_month, index=date)
pandas.Series
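# A minimal sketch of pandas.Series with an explicit DatetimeIndex, mirroring how the
# forecast values are wrapped above; the numbers and start date are placeholders.
import pandas as pd

fitted = [120.5, 131.2, 128.7]
date = pd.date_range("2021-01-01", periods=len(fitted), freq="MS")
fitted_series = pd.Series(fitted, index=date)
print(fitted_series)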
import pandas as pd import numpy as np import joblib from sys import getsizeof from chainladder.core.display import TriangleDisplay from chainladder.core.dunders import TriangleDunders from chainladder.core.pandas import TrianglePandas from chainladder.core.slice import TriangleSlicer class IO: ''' Class intended to allow persistence of triangle or estimator objects to disk ''' def to_pickle(self, path, protocol=None): joblib.dump(self, filename=path, protocol=protocol) def __contains__(self, value): if self.__dict__.get(value, None) is None: return False return True @property def memory_usage(self): return sum([getsizeof(v) for k, v in self.__dict__.items()]) class TriangleBase(IO, TriangleDisplay, TriangleSlicer, TriangleDunders, TrianglePandas): def __init__(self, data=None, origin=None, development=None, columns=None, index=None, *args, **kwargs): # Sanitize inputs index, columns, origin, development = self.str_to_list( index, columns, origin, development) key_gr = origin + self.flatten(development, index) # Aggregate data data_agg = data.groupby(key_gr).sum().reset_index() if not index: index = ['Total'] data_agg[index[0]] = 'Total' # Initialize origin and development dates and grains origin_date = TriangleBase.to_datetime(data_agg, origin) self.origin_grain = TriangleBase._get_grain(origin_date) if development: development_date = TriangleBase.to_datetime( data_agg, development, period_end=True) self.development_grain = TriangleBase._get_grain(development_date) col = 'development' else: development_date = origin_date self.development_grain = self.origin_grain col = None # Prep the data for 4D Triangle data_agg = self._get_axes(data_agg, index, columns, origin_date, development_date) data_agg = pd.pivot_table(data_agg, index=index+['origin'], columns=col, values=columns, aggfunc='sum') # Assign object properties self.kdims = np.array(data_agg.index.droplevel(-1).unique()) self.odims = np.array(data_agg.index.levels[-1].unique()) if development: self.ddims = np.array(data_agg.columns.levels[-1].unique()) self.ddims = self.ddims*({'Y': 12, 'Q': 3, 'M': 1} [self.development_grain]) self.vdims = np.array(data_agg.columns.levels[0].unique()) else: self.ddims = np.array([None]) self.vdims = np.array(data_agg.columns.unique()) self.valuation_date = development_date.max() self.key_labels = index self.set_slicers() # Create 4D Triangle triangle = \ np.reshape(np.array(data_agg), (len(self.kdims), len(self.odims), len(self.vdims), len(self.ddims))) triangle = np.swapaxes(triangle, 1, 2) # Set all 0s to NAN for nansafe ufunc arithmetic triangle[triangle == 0] = np.nan self.values = np.array(triangle, dtype=kwargs.get('dtype', None)) # Used to show NANs in lower part of triangle self.nan_override = False self.valuation = self._valuation_triangle() def _len_check(self, x, y): if len(x) != len(y): raise ValueError( 'Length mismatch: Expected axis has ', '{} elements, new values have'.format(len(x)), ' {} elements'.format(len(y))) def _get_date_axes(self, origin_date, development_date): ''' Function to find any missing origin dates or development dates that would otherwise mess up the origin/development dimensions. ''' def complete_date_range(origin_date, development_date, origin_grain, development_grain): ''' Determines origin/development combinations in full. Useful for when the triangle has holes in it. 
''' origin_unique = pd.period_range( start=origin_date.min(), end=origin_date.max(), freq=origin_grain).to_timestamp() development_unique = pd.period_range( start=origin_date.min(), end=development_date.max(), freq=development_grain).to_timestamp() development_unique = TriangleBase._period_end(development_unique) # Let's get rid of any development periods before origin periods cart_prod = TriangleBase._cartesian_product( origin_unique, development_unique) cart_prod = cart_prod[cart_prod[:, 0] <= cart_prod[:, 1], :] return pd.DataFrame(cart_prod, columns=['origin', 'development']) cart_prod_o = complete_date_range( pd.Series(origin_date.min()), development_date, self.origin_grain, self.development_grain) cart_prod_d = complete_date_range( origin_date, pd.Series(origin_date.max()), self.origin_grain, self.development_grain) cart_prod_t = pd.DataFrame({'origin': origin_date, 'development': development_date}) cart_prod = cart_prod_o.append(cart_prod_d, sort=True) \ .append(cart_prod_t, sort=True) \ .drop_duplicates() cart_prod = cart_prod[cart_prod['development'] >= cart_prod['origin']] return cart_prod def _get_axes(self, data_agg, groupby, columns, origin_date, development_date): ''' Preps axes for the 4D triangle ''' date_axes = self._get_date_axes(origin_date, development_date) kdims = data_agg[groupby].drop_duplicates() kdims['key'] = date_axes['key'] = 1 all_axes =
pd.merge(date_axes, kdims, on='key')
pandas.merge
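# A sketch of the pandas.merge completion above: a cross join implemented with a
# constant 'key' column, as the triangle code does. The frames are toy data, not
# real origin/development axes.
import pandas as pd

date_axes = pd.DataFrame({"origin": ["2019", "2020"]})
kdims = pd.DataFrame({"grp": ["A", "B"]})
date_axes["key"] = kdims["key"] = 1
all_axes = pd.merge(date_axes, kdims, on="key").drop(columns="key")
print(all_axes)  # 2 x 2 = 4 rows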
import math import matplotlib.pyplot as plt import numpy as np import pandas as pd import os from pandas.core.frame import DataFrame from torch.utils.data import Dataset, DataLoader import torch import pickle import datetime class data_loader(Dataset): def __init__(self, df_feature, df_label, df_label_reg, t=None): assert len(df_feature) == len(df_label) assert len(df_feature) == len(df_label_reg) # df_feature = df_feature.reshape(df_feature.shape[0], df_feature.shape[1] // 6, df_feature.shape[2] * 6) self.df_feature=df_feature self.df_label=df_label self.df_label_reg = df_label_reg self.T=t self.df_feature=torch.tensor( self.df_feature, dtype=torch.float32) self.df_label=torch.tensor( self.df_label, dtype=torch.float32) self.df_label_reg=torch.tensor( self.df_label_reg, dtype=torch.float32) def __getitem__(self, index): sample, target, label_reg =self.df_feature[index], self.df_label[index], self.df_label_reg[index] if self.T: return self.T(sample), target else: return sample, target, label_reg def __len__(self): return len(self.df_feature) def create_dataset(df, station, start_date, end_date, mean=None, std=None): data=df[station] feat, label, label_reg =data[0], data[1], data[2] referece_start_time=datetime.datetime(2013, 3, 1, 0, 0) referece_end_time=datetime.datetime(2017, 2, 28, 0, 0) assert (pd.to_datetime(start_date) - referece_start_time).days >= 0 assert (pd.to_datetime(end_date) - referece_end_time).days <= 0 assert (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days >= 0 index_start=(pd.to_datetime(start_date) - referece_start_time).days index_end=(pd.to_datetime(end_date) - referece_start_time).days feat=feat[index_start: index_end + 1] label=label[index_start: index_end + 1] label_reg=label_reg[index_start: index_end + 1] # ori_shape_1, ori_shape_2=feat.shape[1], feat.shape[2] # feat=feat.reshape(-1, feat.shape[2]) # feat=(feat - mean) / std # feat=feat.reshape(-1, ori_shape_1, ori_shape_2) return data_loader(feat, label, label_reg) def create_dataset_shallow(df, station, start_date, end_date, mean=None, std=None): data=df[station] feat, label, label_reg =data[0], data[1], data[2] referece_start_time=datetime.datetime(2013, 3, 1, 0, 0) referece_end_time=datetime.datetime(2017, 2, 28, 0, 0) assert (pd.to_datetime(start_date) - referece_start_time).days >= 0 assert (
pd.to_datetime(end_date)
pandas.to_datetime
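# A sketch of pandas.to_datetime on a single date string, as in the asserts above.
# The reference window and end date are illustrative values only.
import datetime
import pandas as pd

reference_end = datetime.datetime(2017, 2, 28)
end_date = "2017-01-31"
print((pd.to_datetime(end_date) - reference_end).days <= 0)  # True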
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random

df_motor =
pd.read_csv('/home/ubuntu/bagfiles/3r/r2_motor.csv', header=0)
pandas.read_csv
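# A minimal round-trip sketch of pandas.read_csv with header=0, as completed above;
# the file name and columns are illustrative rather than the original rosbag export.
import pandas as pd

pd.DataFrame({"t": [0.0, 0.1], "torque": [1.2, 1.3]}).to_csv("motor_example.csv", index=False)
df_motor = pd.read_csv("motor_example.csv", header=0)
print(df_motor.head())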
from datetime import datetime import numpy as np import pandas as pd from scipy.stats import pearsonr from scipy.stats import zscore import matplotlib.pyplot as pyplot def drawHist(x): #创建散点图 #第一个参数为点的横坐标 #第二个参数为点的纵坐标 pyplot.hist(x, 100) pyplot.xlabel('x') pyplot.ylabel('y') pyplot.title('gaosi') pyplot.show() def read(): return pd.read_csv('./google-play-store-apps/googleplaystore.csv') def readComment(): return pd.read_csv('./google-play-store-apps/googleplaystore_user_reviews.csv') def removeNaData(): data = read() print(data.info()) data.dropna(axis=0, inplace=True) print("--------after remove na ------") print(data.info()) return data def changeSize(size): if 'G' in size: return float(size.replace('G', '')) * 1024 * 1024 * 1024 elif 'M' in size: return float(size.replace('M', '')) * 1024 * 1024 elif 'K' in size: return float(size.replace('K', '')) * 1024 else: return 0 def changeInstalls(install): return install.replace(',', '').replace('+', '') def changePrice(price): if '$' in price: return float(price.replace('$', '')) else: return 0 if __name__ == '__main__': data = removeNaData() result = data.copy() print(data.head()) # preprocessing Category # get all type categoryType = data["Category"].unique() # change type to dict categoryDict = {} cont = 0 for type in categoryType: categoryDict[type] = cont cont += 1 result["CategoryTypeNumber"] = data["Category"].map(categoryDict).astype(int) # preprocessing Reviews result["Reviews"] = data["Reviews"].astype(int) # preprocessing Size result["Size"] = data["Size"].map(changeSize).astype('float32') # preprocessing Installs result["Installs"] = data["Installs"].map(changeInstalls).astype(int) # preprocessing Type typeType = data["Type"].unique() typeDict = {} cont = 0 for type in typeType: typeDict[type] = cont cont += 1 result["Type"] = data["Type"].map(typeDict).astype(int) # preprocessing Price result["Price"] = data["Price"].map(changePrice).astype('float32') # preprocessing Content Rating contentType = data["Content Rating"].unique() contentDict = {} cont = 0 for type in contentType: contentDict[type] = cont cont += 1 result["Content Rating"] = data["Content Rating"].map(contentDict).astype(int) # preprocessing Genres genresType = data["Genres"].unique() genresDict = {} cont = 0 for type in genresType: genresDict[type] = cont cont += 1 result["Genres"] = data["Genres"].map(genresDict).astype(int) # preprocessing last updated result["Last Updated"] =
pd.to_datetime(result["Last Updated"])
pandas.to_datetime
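# A sketch of pandas.to_datetime on a free-form string column, as in the
# "Last Updated" preprocessing above; the sample rows are invented and parsing relies
# on pandas' default date inference.
import pandas as pd

result = pd.DataFrame({"Last Updated": ["January 7, 2018", "March 3, 2019"]})
result["Last Updated"] = pd.to_datetime(result["Last Updated"])
print(result["Last Updated"].dt.year.tolist())  # [2018, 2019]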
import pandas as pd import os import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import random from sklearn import svm from keras.optimizers import Adam from keras.layers import LeakyReLU from nltk.stem import WordNetLemmatizer import operator from textblob import TextBlob from nltk.tokenize import sent_tokenize, word_tokenize import nltk import re from wordcloud import WordCloud from nltk.stem import PorterStemmer from nltk.stem import LancasterStemmer from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from keras.models import Model from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding from keras.preprocessing.text import Tokenizer from keras.preprocessing import sequence from keras.callbacks import EarlyStopping class MBTI(): def __init__(self): self.csv_path = "mbti_1.csv" self.df = pd.read_csv(self.csv_path) self.original_df = self.df.copy() self.porter = PorterStemmer() self.lancaster = LancasterStemmer() self.lemmatizer = WordNetLemmatizer() self.all_words = {} def store_clean_df(self): self.df.to_csv('clean.csv') def load_clean_df(self): self.df = pd.read_csv('clean.csv') def transform_df(self): # Transform the df into four different df - one for each subproblem (IE,JP,NS,TF) transformed_df = self.df.copy() transformed_df['posts'] = transformed_df['posts'].apply(lambda x: x.replace('|||', '')) transformed_df['posts'] = transformed_df['posts'].apply(lambda x: ''.join([i for i in x if not i.isdigit()])) counter = 0 print(transformed_df.size) transformed_df['posts'] = transformed_df.apply(lambda row: nltk.word_tokenize(row['posts']), axis=1) for row_posts in transformed_df['posts'].tolist(): print(counter) print(row_posts) counter+=1 for feature in row_posts: try: self.all_words[feature] += 1 except: self.all_words[feature] = 0 print('Features found') self.all_words = dict(sorted(self.all_words.items(), key=operator.itemgetter(1), reverse=True)) keys = list(self.all_words.keys())[:5000] exists = {} counter = 0 for word in keys: counter +=1 print(counter) exists[word] = [] for row_posts in transformed_df['posts'].tolist(): features = row_posts exists[word].append(features.count(word)) for word in exists: transformed_df[word]= exists[word] del transformed_df['type'] del transformed_df['posts'] IE_df = transformed_df.copy() del IE_df['JP'] del IE_df['TF'] del IE_df['NS'] del IE_df['Unnamed: 0'] JP_df = transformed_df.copy() del JP_df['IE'] del JP_df['TF'] del JP_df['NS'] del JP_df['Unnamed: 0'] TF_df = transformed_df.copy() del TF_df['JP'] del TF_df['IE'] del TF_df['NS'] del TF_df['Unnamed: 0'] NS_df = transformed_df.copy() del NS_df['JP'] del NS_df['IE'] del NS_df['TF'] del NS_df['Unnamed: 0'] print('Finished') return IE_df, JP_df, TF_df, NS_df def post_cleaner(self, post): post = post.lower() post = re.sub( r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', '', post, flags=re.MULTILINE) puncs1 = ['@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '_', '+', '=', '{', '}', '[', ']', '\\', '"', "'", ';', ':', '<', '>', '/'] for punc in puncs1: post = post.replace(punc, '') puncs2 = [',', '.', '?', '!', '\n'] for punc in puncs2: post = post.replace(punc, ' ') post = re.sub('\s+', ' ', post).strip() return post def perform_eda(self): # 
++++++ Print information and description of the data #print("+++++++++++ self.df.info:") print(self.df.info()) types = self.df.type.tolist() pd.Series(types).value_counts().plot(kind="bar") plt.savefig("plot1.png") def stemSentence(self, sentence): token_words = word_tokenize(sentence) stem_sentence = [] for word in token_words: stem_sentence.append(self.lemmatizer.lemmatize(word)) stem_sentence.append(" ") return "".join(stem_sentence) def prepare_df(self): posts = self.df.posts.tolist() #clean posts = [self.post_cleaner(post) for post in posts] #lemmatize posts = [self.stemSentence(post) for post in posts] self.df['posts'] = posts #print(self.df.head(1)) # Create 4 more columns for binary classification - LABEL ENCODING, ONE-HOT ENCODING map1 = {"I": 0, "E": 1} map2 = {"N": 0, "S": 1} map3 = {"T": 0, "F": 1} map4 = {"J": 0, "P": 1} self.df['IE'] = self.df['type'].astype(str).str[0] self.df['IE'] = self.df['IE'].map(map1) self.df['NS'] = self.df['type'].astype(str).str[1] self.df['NS'] = self.df['NS'].map(map2) self.df['TF'] = self.df['type'].astype(str).str[2] self.df['TF'] = self.df['TF'].map(map3) self.df['JP'] = self.df['type'].astype(str).str[3] self.df['JP'] = self.df['JP'].map(map4) def add_features(self): # Add new features, such as words per comment, links per comment, images per comment... self.df['ellipsis_per_comment'] = self.df['posts'].apply(lambda x: x.count('...') / (x.count("|||") + 1)) self.df['words_per_comment'] = self.df['posts'].apply(lambda x: x.count(' ') / (x.count("|||") + 1)) self.df['words'] = self.df['posts'].apply(lambda x: x.count(' ')) self.df['link_per_comment'] = self.df['posts'].apply(lambda x: x.count('http') / (x.count("|||") + 1)) self.df['smiles_per_comment'] = self.df['posts'].apply(lambda x: (x.count(':-)') + x.count(':)') + x.count(':-D') + x.count(':D')) / (x.count("|||") + 1)) self.df['sad'] = self.df['posts'].apply(lambda x: (x.count(':(') + x.count('):') ) / (x.count("|||") + 1)) self.df['heart'] = self.df['posts'].apply(lambda x: x.count('<3') / (x.count("|||") + 1)) self.df['smiling'] = self.df['posts'].apply(lambda x: x.count(';)') / (x.count("|||") + 1)) self.df['exclamation_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("!") / (x.count("|||") + 1)) self.df['question_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("?") / (x.count("|||") + 1)) self.df['polarity'] = self.df['posts'].apply(lambda x: TextBlob(x).sentiment.polarity) def plot(self): # Plot each category to see if it is balanced - We observe that IE and NS are fairly imbalanced. 
binary1 = self.df.IE.tolist() pd.Series(binary1).value_counts().plot(kind="bar", title="0=I, 1=E") # plt.show() plt.savefig("IE.png") binary1 = self.df.NS.tolist() pd.Series(binary1).value_counts().plot(kind="bar", title="0=N, 1=S") # plt.show() plt.savefig("NS.png") binary1 = self.df.TF.tolist() pd.Series(binary1).value_counts().plot(kind="bar", title="0=T, 1=F") # plt.show() plt.savefig("TF.png") binary1 = self.df.JP.tolist() pd.Series(binary1).value_counts().plot(kind="bar", title="0=J, 1=P") # plt.show() plt.savefig("JP.png") # PLOT 2 plt.figure(figsize=(15, 10)) sns.swarmplot("type", "words_per_comment", data=self.df) plt.savefig("plot2.png") # PLOT 3 plt.figure(figsize=(15, 10)) sns.jointplot("variance_of_word_counts", "words_per_comment", data=self.df, kind="hex") # plt.show() plt.savefig("plot3.png") def wordcloud(self): fig, ax = plt.subplots(len(self.df['type'].unique()), sharex=True, figsize=(15,10*len(self.df['type'].unique()))) k = 0 for i in self.df['type'].unique(): df_4 = self.df[self.df['type'] == i] wordcloud = WordCloud().generate(df_4['posts'].to_string()) ax[k].imshow(wordcloud) ax[k].set_title(i) ax[k].axis("off") k+=1 wordcloud.to_file('N.png') def create_clean_df(self): self.perform_eda() self.add_features() self.prepare_df() self.store_clean_df() def create_transformed_df(self): self.load_clean_df() IE_df, JP_df, TF_df, NS_df = self.transform_df() IE_df.to_csv('IE_df.csv') JP_df.to_csv('JP_df.csv') TF_df.to_csv('TF_df.csv') NS_df.to_csv('NS_df.csv') def remove_bars(self): self.df['posts'] = self.df['posts'].apply(lambda x: x.replace('|||', '')) def svm(self): IE_df =
pd.read_csv('IE_df.csv')
pandas.read_csv
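# The prepare_df/add_features methods above encode each MBTI axis as a binary
# column and derive per-comment counts; a minimal self-contained sketch of the
# same idea on a toy frame (the sample rows below are invented, only the
# 'type'/'posts' column names follow the snippet).
import pandas as pd

# Toy data standing in for the MBTI posts dataset (rows are invented).
df = pd.DataFrame({
    "type": ["INTJ", "ENFP", "ISTP"],
    "posts": ["post one|||post two", "hello ||| :) http://x.y", "fine...|||ok"],
})

# One binary target per axis, as in prepare_df: I/E, N/S, T/F, J/P.
axis_maps = [("IE", 0, {"I": 0, "E": 1}),
             ("NS", 1, {"N": 0, "S": 1}),
             ("TF", 2, {"T": 0, "F": 1}),
             ("JP", 3, {"J": 0, "P": 1})]
for col, pos, mapping in axis_maps:
    df[col] = df["type"].str[pos].map(mapping)

# A per-comment feature in the same style as add_features:
df["words_per_comment"] = df["posts"].apply(lambda x: x.count(" ") / (x.count("|||") + 1))
print(df[["type", "IE", "NS", "TF", "JP", "words_per_comment"]])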
# -*- coding: utf-8 -*-
# Copyright (c) May 2021, Wageningen Environmental Research
# <NAME> (<EMAIL>)
import sys, os

import xarray as xr
import pandas as pd

# Use .get() so a missing CMD_MODE environment variable does not raise a KeyError.
CMD_MODE = os.environ.get("CMD_MODE") == "1"

from .util import create_agera5_fnames, convert_to_celsius


def extract_point(agera5_dir, point, startday, endday, tocelsius=False):
    df_final = pd.DataFrame()
pandas.DataFrame
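# extract_point() above starts from an empty DataFrame and, judging by the
# helper names, loops over AgERA5 NetCDF files; the function body is not shown,
# so the following is only a sketch of the usual nearest-grid-point pattern
# with xarray. The variable name and the lon/lat dimension names are assumptions.
import pandas as pd
import xarray as xr

def extract_point_sketch(fnames, lon, lat, varname="Temperature_Air_2m_Mean_24h"):
    """Pull one grid point out of a list of NetCDF files and stack the
    results into a single DataFrame (hypothetical variable/dimension names)."""
    frames = []
    for fname in fnames:
        with xr.open_dataset(fname) as ds:
            # Nearest-neighbour selection of the requested point.
            point = ds[varname].sel(lon=lon, lat=lat, method="nearest")
            frames.append(point.to_dataframe()[[varname]])
    return pd.concat(frames).sort_index() if frames else pd.DataFrame()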
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Oct 24 15:00:30 2020 @author: jesper """ import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors import seaborn as sn class Table(): # Makes a table at the end of a season def __init__(self): self.table = pd.DataFrame( # Initiate an empty table columns = ['Team','Points','Win','Draw','Lose','Goals for','Goals against','Goal difference'] ) def add_numbers(self,team_list): # Is called from the simulate_season method of the Stats class for i in range(len(team_list)): t = team_list[i] self.table = self.table.append( pd.DataFrame( [[t.name,(t.wins*3+t.draws*1),t.wins,t.draws,t.losses, t.goals_for,t.goals_against,(t.goals_for-t.goals_against)]], columns= ['Team','Points','Win','Draw','Lose','Goals for','Goals against','Goal difference'] ) ) self.table = self.table.sort_values(by='Points',ascending=False) self.table.index = range(1,len(self.table)+1) def show_table(self): return self.table class Team(): # Team objects which populate the Table def __init__(self,name): self.name = name self.wins = 0 self.draws = 0 self.losses = 0 self.goals_for = 0 self.goals_against = 0 def add_result(self,scored,conceded): if scored > conceded: # win self.wins += 1 elif scored == conceded: # draw self.draws += 1 else: # loss self.losses +=1 self.goals_for += scored self.goals_against += conceded class Stats(): # raw data, teams, colors, parameters, etc. def __init__(self,df): self.df = df self.team_colors = {'Arsenal':'#ef0107', 'Ast<NAME>':'#95bfe5', 'Bournemouth':'#da291c', 'Brighton':'#0057b8', 'Burnley':'#6c1d45', 'Chelsea':'#034694', 'Crystal Palace':'#1b458f', 'Everton':'#003399', 'Leicester':'#003090', 'Liverpool':'#c8102e', 'Man City':'#6cabdd', 'Man United':'#da291c', 'Newcastle':'#241f20', 'Norwich':'#fff200', 'Sheffield United':'#ee2737', 'Southampton':'#d71920', 'Tottenham':'#132257', 'Watford':'#fbee23', 'West Ham':'#7a263a', 'Wolves':'#fdb913'} #https://towardsdatascience.com/visualizing-the-2019-20-english-premier-league-season-with-matplotlib-and-pandas-fd491a07cfda self.teams =list(set(df['HomeTeam'])) self.home_teams = list(df['HomeTeam']) self.away_teams = list(df['AwayTeam']) expected_values = pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded']) # initiate empty DataFrame # Naive approach, each team has a offense and a defense expected value # Generates a DataFrame with teams and their excpected values for i in range(len(self.teams)): avg_score = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTHG']) + np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTAG']))/(len(df)/len(self.teams)*2) avg_letin = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTAG']) + np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTHG']))/(len(df)/len(self.teams)*2) expected_values = expected_values.append( # Populate the DataFrame pd.DataFrame( [[self.teams[i],avg_score,avg_letin]], columns= ['Team','ExpectedScored','ExpectedConceded'] ) ) expected_values.index = range(1,len(self.teams)+1) self.expected_values = expected_values # The input values for the naive approach # Including home advantage, each team has two home and away parameters # Generates a DataFrame with teams and their excpected values expected_values_home = pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded']) expected_values_away =
pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded'])
pandas.DataFrame
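# The Stats class derives per-team expected goals scored/conceded; the
# simulate_season method is not shown here, so this is just a sketch of one
# plausible way to turn those expectations into scorelines (Poisson draws).
import numpy as np

rng = np.random.default_rng(42)

def simulate_match(exp_home_scored, exp_home_conceded,
                   exp_away_scored, exp_away_conceded):
    """Draw a single scoreline: each side's goal rate mixes its own
    attacking expectation with the opponent's defensive expectation."""
    home_rate = (exp_home_scored + exp_away_conceded) / 2
    away_rate = (exp_away_scored + exp_home_conceded) / 2
    return rng.poisson(home_rate), rng.poisson(away_rate)

# e.g. a strong home attack against a leaky away defence
print(simulate_match(2.1, 0.9, 1.2, 1.6))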
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jan 26 15:39:02 2018 @author: joyce """ import pandas as pd import numpy as np from numpy.matlib import repmat from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\ Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\ Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama class stAlpha(object): def __init__(self,begin,end): self.begin = begin self.end = end self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close') self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open') self.high = get_stockdata_from_sql(1,self.begin,self.end,'High') self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low') self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol') self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount') self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap') self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg') self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH') self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH') # self.mkt = get_fama_from_sql() @timer def alpha1(self): volume = self.volume ln_volume = np.log(volume) ln_volume_delta = Delta(ln_volume,1) close = self.close Open = self.open price_temp = pd.concat([close,Open],axis = 1,join = 'outer') price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open'] del price_temp['Close'],price_temp['Open'] r_ln_volume_delta = Rank(ln_volume_delta) r_ret = Rank(price_temp) rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,6) alpha = corr alpha.columns = ['alpha1'] return alpha @timer def alpha2(self): close = self.close low = self.low high = self.high temp = pd.concat([close,low,high],axis = 1,join = 'outer') temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \ / (temp['High'] - temp['Low']) del temp['Close'],temp['Low'],temp['High'] alpha = -1 * Delta(temp,1) alpha.columns = ['alpha2'] return alpha @timer def alpha3(self): close = self.close low = self.low high = self.high temp = pd.concat([close,low,high],axis = 1,join = 'outer') close_delay = Delay(pd.DataFrame(temp['Close']),1) close_delay.columns = ['close_delay'] temp = pd.concat([temp,close_delay],axis = 1,join = 'inner') temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low'])) temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High'])) temp['alpha_temp'] = 0 temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min'] temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max'] alpha = Sum(pd.DataFrame(temp['alpha_temp']),6) alpha.columns = ['alpha3'] return alpha @timer def alpha4(self): close = self.close volume = self.volume close_mean_2 = Mean(close,2) close_mean_8 = Mean(close,8) close_std = STD(close,8) volume_mean_20 = Mean(volume,20) data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner') data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume'] data['alpha'] = -1 data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1 data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1 alpha = pd.DataFrame(data['alpha']) alpha.columns = ['alpha4'] return alpha @timer def alpha5(self): volume = self.volume high = self.high r1 = TsRank(volume,5) r2 = 
TsRank(high,5) rank = pd.concat([r1,r2],axis = 1,join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,5) alpha = -1 * TsMax(corr,5) alpha.columns = ['alpha5'] return alpha @timer def alpha6(self): Open = self.open high = self.high df = pd.concat([Open,high],axis = 1,join = 'inner') df['price'] = df['Open'] * 0.85 + df['High'] * 0.15 df_delta = Delta(pd.DataFrame(df['price']),1) alpha = Rank(np.sign(df_delta)) alpha.columns = ['alpha6'] return alpha @timer def alpha7(self): close = self.close vwap = self.vwap volume = self.volume volume_delta = Delta(volume,3) data = pd.concat([close,vwap],axis = 1,join = 'inner') data['diff'] = data['Vwap'] - data['Close'] r1 = Rank(TsMax(pd.DataFrame(data['diff']),3)) r2 = Rank(TsMin(pd.DataFrame(data['diff']),3)) r3 = Rank(volume_delta) rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner') rank.columns = ['r1','r2','r3'] alpha = (rank['r1'] + rank['r2'])* rank['r3'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha7'] return alpha @timer def alpha8(self): high = self.high low = self.low vwap = self.vwap data = pd.concat([high,low,vwap],axis = 1,join = 'inner') data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2 data_price_delta = Delta(pd.DataFrame(data_price),4) * -1 alpha = Rank(data_price_delta) alpha.columns = ['alpha8'] return alpha @timer def alpha9(self): high = self.high low = self.low volume = self.volume data = pd.concat([high,low,volume],axis = 1,join = 'inner') data['price']= (data['High'] + data['Low'])/2 data['price_delay'] = Delay(pd.DataFrame(data['price']),1) alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol'] alpha_temp_unstack = alpha_temp.unstack(level = 'ID') alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean() alpha_final = alpha.stack() alpha = pd.DataFrame(alpha_final) alpha.columns = ['alpha9'] return alpha @timer def alpha10(self): ret = self.ret close = self.close ret_std = STD(pd.DataFrame(ret),20) ret_std.columns = ['ret_std'] data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner') temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0]) temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0]) temp1.columns = ['temp'] temp2.columns = ['temp'] temp = pd.concat([temp1,temp2],axis = 0,join = 'outer') temp_order = pd.concat([data,temp],axis = 1) temp_square = pd.DataFrame(np.power(temp_order['temp'],2)) alpha_temp = TsMax(temp_square,5) alpha = Rank(alpha_temp) alpha.columns = ['alpha10'] return alpha @timer def alpha11(self): high = self.high low = self.low close = self.close volume = self.volume data = pd.concat([high,low,close,volume],axis = 1,join = 'inner') data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\ /(data['High'] - data['Low']) * data['Vol'] alpha = Sum(pd.DataFrame(data_temp),6) alpha.columns = ['alpha11'] return alpha @timer def alpha12(self): Open = self.open vwap = self.vwap close = self.close data = pd.concat([Open,vwap,close],axis = 1, join = 'inner') data['p1'] = data['Open'] - Mean(data['Open'],10) data['p2'] = data['Close'] - data['Vwap'] r1 = Rank(pd.DataFrame(data['p1'])) r2 = Rank(pd.DataFrame(np.abs(data['p2']))) rank = pd.concat([r1,r2],axis = 1,join = 'inner') rank.columns = ['r1','r2'] alpha = rank['r1'] - rank['r2'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha12'] return alpha @timer def alpha13(self): high = self.high low = self.low vwap = self.vwap data = pd.concat([high,low,vwap],axis = 1,join = 'inner') alpha = (data['High'] + data['Low'])/2 - data['Vwap'] 
alpha = pd.DataFrame(alpha) alpha.columns = ['alpha13'] return alpha @timer def alpha14(self): close = self.close close_delay = Delay(close,5) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis = 1, join = 'inner') alpha = data['Close'] - data['close_delay'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha14'] return alpha @timer def alpha15(self): Open = self.open close = self.close close_delay = Delay(close,1) close_delay.columns = ['close_delay'] data = pd.concat([Open,close_delay],axis = 1,join = 'inner') alpha = data['Open']/data['close_delay'] - 1 alpha = pd.DataFrame(alpha) alpha.columns = ['alpha15'] return alpha @timer def alpha16(self): vwap = self.vwap volume = self.volume data = pd.concat([vwap,volume],axis = 1, join = 'inner') r1 = Rank(pd.DataFrame(data['Vol'])) r2 = Rank(pd.DataFrame(data['Vwap'])) rank = pd.concat([r1,r2],axis = 1, join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,5) alpha = -1 * TsMax(Rank(corr),5) alpha.columns = ['alpha16'] return alpha @timer def alpha17(self): vwap = self.vwap close = self.close data = pd.concat([vwap,close],axis = 1, join = 'inner') data['vwap_max15'] = TsMax(data['Vwap'],15) data['close_delta5'] = Delta(data['Close'],5) temp = np.power(data['vwap_max15'],data['close_delta5']) alpha = Rank(pd.DataFrame(temp)) alpha.columns = ['alpha17'] return alpha @timer def alpha18(self): """ this one is similar with alpha14 """ close = self.close close_delay = Delay(close,5) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis = 1, join = 'inner') alpha = data['Close']/data['close_delay'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha18'] return alpha @timer def alpha19(self): close = self.close close_delay = Delay(close,5) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis = 1, join = 'inner') data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay'] data['temp2'] = (data['Close'] - data['close_delay'])/data['Close'] temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']]) temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']]) temp1.columns = ['temp'] temp2.columns = ['temp'] temp = pd.concat([temp1,temp2],axis = 0) data = pd.concat([data,temp],axis = 1,join = 'outer') alpha = pd.DataFrame(data['temp']) alpha.columns = ['alpha19'] return alpha @timer def alpha20(self): close = self.close close_delay = Delay(close,6) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis = 1, join = 'inner') alpha = (data['Close'] - data['close_delay'])/data['close_delay'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha20'] return alpha @timer def alpha21(self): close = self.close close_mean = Mean(close,6) alpha = RegBeta(0,close_mean,None,6) alpha.columns = ['alpha21'] return alpha @timer def alpha22(self): close = self.close close_mean = Mean(close,6) data = pd.concat([close,close_mean],axis = 1,join = 'inner') data.columns = ['close','close_mean'] temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean']) temp_delay = Delay(temp,3) data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner') data_temp.columns = ['temp','temp_delay'] temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']) alpha = SMA(temp2,12,1) alpha.columns = ['alpha22'] return alpha @timer def alpha23(self): close = self.close close_std = STD(close,20) close_delay = Delay(close,1) data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner') data.columns = 
['Close','close_std','close_delay'] data['temp'] = data['close_std'] data['temp'][data['Close'] <= data['close_delay']] = 0 temp = pd.DataFrame(data['temp']) sma1 = SMA(temp,20,1) sma2 = SMA(pd.DataFrame(data['close_std']),20,1) sma = pd.concat([sma1,sma2],axis = 1, join = 'inner') sma.columns = ['sma1','sma2'] alpha = pd.DataFrame(sma['sma1']/sma['sma2']) alpha.columns = ['alpha23'] return alpha @timer def alpha24(self): close = self.close close_delay = Delay(close,5) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis=1 ,join = 'inner' ) temp = data['Close'] - data['close_delay'] temp = pd.DataFrame(temp) alpha = SMA(temp,5,1) alpha.columns = ['alpha24'] return alpha @timer def alpha25(self): close = self.close close_delta = Delta(close,7) ret = self.ret r1 = Rank(close_delta) r3 = Rank(Sum(ret,250)) volume = self.volume volume_mean = Mean(pd.DataFrame(volume['Vol']),20) volume_mean.columns = ['volume_mean'] data = pd.concat([volume,volume_mean],axis = 1,join = 'inner') temp0 = pd.DataFrame(data['Vol']/data['volume_mean']) temp = DecayLinear(temp0,9) r2 = Rank(temp) rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner') rank.columns = ['r1','r2','r3'] alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3']) alpha.columns = ['alpha25'] return alpha @timer def alpha26(self): close = self.close vwap = self.vwap close_mean7 = Mean(close,7) close_mean7.columns = ['close_mean7'] close_delay5 = Delay(close,5) close_delay5.columns = ['close_delay5'] data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner') corr = Corr(data,230) corr.columns = ['corr'] data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner') alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha26'] return alpha @timer def alpha27(self): """ uncompleted """ close = self.close close_delay3 = Delay(close,3) close_delay6 = Delay(close,6) data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner') data.columns = ['close','close_delay3','close_delay6'] temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100) temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100) data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner') data_temp.columns = ['temp1','temp2'] temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2']) alpha = DecayLinear(temp,12) alpha.columns = ['alpha27'] return alpha @timer def alpha28(self): close = self.close low = self.low high = self.high low_min = TsMin(low,9) high_max = TsMax(high,9) data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner') data.columns = ['Close','low_min','high_max'] temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min'])) sma1 = SMA(temp1,3,1) sma2 = SMA(sma1,3,1) sma = pd.concat([sma1,sma2],axis = 1, join = 'inner') sma.columns = ['sma1','sma2'] alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3) alpha.columns = ['alpha28'] return alpha @timer def alpha29(self): close = self.close volume = self.volume close_delay = Delay(close,6) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner') alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha29'] return alpha @timer def alpha30(self): close = self.close close_delay = Delay(close,1) @timer def alpha31(self): close = self.close close_delay = Delay(close,12) 
close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay],axis = 1, join = 'inner') alpha = (data['Close'] - data['close_delay'])/data['close_delay'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha31'] return alpha @timer def alpha32(self): volume = self.volume high = self.high r1 = Rank(volume) r2 = Rank(high) rank = pd.concat([r1,r2],axis = 1,join = 'inner') corr = Corr(rank,3) r = Rank(corr) alpha = -1 * Sum(r,3) alpha.columns = ['alpha32'] return alpha @timer def alpha33(self): low = self.low volume = self.volume ret = self.ret low_min = TsMin(low,5) low_min_delay = Delay(low_min,5) data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner') data1.columns = ['low_min','low_min_delay'] ret_sum240 = Sum(ret,240) ret_sum20 = Sum(ret,20) ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner') ret_temp.columns = ['ret240','ret20'] temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min']) temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220) r_temp2 = Rank(temp2) r_volume = TsRank(volume,5) temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner') temp.columns = ['temp1','r_temp2','r_volume'] alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha33'] return alpha @timer def alpha34(self): close = self.close close_mean = Mean(close,12) close_mean.columns = ['close_mean'] data = pd.concat([close,close_mean],axis = 1, join = 'inner') alpha = pd.DataFrame(data['close_mean']/data['Close']) alpha.columns = ['alpha34'] return alpha @timer def alpha35(self): volume = self.volume Open = self.open open_delay = Delay(Open,1) open_delay.columns = ['open_delay'] open_linear = DecayLinear(Open,17) open_linear.columns = ['open_linear'] open_delay_temp = DecayLinear(open_delay,15) r1 = Rank(open_delay_temp) data = pd.concat([Open,open_linear],axis = 1,join = 'inner') Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear'] rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,7) r2 = Rank(-1 * corr) r = pd.concat([r1,r2],axis = 1,join = 'inner') r.columns = ['r1','r2'] alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2'])) alpha = pd.DataFrame(alpha) alpha.columns = ['alpha35'] return alpha @timer def alpha36(self): volume = self.volume vwap = self.vwap r1 = Rank(volume) r2 = Rank(vwap) rank = pd.concat([r1,r2],axis = 1,join = 'inner') corr = Corr(rank,6) temp = Sum(corr,2) alpha = Rank(temp) alpha.columns = ['alpha36'] return alpha @timer def alpha37(self): Open = self.open ret = self.ret open_sum = Sum(Open,5) ret_sum = Sum(ret,5) data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner') data.columns = ['open_sum','ret_sum'] temp = data['open_sum'] * data['ret_sum'] temp_delay = Delay(temp,10) data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner') data_temp.columns = ['temp','temp_delay'] alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])) alpha.columns = ['alpha37'] return alpha @timer def alpha38(self): high = self.high high_mean = Mean(high,20) high_delta = Delta(high,2) data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner') data.columns = ['high','high_mean','high_delta'] data['alpha'] = -1 * data['high_delta'] data['alpha'][data['high_mean'] >= data['high']] = 0 alpha = pd.DataFrame(data['alpha']) alpha.columns = ['alpha38'] return alpha @timer def alpha39(self): close = self.close Open = self.open vwap = self.vwap volume = self.volume close_delta2 = 
Delta(close,2) close_delta2_decay = DecayLinear(close_delta2,8) r1 = Rank(close_delta2_decay) price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner') price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7) volume_mean = Mean(volume,180) volume_mean_sum = Sum(volume_mean,37) rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner') corr = Corr(rank,14) corr_decay = DecayLinear(corr,12) r2 = Rank(corr_decay) r = pd.concat([r1,r2],axis = 1,join = 'inner') r.columns = ['r1','r2'] alpha = pd.DataFrame(r['r2'] - r['r1']) alpha.columns = ['alpha39'] return alpha @timer def alpha40(self): close = self.close volume = self.volume close_delay = Delay(close,1) data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner') data.columns = ['close','volume','close_delay'] data['temp1'] = data['volume'] data['temp2'] = data['volume'] data['temp1'][data['close'] <= data['close_delay']] = 0 data['temp2'][data['close'] > data['close_delay']] = 0 s1 = Sum(pd.DataFrame(data['temp1']),26) s2 = Sum(pd.DataFrame(data['temp2']),26) s = pd.concat([s1,s2], axis = 1, join = 'inner') s.columns = ['s1','s2'] alpha = pd.DataFrame(s['s1']/s['s2'] * 100) alpha.columns = ['alpha40'] return alpha @timer def alpha41(self): vwap = self.vwap vwap_delta = Delta(vwap,3) vwap_delta_max = TsMax(vwap_delta,5) alpha = -1 * Rank(vwap_delta_max) alpha.columns = ['alpha41'] return alpha @timer def alpha42(self): high = self.high volume = self.volume high_std = STD(high,10) r1 = Rank(high_std) data = pd.concat([high,volume],axis = 1,join = 'inner') corr = Corr(data,10) r = pd.concat([r1,corr],axis = 1,join = 'inner') r.columns = ['r1','corr'] alpha = pd.DataFrame(-1 * r['r1'] * r['corr']) alpha.columns = ['alpha42'] return alpha @timer def alpha43(self): close = self.close volume = self.volume close_delay = Delay(close,1) close_delay.columns = ['close_delay'] data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner') data['sign'] = 1 data['sign'][data['Close'] < data['close_delay']] = -1 temp = pd.DataFrame(data['Vol'] * data['sign']) alpha = Sum(temp,6) alpha.columns = ['alpha43'] return alpha @timer def alpha44(self): volume = self.volume vwap = self.vwap low = self.low volume_mean = Mean(volume,10) rank = pd.concat([low,volume_mean],axis = 1,join = 'inner') corr = Corr(rank,7) corr_decay = DecayLinear(corr,6) r1 = TsRank(corr_decay,4) vwap_delta = Delta(vwap,3) vwap_delta_decay = DecayLinear(vwap_delta,10) r2 = TsRank(vwap_delta_decay,15) r = pd.concat([r1,r2],axis = 1,join = 'inner') r.columns = ['r1','r2'] alpha = pd.DataFrame(r['r1'] + r['r2']) alpha.columns = ['alpha44'] return alpha @timer def alpha45(self): volume = self.volume vwap = self.vwap close = self.close Open = self.open price = pd.concat([close,Open],axis = 1,join = 'inner') price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4 price_delta = Delta(pd.DataFrame(price['price']),1) r1 = Rank(price_delta) volume_mean = Mean(volume,150) data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner') corr = Corr(data,15) r2 = Rank(corr) r = pd.concat([r1,r2],axis = 1,join = 'inner') r.columns = ['r1','r2'] alpha = pd.DataFrame(r['r1'] + r['r2']) alpha.columns = ['alpha45'] return alpha @timer def alpha46(self): close = self.close close_mean3 = Mean(close,3) close_mean6 = Mean(close,6) close_mean12 = Mean(close,12) close_mean24 = Mean(close,24) data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner') data.columns = ['c','c3','c6','c12','c24'] alpha = (data['c3'] + data['c6'] + 
data['c12'] + data['c24'])/(4 * data['c']) alpha = pd.DataFrame(alpha) alpha.columns = ['alpha46'] return alpha @timer def alpha47(self): close = self.close low = self.low high = self.high high_max = TsMax(high,6) low_min = TsMin(low,6) data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner') data.columns = ['high_max','low_min','close'] temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \ data['low_min']) * 100) alpha = SMA(temp,9,1) alpha.columns = ['alpha47'] return alpha @timer def alpha48(self): close = self.close volume = self.volume temp1 = Delta(close,1) temp1_delay1 = Delay(temp1,1) temp1_delay2 = Delay(temp1,2) data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner') data.columns = ['temp1','temp1_delay1','temp1_delay2'] temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \ + np.sign(data['temp1_delay2'])) volume_sum5 = Sum(volume,5) volume_sum20 = Sum(volume,20) data_temp =
pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
pandas.concat
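# The alphas above rely on the author's stats helpers (Rank, Corr, Delta, ...)
# which are not shown; as a rough illustration only, this reproduces alpha1's
# recipe (rank of log-volume change vs. rank of intraday return, 6-day rolling
# correlation) with plain pandas on synthetic wide-format data. The column
# layout and helper semantics are assumptions.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
dates = pd.date_range("2024-01-01", periods=60, freq="B")
close = pd.DataFrame(rng.lognormal(0, 0.02, (60, 3)).cumprod(axis=0),
                     index=dates, columns=["A", "B", "C"])
open_ = close.shift(1).fillna(close.iloc[0])
volume = pd.DataFrame(rng.integers(100_000, 1_000_000, (60, 3)),
                      index=dates, columns=["A", "B", "C"]).astype(float)

# Cross-sectional ranks of the two signals, then a rolling correlation.
r1 = np.log(volume).diff(1).rank(axis=1, pct=True)
r2 = ((close - open_) / open_).rank(axis=1, pct=True)
alpha1 = r1.rolling(6).corr(r2)          # matching columns, 6-day window
print(alpha1.tail())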
""" Routines for casting. """ from contextlib import suppress from datetime import date, datetime, timedelta from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Set, Sized, Tuple, Type, Union, ) import numpy as np from pandas._libs import lib, tslib, tslibs from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, Period, Timedelta, Timestamp, conversion, iNaT, ints_to_pydatetime, ints_to_pytimedelta, ) from pandas._libs.tslibs.timezones import tz_compare from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.common import ( DT64NS_DTYPE, INT64_DTYPE, POSSIBLY_CAST_DTYPES, TD64NS_DTYPE, ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_sparse, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, ABCExtensionArray, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_nat_for_dtype, isna, na_value_for_dtype, notna, ) if TYPE_CHECKING: from pandas import Series from pandas.core.arrays import ExtensionArray from pandas.core.indexes.base import Index _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max _int64_max = np.iinfo(np.int64).max def maybe_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ if isinstance(values, (list, tuple, range)): values = construct_1d_object_array_from_listlike(values) if getattr(values, "dtype", None) == np.object_: if hasattr(values, "_values"): values = values._values values = lib.maybe_convert_objects(values) return values def is_nested_object(obj) -> bool: """ return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant. """ if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype): if any(isinstance(v, ABCSeries) for v in obj._values): return True return False def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar: """ Cast scalar to Timestamp or Timedelta if scalar is datetime-like and dtype is not object. Parameters ---------- value : scalar dtype : Dtype, optional Returns ------- scalar """ if dtype == object: pass elif isinstance(value, (np.datetime64, datetime)): value = tslibs.Timestamp(value) elif isinstance(value, (np.timedelta64, timedelta)): value = tslibs.Timedelta(value) return value def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]): """ try to cast to the specified dtype (e.g. 
convert back to bool/int or could be an astype of float64->float32 """ do_round = False if is_scalar(result): return result elif isinstance(result, ABCDataFrame): # occurs in pivot_table doctest return result if isinstance(dtype, str): if dtype == "infer": inferred_type = lib.infer_dtype(ensure_object(result), skipna=False) if inferred_type == "boolean": dtype = "bool" elif inferred_type == "integer": dtype = "int64" elif inferred_type == "datetime64": dtype = "datetime64[ns]" elif inferred_type == "timedelta64": dtype = "timedelta64[ns]" # try to upcast here elif inferred_type == "floating": dtype = "int64" if issubclass(result.dtype.type, np.number): do_round = True else: dtype = "object" dtype = np.dtype(dtype) elif dtype.type is Period: from pandas.core.arrays import PeriodArray with suppress(TypeError): # e.g. TypeError: int() argument must be a string, a # bytes-like object or a number, not 'Period return PeriodArray(result, freq=dtype.freq) converted = maybe_downcast_numeric(result, dtype, do_round) if converted is not result: return converted # a datetimelike # GH12821, iNaT is cast to float if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]: if hasattr(dtype, "tz"): # not a numpy dtype if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize("utc") result = result.tz_convert(dtype.tz) else: result = result.astype(dtype) return result def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False): """ Subset of maybe_downcast_to_dtype restricted to numeric dtypes. Parameters ---------- result : ndarray or ExtensionArray dtype : np.dtype or ExtensionDtype do_round : bool Returns ------- ndarray or ExtensionArray """ if not isinstance(dtype, np.dtype): # e.g. SparseDtype has no itemsize attr return result if isinstance(result, list): # reached via groupby.agg._ohlc; really this should be handled earlier result = np.array(result) def trans(x): if do_round: return x.round() return x if dtype.kind == result.dtype.kind: # don't allow upcasts here (except if empty) if result.dtype.itemsize <= dtype.itemsize and result.size: return result if is_bool_dtype(dtype) or is_integer_dtype(dtype): if not result.size: # if we don't have any elements, just astype it return trans(result).astype(dtype) # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([r[0]]) if isna(arr).any(): # if we have any nulls, then we are done return result elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)): # a comparable, e.g. a Decimal may slip in here return result if ( issubclass(result.dtype.type, (np.object_, np.number)) and notna(result).all() ): new_result = trans(result).astype(dtype) if new_result.dtype.kind == "O" or result.dtype.kind == "O": # np.allclose may raise TypeError on object-dtype if (new_result == result).all(): return new_result else: if np.allclose(new_result, result, rtol=0): return new_result elif ( issubclass(dtype.type, np.floating) and not is_bool_dtype(result.dtype) and not is_string_dtype(result.dtype) ): return result.astype(dtype) return result def maybe_cast_result( result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = "" ) -> ArrayLike: """ Try casting result to a different type if appropriate Parameters ---------- result : array-like Result to cast. obj : Series Input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics or datetimes as well. 
how : str, default "" How the result was computed. Returns ------- result : array-like result maybe casted to the dtype. """ dtype = obj.dtype dtype = maybe_cast_result_dtype(dtype, how) assert not is_scalar(result) if ( is_extension_array_dtype(dtype) and not is_categorical_dtype(dtype) and dtype.kind != "M" ): # We have to special case categorical so as not to upcast # things like counts back to categorical cls = dtype.construct_array_type() result = maybe_cast_to_extension_array(cls, result, dtype=dtype) elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj: """ Get the desired dtype of a result based on the input dtype and how it was computed. Parameters ---------- dtype : DtypeObj Input dtype. how : str How the result was computed. Returns ------- DtypeObj The desired dtype of the result. """ from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import Int64Dtype if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)): return np.dtype(np.int64) elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype): return
Int64Dtype()
pandas.core.arrays.integer.Int64Dtype
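# maybe_downcast_to_dtype / maybe_downcast_numeric above are pandas internals;
# the public-API counterpart of the same idea (shrink a result only when the
# cast is lossless) is pd.to_numeric(..., downcast=...). A small illustration,
# not the internal code path itself.
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])           # float64, but integer-valued
print(pd.to_numeric(s, downcast="integer").dtype)   # expected: int8, values survive the cast

s2 = pd.Series([1.5, 2.0, 3.0])          # has a fractional value
print(pd.to_numeric(s2, downcast="integer").dtype)  # expected: stays float64, no lossless cast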
from bs4 import BeautifulSoup as bs
import re
import lxml
import urllib.request
from datetime import datetime as dt
import pandas as pd

"""Initialize Variables and read data"""
start = dt.now()
read = pd.read_csv('ngc_complete.csv')
ourl = read['url']
uid = read['uid']
totalcount = []
pm = []
completeft = []

"""Clean the data"""
newurl = [x.strip() for x in ourl]
df = [list(y) for y in zip(newurl, uid)]
df = pd.DataFrame(df)
df.to_csv('ngcclean.csv')

"""Read AHRQ and turn to soup"""
for num in uid:
    num = str(num)
    url = 'http://www.guideline.gov/content.aspx?id=' + num
    info = urllib.request.urlopen(url).read()
    soup = bs(info, 'lxml')

    """Search soup for tag a"""
    taga = soup.find_all('a')
    tempft = []
    length = len(taga)

    """find full text articles and store them in list with uid ahrq url and the url of full text after cleaning full text url"""
    for z in range(length):
        linkft = str(taga[z])
        if '(full text)' in linkft:
            finalft = re.findall(r'href=(.*)"', str(linkft))
            if len(finalft) > 0:
                totalcount.append(len(finalft))
                for eachL in finalft:
                    eachL = str(eachL)
                    tempft.append(eachL)
    if len(tempft) > 0:
        a = tempft[0]
        a = a.rpartition(' id="ct')[0]
        a = a.replace('"', '')
        a = a.replace("'", '')
        completeft.append((url, a, len(tempft), num))

    """Find pubmed in ahrq and store pmid and uid in list"""
    for y in range(length):
        linkpm = str(taga[y])
        if 'PubMed' in linkpm:
            final = re.findall(r'href=(.*)"', str(linkpm))
            final = str(final)
            if 'list_uids' in final:
                index = final.index('list_uids') + 10
                index2 = final.index(' ')
                final = final[index:index2-1]
                pm.append((final, num))
                break

"""df = pd.DataFrame(completeft)"""
df.to_csv('completefttrial.csv')
df = pd.DataFrame(pm)
pandas.DataFrame
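# The scraper above pulls hrefs out of stringified <a> tags with re.findall;
# a self-contained illustration of that parsing step on an inline HTML snippet
# (the HTML is invented, not real guideline.gov output, and the regex is
# slightly tightened).
import re
from bs4 import BeautifulSoup as bs

html = """
<a href="https://example.org/paper.pdf" id="ct123">(full text)</a>
<a href="https://www.ncbi.nlm.nih.gov/pubmed?cmd=search&list_uids=12345678">PubMed</a>
"""
soup = bs(html, "lxml")

for tag in soup.find_all("a"):
    link = str(tag)
    if "(full text)" in link:
        # Grab the quoted href value (non-greedy, unlike the script's regex).
        print("full text:", re.findall(r'href="(.*?)"', link))
    if "PubMed" in link:
        # Cleaner than index arithmetic: pull the PMID straight out of list_uids.
        m = re.search(r"list_uids=(\d+)", link)
        if m:
            print("pmid:", m.group(1))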
import pandas as pd import numpy as np import openpyxl Data = pd.date_range("09/01/2021", periods = 30) Vendas = pd.Series(["R$700,00", "R$450,00", "R$1.345,00", "R$789,00", "R$809,00", "R$605,00", "R$2.034,00", "R$784,00", "R$600,00", "R$606,00", "R$809,00", "R$500,00", "R$550,00", "R$450,00", "R$1.507,00", "R$700,00", "R$450,00", "R$1.345,00", "R$789,00", "R$809,00", "R$605,00", "R$2.034,00", "R$784,00", "R$600,00", "R$606,00", "R$809,00", "R$500,00", "R$550,00", "R$450,00", "R$1.507,00"]) Vendas_Cartao = pd.Series(["R$350,00", "R$113,00", "R$587,00", "R$529,00", "R$598,00", "R$401,00", "R$1.150,00", "R$578,00", "R$450,00", "R$236,00", "R$307,00", "R$378,00", "R$230,00", "R$157,00", "R$890,00", "R$376,00", "R$98,00", "R$693,00", "R$508,00", "R$600,00", "R$205,00", "R$1.008,00", "R$597,00", "R$425,00", "R$159,00", "R$589,00", "R$360,00", "R$160,00", "R$250,00", "R$1.000,00"]) def Rs(x): x = str(x) x1 = "" cont = 0 for num in x: if len(x) >= 6: if cont == 1: x1 += "." if num == "." and cont > 2: x1 += "," x1 += "0" else: x1 += num cont += 1 return x1 Vendas_Dinheiro = Vendas.apply(lambda x: x.replace("R$", "").replace(".", "").replace(",", ".")).astype(np.float16) - Vendas_Cartao.apply(lambda x: x.replace("R$", "").replace(".", "").replace(",", ".")).astype(np.float16) Vendas_Dinheiro = Vendas_Dinheiro.astype("object").apply(Rs).apply(lambda x: f"R${x}") df =
pd.DataFrame({"Data":Data, "Vendas":Vendas, "Vendas no Cartão":Vendas_Cartao, "Vendas no Dinheiro": Vendas_Dinheiro})
pandas.DataFrame
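# The block above parses "R$1.345,00" strings by stripping the prefix and
# swapping separators, then rebuilds the text with a hand-rolled Rs() function
# on float16 values; a sketch of a more robust round trip using float64 and an
# f-string formatter (the helper names below are made up for illustration).
import pandas as pd

def to_float(brl: str) -> float:
    """'R$1.345,00' -> 1345.0 (drop prefix, thousands dot, decimal comma)."""
    return float(brl.replace("R$", "").replace(".", "").replace(",", "."))

def to_brl(value: float) -> str:
    """1345.0 -> 'R$1.345,00' via the usual separator-swap trick."""
    return "R$" + f"{value:,.2f}".replace(",", "X").replace(".", ",").replace("X", ".")

vendas = pd.Series(["R$700,00", "R$1.345,00", "R$2.034,00"])
cartao = pd.Series(["R$350,00", "R$587,00", "R$1.150,00"])

dinheiro = vendas.map(to_float) - cartao.map(to_float)   # float64, no float16 rounding
print(dinheiro.map(to_brl).tolist())   # ['R$350,00', 'R$758,00', 'R$884,00']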
# import Ipynb_importer import pandas as pd from .public_fun import * # 全局变量 class glv: def _init(): global _global_dict _global_dict = {} def set_value(key,value): _global_dict[key] = value def get_value(key,defValue=None): try: return _global_dict[key] except KeyError: return defValue ## fun_01to06 class fun_01to06(object): def __init__(self, data): self.cf = [2, 1, 1, 17, 1, 2] self.cf_a = hexlist2(self.cf) self.o = data[0:self.cf_a[-1]] self.list_o = [ "起始符", "命令标识", "应答标志", "唯一识别码", "数据单元加密方式", "数据单元长度" ] self.oj = list2dict(self.o, self.list_o, self.cf_a) self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o) self.pj = { "起始符":hex2str(self.oj["起始符"]), "命令标识":dict_list_replace('02', self.oj['命令标识']), "应答标志":dict_list_replace('03', self.oj['应答标志']), "唯一识别码":hex2str(self.oj["唯一识别码"]), "数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']), "数据单元长度":hex2dec(self.oj["数据单元长度"]), } self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o) self.next = data[len(self.o):] self.nextMark = data[len(self.o):len(self.o)+2] self.mo = self.oj["命令标识"] glv.set_value('data_f', self.next) glv.set_value('data_mo', self.mo) glv.set_value('data_01to07', self.o) print('fun_01to06 done!') ## fun_07 class fun_07: def __init__(self, data): self.mo = glv.get_value("data_mo") if self.mo == '01': self.o = fun_07_01(glv.get_value('data_f')) elif self.mo == '02' or self.mo == '03': self.o = fun_07_02(glv.get_value('data_f')) elif self.mo == '04': self.o = fun_07_04(glv.get_value('data_f')) elif self.mo == '05': self.o = fun_07_05(glv.get_value('data_f')) elif self.mo == '06': self.o = fun_07_06(glv.get_value('data_f')) else : print('命令标识:',self.mo,'有误') self.c = fun_07_cursor(glv.get_value('data_f')) self.oj = dict(self.o.oj, **self.c.oj) self.oj2 = {'数据单元':self.oj} self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True) self.pj = dict(self.o.pj, **self.c.pj) self.pj2 = {'数据单元':self.pj} self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True) print('fun_07 done!') ## fun_07_01 class fun_07_01(object): def __init__(self, data): self.cf = [6, 2, 20, 1, 1] self.cf_a = hexlist2(self.cf) self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]]) self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]]) self.cf.append(self.n*self.m) self.cf_a = hexlist2(self.cf) self.o = data[0:self.cf_a[-1]] self.list_o = [ "数据采集时间", "登入流水号", "ICCID", "可充电储能子系统数", "可充电储能系统编码长度", "可充电储能系统编码", ] self.oj = list2dict(self.o, self.list_o, self.cf_a) self.oj2 = {'车辆登入': self.oj} self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o) self.pj = { "数据采集时间":get_datetime(self.oj['数据采集时间']), "登入流水号":hex2dec(self.oj['登入流水号']), "ICCID":hex2str(self.oj['ICCID']), "可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']), "可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']), "可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']), } self.pj2 = {'车辆登入': self.pj} self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o) self.next = data[len(self.o):] self.nextMark = data[len(self.o):len(self.o)+2] glv.set_value('data_f', self.next) glv.set_value('data_07_01', self.o) print('fun_07_01 done!') def fun_07_01_06(data, n, m): if m=='00': return "NA" else : n = hex2dec(n) m = hex2dec(m) * 2 output = [] for i in range(n): output_unit = hex2str(data[i * m: i* m +m]) output.append(output_unit) return output ## fun_07_04 class fun_07_04(object): def __init__(self, data): self.cf = [6, 2] self.cf_a = hexlist2(self.cf) self.o = data[0:self.cf_a[-1]] self.list_o = [ "登出时间", "登出流水号", ] self.oj = 
list2dict(self.o, self.list_o, self.cf_a) self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o) self.pj = { "登出时间":get_datetime(self.oj['登出时间']), "登出流水号":hex2dec(self.oj['登出流水号']), } self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o) self.next = data[len(self.o):] self.nextMark = data[len(self.o):len(self.o)+2] glv.set_value('data_f', self.next) glv.set_value('data_07_04', self.o) print('fun_07_04 done!') ## fun_07_05 class fun_07_05(object): def __init__(self, data): self.cf = [6, 2, 12, 20, 1] self.cf_a = hexlist2(self.cf) self.o = data[0:self.cf_a[-1]] self.list_o = [ "平台登入时间", "登入流水号", "平台用户名", "平台密码", "加密规则", ] self.oj = list2dict(self.o, self.list_o, self.cf_a) self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o) self.pj = { "平台登入时间":get_datetime(self.oj['平台登入时间']), "登入流水号":hex2dec(self.oj['登入流水号']), "平台用户名":hex2str(self.oj['平台用户名']), "平台密码":hex2str(self.oj['平台密码']), "加密规则":dict_list_replace('07_05_05',self.oj['加密规则']), } self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o) self.next = data[len(self.o):] self.nextMark = data[len(self.o):len(self.o)+2] glv.set_value('data_f', self.next) glv.set_value('data_07_05', self.o) print('fun_07_05 done!') ## fun_07_06 class fun_07_06(object): def __init__(self, data): self.cf = [6, 2] self.cf_a = hexlist2(self.cf) self.o = data[0:self.cf_a[-1]] self.list_o = [ "登出时间", "登出流水号", ] self.oj = list2dict(self.o, self.list_o, self.cf_a) print(self.oj) self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o) self.pj = { "登出时间":get_datetime(self.oj['登出时间']), "登出流水号":hex2dec(self.oj['登出流水号']), } self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o) self.next = data[len(self.o):] self.nextMark = data[len(self.o):len(self.o)+2] glv.set_value('data_f', self.next) glv.set_value('data_07_06', self.o) print('fun_07_06 done!') ## fun_07_02 class fun_07_02: def __init__(self, data): self.o = data self.oj = {'数据采集时间': self.o[:12]} self.ol = pd.DataFrame({'01':['01']}) self.pj = {'数据采集时间': get_datetime(self.oj['数据采集时间'])} self.pl = pd.DataFrame({'01':['01']}) glv.set_value('data_f', data[12:]) glv.set_value('m_07_02', data[12:14]) self.mo_list = glv.get_value('model') self.do_list = [] while(glv.get_value('m_07_02') in self.mo_list): # 记录已执行的 self.do_list.append(glv.get_value('m_07_02')) # 删除已执行的 self.mo_list.remove(glv.get_value('m_07_02')) if glv.get_value('m_07_02') == '01': self.f_01 = fun_07_02_01(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '02': self.f_02 = fun_07_02_02(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '03': self.f_03 = fun_07_02_03(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '04': self.f_04 = fun_07_02_04(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '05': self.f_05 = fun_07_02_05(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '06': self.f_06 = fun_07_02_06(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '07': self.f_07 = fun_07_02_07(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '08': self.f_08 = fun_07_02_08(glv.get_value('data_f')) elif glv.get_value('m_07_02') == '09': self.f_09 = fun_07_02_09(glv.get_value('data_f')) else: print("fun_07_02 done") print(glv.get_value('data_f')) print(glv.get_value('m_07_02')) self.do_list.sort() for i in self.do_list: if i == '01': self.oj = dict(self.oj,**self.f_01.oj2) self.ol = pd.merge(self.ol, self.f_01.ol, left_index=True, right_index=True) self.pj = dict(self.pj,**self.f_01.pj2) self.pl = pd.merge(self.pl, self.f_01.pl, left_index=True, right_index=True) elif i 
== '02': self.oj = dict(self.oj,**self.f_02.oj2) self.ol = pd.merge(self.ol, self.f_02.ol, left_index=True, right_index=True) self.pj = dict(self.pj,**self.f_02.pj2) self.pl = pd.merge(self.pl, self.f_02.pl, left_index=True, right_index=True) elif i == '03': self.oj = dict(self.oj,**self.f_03.oj2) self.ol = pd.merge(self.ol, self.f_03.ol, left_index=True, right_index=True) self.pj = dict(self.pj,**self.f_03.pj2) self.pl =
pd.merge(self.pl, self.f_03.pl, left_index=True, right_index=True)
pandas.merge
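# Every parser class above follows the same pattern: a list of field byte
# lengths (self.cf), cumulative hex offsets from hexlist2(), and list2dict()
# slicing the hex payload into named fields. Those helpers live in public_fun
# and are not shown, so this is a hedged re-implementation of the slicing idea
# for a made-up frame layout.
from itertools import accumulate

def hex_offsets(byte_lengths):
    """Cumulative offsets in hex characters (2 chars per byte), starting at 0."""
    return [0] + [2 * x for x in accumulate(byte_lengths)]

def slice_fields(frame_hex, names, byte_lengths):
    """Cut a hex string into named fields according to the byte-length list."""
    offs = hex_offsets(byte_lengths)
    return {name: frame_hex[offs[i]:offs[i + 1]] for i, name in enumerate(names)}

# Hypothetical 3-field frame: 1-byte flag, 2-byte counter, 6-byte timestamp.
frame = "01" + "00ff" + "150901083000"
print(slice_fields(frame, ["flag", "counter", "timestamp"], [1, 2, 6]))
# {'flag': '01', 'counter': '00ff', 'timestamp': '150901083000'}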
#!/usr/bin/env python # coding: utf-8 # %% # %% ''' Welcome! This script is organized as follows: 0. Inputs a. Input parameters with activities b. Input parameters with intensities c. Input parameters with inventories 1. Inventories a. Read CEDS inventory b. Read GFED inventory c. Aggregations and exports 2. Scaling a. Calculate activity trends b. Calculate intensity trends c. Create intensity scenarios 3. Outputs a. Export intensity outputs b. Export scaling of CEDS inventory c. Export scaling of GFED inventory Note: requires inventory and GAINS scenario files as described in https://github.com/watkin-mit/TAPS/wiki#2-external-data-sources Other key user inputs are marked by the keyword "choose:" ''' # import packages import numpy as np import gcgridobj import xarray import netCDF4 import os import io import datetime import time import pandas # %% #~# 0: Inputs #~# 0(a): Input parameters with activities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~# input_location ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # folder for input data and where the output is to go input_dir = 'input_files' output_dir = 'scaling_output' #~# input_activity_scenarios ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Read in EPPA model energy outputs, which will be inputs for activity scaling EPPA_energy = pandas.read_csv(os.path.join(input_dir,'EPPA7_Sectoral_energy_20210903.csv')) # choose: scenarios considered for emitting activities (default is all in file; based on climate policy extent) act_scen = EPPA_energy.scenario.unique() print('climate policy scenarios considered:',act_scen) #~# input_years ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # choose: year inputs (default is all in file; could change based on research question) yr_list = list(np.sort(EPPA_energy.year.unique())) ref_yr = yr_list[0] end_yr = yr_list[-1] yr_list = [yr for yr in yr_list if yr <= end_yr] # limit year list and EPPA datasets to end year yrs_from_ref = list(np.array(yr_list)-ref_yr) #~# format_activity_scenarios ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # EPPA regions and gridding -- produced via gen_GPW_to_EPPA_mappings.py and process_CEDS.py in input_files/regional_mapping region_names = [var for var in EPPA_energy.region.unique()] xmask_data_CEDS = xarray.open_dataset(os.path.join(input_dir,'CEDS_EPPA_masks_filled.nc')) xmask_data_GFED = xarray.open_dataset(os.path.join(input_dir,'GFED_EPPA_masks_filled.nc')) # set the index as the year so you can fill missing values later on EPPA_energy = EPPA_energy[EPPA_energy.year <= end_yr] EPPA_energy = EPPA_energy.set_index('year') # certain zero values are included as 'Eps' ... 
change those to zero EPPA_energy['seuse (EJ)'] = pandas.to_numeric(EPPA_energy['seuse (EJ)'],errors = 'coerce').astype(float) EPPA_energy['seuse (EJ)'] = EPPA_energy['seuse (EJ)'].fillna(0) # EPPA 'energy' categories to scale CEDS 'fuels' ene_to_fuels = {'total-coal':['COAL'], 'solid-biofuel':['COAL','GAS','ROIL','bio','hydro','nuclear','renewables'], 'liquid-fuel-plus-natural-gas':['GAS','ROIL'], 'process':['COAL','GAS','ROIL','bio','hydro','nuclear','renewables'] } # now, read in EPPA non-energy output for population and land use scaling EPPA_other = pandas.read_csv(os.path.join(input_dir,'EPPA7_nonsector_results.csv')) # reformat and extract the variables you need EPPA_other = EPPA_other[EPPA_other.year <= end_yr] EPPA_other.set_index('year',inplace=True) pop = '04_Population (million people)' # for population scaling landdict = {'CROP':'31_landuse_Cropland', # for land use scaling 'LIVE':'33_landuse_Pasture', 'FORS':'34_landuse_Managed forest' } # certain zero values are included as 'Eps' ... change those to zero EPPA_other.Outlook = pandas.to_numeric(EPPA_other.Outlook,errors = 'coerce').astype(float).fillna(0) # map scenarios from EPPA_energy to EPPA_other format scen_map_EPPA_file = pandas.read_csv(os.path.join(input_dir,'scen_map_EPPA.csv')) scen_map_EPPA = dict(zip(scen_map_EPPA_file.EPPA_energy,scen_map_EPPA_file.EPPA_other)) # %% #~# 0(b): Input parameters with intensities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'Note: GAINS files must be separately uploaded to input_dir, as described in https://github.com/watkin-mit/TAPS/wiki#2-external-data-sources' #~# input_GAINSEMF ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # files GAINSEMF_em_file = 'G102021export_EMISS_LIMITS_2.csv' # emissions GAINSEMF_act_file = 'G102021ACTIVITY_LIMITS_2.csv' # activities # sectors (GAINS activities to GAINS emissions) sec_map_GAINSGAINS_file = pandas.read_csv(os.path.join(input_dir,'GAINSEMF_sectoral_mapping.csv')) sec_map_GAINSGAINS = dict(zip(sec_map_GAINSGAINS_file.Emissions,sec_map_GAINSGAINS_file.Activities)) # sectors (GAINS emissions to inventory) sec_map_inv_GAINS_file = pandas.read_csv(os.path.join(input_dir,'CEDS_GAINSEMF_sectoral_mapping.csv')) sec_map_inv_GAINS = sec_map_inv_GAINS_file.groupby('CEDS2020')['GAINS_EMF'].apply(list).to_dict() sec_map_inv_GAINSfuels = sec_map_inv_GAINS_file.groupby(['CEDS2020','FuelCEDS'])['GAINS_EMF'].apply(list).to_dict() # regions (from other mapping doc) reg_map_EPPAGAINSEMF_file = pandas.read_csv(os.path.join(input_dir,'EPPA7_GAINSEMF_regional_mapping.csv'),usecols=['EPPA7','EMF30REGION'],nrows=25) reg_GAINSEMF = reg_map_EPPAGAINSEMF_file['EMF30REGION'].unique() # list of all GAINSEMF regions reg_map_EPPAGAINSEMF = reg_map_EPPAGAINSEMF_file.groupby('EPPA7')['EMF30REGION'].apply(list).to_dict() #~# input_GAINSNH3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # NH3 files (n) fn = 'G102021export_NH3_by_source_G20.csv' n = pandas.read_csv(os.path.join(input_dir,fn)) n = n.set_index('IDYEARS') # correct any negative numbers. 
No NA fill needed (no blanks) n.iloc[:,-3:] = n.iloc[:,-3:].abs() # sectors (GAINS to inventory) sec_map_inv_GAINSNH3_file = pandas.read_csv(os.path.join(input_dir,'CEDS_GAINSNH3_sectoral_mapping.csv')) sec_map_inv_GAINSNH3fuels = sec_map_inv_GAINSNH3_file.groupby(['CEDS','CEDSFuel'])['GAINS'].apply(list).to_dict() # regions reg_map_EPPAGAINSNH3_file = pandas.read_csv(os.path.join(input_dir,'EPPA7_GAINSG20_FAO_regional_mapping.csv')) reg_GAINSNH3 = reg_map_EPPAGAINSNH3_file['GAINSG20'].unique() # list of all GAINSNH3 regions reg_map_EPPAGAINSNH3 = reg_map_EPPAGAINSNH3_file.groupby('EPPA7')['GAINSG20'].apply(list).to_dict() # subset of EPPA regions that are well-covered by G20 reg_G20 = ['CAN','USA','MEX','BRA','EUR','RUS','IND','KOR','CHN','JPN','ANZ'] # so the others would be ['LAM','AFR','ROE','MES','ASI','IDZ','REA'] #~# input_FAO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # agriculture (FAO to G20) sectors and regions sec_map_NH3FAO = sec_map_inv_GAINSNH3_file.groupby(['GAINS'])[['Item','Element','Units']].apply(lambda g: g.values.tolist()).to_dict() reg_map_NH3FAO = reg_map_EPPAGAINSNH3_file.groupby(['GAINSG20'])['FAO'].apply(list).to_dict() reg_map_EPPAFAO = reg_map_EPPAGAINSNH3_file.groupby(['EPPA7'])['FAO'].apply(list).to_dict() reg_FAO = (reg_map_EPPAGAINSNH3_file.FAO.unique()) # list of FAO global regions # FAO data, scenarios and years FAOyr = [2012,2030,2050] FAO = pandas.read_csv(os.path.join(input_dir,'FOFA2050RegionsData_all.csv')).set_index('Year').loc[FAOyr] FAOscen = ['Business As Usual','Toward Sustainability'] # matching GAINS CLE / MFR scen_map_EPPAFAO = dict(zip(scen_map_EPPA_file.EPPA_energy,scen_map_EPPA_file.FAO)) # multiply normalizations by this to normalize to our base year, not the FAO base year (2012) FAOscale = [(fyr-yr_list[0])/(fyr-FAOyr[0]) for fyr in FAOyr[1:] ] # Separate csv for total domestic production (to avoid having to do the math in subsequent loops) FAOtotal = pandas.read_csv(os.path.join(input_dir,'FOFA2050RegionsData_all_total.csv')) FAOtotal = FAOtotal.set_index(FAOtotal.columns[0])[list(map(str, FAOyr))].reset_index() # Separate csv for total yields by scenario (to avoid having to do the math in subsequent loops) FAOyield = pandas.read_csv(os.path.join(input_dir,'FOFA2050RegionsData_all_yield.csv')) # prepare the conversion/extension from FAO years to EPPA years FAOyieldn = FAOyield.T.iloc[1:,:] FAOyieldn.index = pandas.to_datetime(FAOyieldn.index,format="%Y") FAOyieldn2 = FAOyieldn.reindex(pandas.to_datetime(list(np.sort(list(set(yr_list+FAOyr)))),format="%Y"),).astype(float) # linearly interpolate and extrapolate (based on the final timepoints) FAOyieldint = FAOyieldn2.interpolate(method="slinear", axis=0, fill_value="extrapolate", limit_direction="both") # and convert back into a dataframe with a readable format of EPPA years FAOyieldint.index = FAOyieldint.index.year FAOEPPA = pandas.concat([FAOyield.T.iloc[0:1,:],FAOyieldint]).T.drop(columns=FAOyr[0]) # %% #~# 0(c): Input parameters with inventories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'Note: Inventory files must be separately downloaded to a user-specified directory, as described in https://github.com/watkin-mit/TAPS/wiki#2-external-data-sources' #~# Prepare CEDS_GBD-MAPS and GFED4.1s inputs ~~~~~~~~~~~~~~~~~ # CEDS inputs (McDuffie et al., 2020) CEDS2020_data_dir = '/net/geoschem/data/gcgrid/data/ExtData/HEMCO/CEDS/v2020-08' # choose: specify your folder fuels = ['total-coal','solid-biofuel','liquid-fuel-plus-natural-gas','process'] # species mapping spc_map_csv = 
pandas.read_csv(os.path.join(input_dir,'spc_map.csv')) spc_map_CEDS = spc_map_csv.groupby('EPPA')['CEDS'].apply(list).to_dict() spc_map_CEDS['VOC'] = set(spc_map_CEDS['VOC']) # remove duplicates spc_map_GFED = spc_map_csv.dropna().groupby('EPPA')['GFED'].apply(list).to_dict() spc_map_CEDSGFED = spc_map_csv.groupby('CEDS')['GFED'].apply(list).to_dict() spc_map_GAINS = dict(zip(spc_map_csv.dropna().EPPA,spc_map_csv.dropna().GAINS)) # also keep a list of all the species in each for convenience spc_all_CEDS = list(spc_map_csv.CEDS.unique()) spc_all_GFED = list(spc_map_csv.GFED.unique()) spc_all_GFED.remove(np.nan) spc_GAINSNH3 = ['NH3'] spc_GAINSEMF = [spc for spc in spc_map_csv.dropna().GAINS.unique() if spc not in spc_GAINSNH3] # GFED inputs (van Marle et al., 2017). This reflects the latest source for the 2014 base year used here GFED_data_dir = '/net/geoschem/data/gcgrid/data/ExtData/HEMCO/GFED4/v2015-10' # choose: your download folder # compile monthly base year data tmplst = [] for mth in range(1,13): file = f'GFED4_gen.025x025.{str(ref_yr).zfill(4)}{str(mth).zfill(2)}.nc' tmplst.append(xarray.open_dataset(os.path.join(GFED_data_dir,str(ref_yr),file))) GFED = xarray.concat(tmplst,dim='time') # read in GFED emission factors (EF), from https://www.geo.vu.nl/~gwerf/GFED/GFED4/ancill/ GFED4_EF = pandas.read_csv(input_dir+'/'+"GFED4_Emission_Factors.txt", sep="\s+",lineterminator='\n', header=15,skiprows=[1], names=['SPECIE','SAVA','BORF','TEMF','DEFO','PEAT','AGRI']) #~# prepare the CEDS and GFED grids ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # CEDS: extract grid cell area from CEDS 0.5x0.5 example file (with example species and fuel) spc = 'BC' f = 'process' CEDS_path = os.path.join(CEDS2020_data_dir,'{:04d}'.format(ref_yr),'{:s}-em-{:s}_CEDS_{:04d}.nc'.format(spc,f,ref_yr)) f_nc = netCDF4.Dataset(CEDS_path,'r') try: hrz_grid_CEDS = gcgridobj.latlontools.extract_grid(f_nc) finally: f_nc.close() # GFED: extract grid cell area from GFED 0.25x0.25 example file GFED_path = os.path.join(GFED_data_dir,str(ref_yr),'GFED4_gen.025x025.201401.nc') f_nc = netCDF4.Dataset(GFED_path,'r') try: hrz_grid_GFED = gcgridobj.latlontools.extract_grid(f_nc) finally: f_nc.close() #~# Prepare the sectoral secaling with the activity data ~~~~~~~~~~~~~~~ inv_sec_map_file = pandas.read_csv(os.path.join(input_dir,'sectoral_mapping_EPPA7_inventories.csv')) # map inventory sector names to codes inv_sec_map = inv_sec_map_file.groupby('Inventory_Name')['Inventory_Code'].apply(list).to_dict() for key, values in inv_sec_map.items(): inv_sec_map[key] = list(set(inv_sec_map[key])) # map inventory sector names to which inventory it comes from inv_sec_map_source = inv_sec_map_file.groupby('Inventory_Name')['Inventory'].apply(list).to_dict() for key, values in inv_sec_map_source.items(): inv_sec_map_source[key] = list(set(inv_sec_map_source[key])) # find the subset of sectors from GFED (labeled "DM" = Dry Matter) GFED_sec_dict = {key:value for (key,value) in inv_sec_map.items() if 'DM' in value[0]} # map inventory sector names to activity sectors inv_act_sec_map = inv_sec_map_file.groupby('Inventory_Name')['EPPA7'].apply(list).to_dict() # %% #~# 1: Inventories #~# 1(a): Process CEDS inventory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Read in the CEDS base-year emissions (em) t0 = time.time() print('getting base-year CEDS emissions') em_CEDS = {} em_CEDS_export = [] # export file for other scripts for spc in spc_map_CEDSGFED.keys(): print('working on ',spc) em_CEDS[spc] = {} for f in fuels: em_CEDS[spc][f] = {} 
CEDS_path = os.path.join(CEDS2020_data_dir,'{:04d}'.format(ref_yr),'{:s}-em-{:s}_CEDS_{:04d}.nc'.format(spc,f,ref_yr)) CEDS = xarray.open_dataset(CEDS_path) for sec,values in CEDS.items(): em_CEDS[spc][f][sec] = {} # use annual mean (m) for annual scaling (to keep the base year's monthly distribution, as in Feng et al. (2020) for the SSPs)) CEDSm = values.mean(dim='time') # get emissions (em) from species (kg m-2 s-1 * m2 * s yr-1 * Tg kg-1 = Tg yr-1), using the Sidereal year CEDSem = CEDSm * hrz_grid_CEDS.area * (365.25636*24*3600) * 1e-9 for reg in region_names: # use masks (specifying proportions of each grid cell in each region) to get regional data CEDSemgrid = CEDSem * xmask_data_CEDS[reg] em_CEDS[spc][f][sec][reg] = float(CEDSemgrid.sum().values) # and export inventory values em_CEDS_export.append([spc,f,sec,reg,em_CEDS[spc][f][sec][reg]]) em_CEDS_path = os.path.join(output_dir,'em_CEDS.csv') em_CEDS_df = pandas.DataFrame(em_CEDS_export, columns = ['Species','Fuel','Sector', 'Region',str(ref_yr)]) em_CEDS_df.to_csv(em_CEDS_path, index = False, header=True) print('seconds taken: ' + str(time.time() - t0)) print('estimated minutes to finish script: ',round((time.time() - t0)*(11/60))) # %% #~# 1(b): Process GFED inventory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # now, do the same for GFED (defined as "process" emissions) t0 = time.time() print('getting base-year GFED emissions') em_GFED = {} em_GFED_export = [] # export file for other scripts for i_sec,(sec_name,sec_code) in enumerate(GFED_sec_dict.items()): em_GFED[sec_name] = {} GFEDm = GFED[sec_code].mean(dim='time') # use annual mean (m) for annual scaling (same as above) sec_code_EF = sec_code[0][3:] # emission factor code doesn't have the "DM_" for spc in spc_all_GFED: print('working on',spc) em_GFED[sec_name][spc] = {} EF = GFED4_EF[GFED4_EF.SPECIE == spc][sec_code_EF].squeeze() # get emissions from species (kg m-2 s-1 * m2 * s yr-1 * Tg kg-1 = Tg yr-1) GFEDem = GFEDm * EF * hrz_grid_GFED.area * (365.25636*24*3600) * 1e-12 for reg in region_names: # allot emissions to each region based on grid cell area and mask data GFEDemgrid = GFEDem * xmask_data_GFED[reg] em_GFED[sec_name][spc][reg] = float(GFEDemgrid.sum().to_array()) # and export inventory values in format that matches the CEDS order (species, region, sector) em_GFED_export.append([spc,reg,sec_name,em_GFED[sec_name][spc][reg]]) em_GFED_path = os.path.join(output_dir,'em_GFED.csv') em_GFED_df = pandas.DataFrame(em_GFED_export, columns = ['Species','Region','Sector', str(ref_yr)]) em_GFED_df.to_csv(em_GFED_path, index = False, header=True) print('seconds taken: ' + str(time.time() - t0)) # %% #~# 1(c): Aggregations and exports ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~# inventory by fuel across pollutants, to distinguish the nonzero categories for activity scaling emsum = {} for fuel in fuels: emsum[fuel] = {} for inv_sector in inv_act_sec_map.keys(): emsum[fuel][inv_sector] = {} for reg in region_names: emsum[fuel][inv_sector][reg] = 0.0 for s, (spc_cat,spcs_CEDS) in enumerate(spc_map_CEDS.items()): for spc_CEDS in spcs_CEDS: if inv_sector in GFED_sec_dict.keys(): # define GFED fuel as "process"; GFED sum is 0 for other fuels if fuel == 'process': for spc_GFED in spc_map_CEDSGFED[spc_CEDS]: if type(spc_GFED) == str: emsum[fuel][inv_sector][reg] += em_GFED[inv_sector][spc][reg] else: emsum[fuel][inv_sector][reg] = 0.0 else: sec = spc_CEDS + '_' + inv_sec_map[inv_sector][0] emsum[fuel][inv_sector][reg] += 
em_CEDS[spc_CEDS][fuel][sec][reg] #~# calculate proportion of species/sector/region's emissions from each CEDS fuel (for activity scaling) fpct = {} fpctexport = [] for spc in spc_map_CEDSGFED.keys(): fpct[spc] = {} for inv_sector,sec_code in inv_sec_map.items(): if inv_sector not in GFED_sec_dict.keys(): sec = spc + '_' + sec_code[0] fpct[spc][sec] = {} for reg in region_names: fpct[spc][sec][reg] = {} # first, find the sum over all fuels fuelsum = 0.0 for f in fuels: fuelsum += em_CEDS[spc][f][sec][reg] # then, find the proportion from each fuel for f in fuels: if fuelsum == 0.0: fpct[spc][sec][reg][f] = 0.0 else: fpct[spc][sec][reg][f] = em_CEDS[spc][f][sec][reg] / fuelsum # the rest is export code if desired: fpctexport.append([spc,sec,reg,f,fpct[spc][sec][reg][f]]) fpctpath = os.path.join(output_dir,'fuel_proportions.csv') fpctdf =
pandas.DataFrame(fpctexport, columns = ['Species','Sector','Region','Fuel','2014_Proportion'])
pandas.DataFrame
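For context, the completion above collects the exported rows into a pandas.DataFrame with an explicit column list before writing a CSV. A minimal self-contained sketch of that pandas.DataFrame pattern follows; the row values and the output filename are invented for illustration, not taken from the script.

import pandas as pd

# toy rows shaped like the script's fpctexport records: [species, sector, region, fuel, proportion]
rows = [
    ['BC', 'BC_power_sector', 'USA', 'total-coal', 0.42],
    ['BC', 'BC_power_sector', 'USA', 'process', 0.58],
]
fpctdf = pd.DataFrame(rows, columns=['Species', 'Sector', 'Region', 'Fuel', '2014_Proportion'])
fpctdf.to_csv('fuel_proportions_example.csv', index=False, header=True)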
"""ROSS Machine Learning. ROSS-ML is a module to create neural networks aimed at calculating rotodynamic coefficients for bearings and seals. """ # fmt: off import shutil import webbrowser from pathlib import Path from pickle import dump, load import numpy as np import pandas as pd from plotly import graph_objects as go from plotly.figure_factory import create_scatterplotmatrix from plotly.subplots import make_subplots from scipy.stats import (chisquare, entropy, ks_2samp, normaltest, skew, ttest_1samp, ttest_ind) from sklearn.decomposition import PCA from sklearn.feature_selection import (SelectKBest, f_regression, mutual_info_regression) from sklearn.metrics import (explained_variance_score, mean_absolute_error, mean_squared_error, r2_score) from sklearn.model_selection import train_test_split from sklearn.preprocessing import (MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler) from sklearn.tree import DecisionTreeRegressor from statsmodels.distributions.empirical_distribution import ECDF from statsmodels.stats.diagnostic import het_breuschpagan from tensorflow.keras.layers import Activation, Dense, Dropout from tensorflow.keras.models import Sequential, load_model from tensorflow.keras.optimizers import Adam # fmt: on __all__ = [ "HTML_formater", "available_models", "remove_model", "Pipeline", "Model", "PostProcessing", ] def HTML_formater(df, name, file): """Table to HTML formater. This function takes a pandas DataFrame and writes it to HTML format. It's an auxiliary function to build the HTML Report. Parameters ---------- df : pd.DataFrame DESCRIPTION. name : str Neural network tag, indicating from which model it refers to. file : str The file name to save the DataFrame. """ path = Path(__file__).parent html_string = """ <html> <head><title>HTML Pandas Dataframe with CSS</title></head> <link rel="stylesheet" type="text/css" href="css/panda_style.css"/> <body> {table} </body> </html>. """ with open(path / f"models/{name}/tables/{file}.html", "w") as f: f.write(html_string.format(table=df.to_html(classes="mystyle"))) def available_models(): """Check for available neural network models. This function returns a list of all neural network models saved. If None is available, it returns a message informing there's no models previously saved. Returns ------- dirs : list List of all neural network models saved. Examples -------- >>> import rossml as rsml >>> rsml.available_models() ['test_model'] """ try: path = Path(__file__).parent / "models" dirs = [folder.name for folder in path.iterdir() if folder.is_dir()] if len(dirs) == 0: dirs = "No neural network models available." except FileNotFoundError: dirs = "No neural network models available." return dirs def remove_model(name): """Remove a previously saved network model. This function removes a neural network from "models" folder. Parameters ---------- name : str The neural network folder's name to be deleted. 
Examples -------- >>> import pandas as pd >>> import rossml as rsml >>> file = Path(__file__).parent / "tests/data/seal_data.csv" >>> df = pd.read_csv(file) Building the neural network model >>> name = "Model" >>> D = rsml.Pipeline(df, name) >>> sorted(rsml.available_models()) ['Model', 'test_model'] Removing a neural network >>> rsml.remove_model('Model') >>> rsml.available_models() ['test_model'] """ if isinstance(name, str): path = Path(__file__).parent / f"models/{name}" elif isinstance(name, Path): path = name for child in path.glob("*"): if child.is_file(): child.unlink() elif child.is_dir(): remove_model(child) path.rmdir() class Pipeline: r"""Generate an artificial neural netowrk. This class is a pipeline for building neural network models. From the data spreadsheet to the model it self, each function for this class has to be called in an exact order to guarantee its the correct functioning. The basic order to follow: - Pipeline - set_features() - set_labels() - feature_reduction() - data_scaling() - build_Sequential_ANN() - model_run() Parameters ---------- df : pd.Dataframe name : str A tag for the neural network. This name is used to save the model, figures and tables within a folder named after this string. Examples -------- >>> import pandas as pd >>> import rossml as rsml >>> from pathlib import Path >>> from sklearn.preprocessing import RobustScaler Importing and collecting data >>> file = Path(__file__).parent / "tests/data/seal_data.csv" >>> df = pd.read_csv(file) Building the neural network model >>> name = "Model" # this name will be used to save your work >>> D = rsml.Pipeline(df, name) Selecting features and labels >>> features = D.set_features(1, 21) >>> labels = D.set_labels(21, len(D.df.columns)) >>> new_features = D.feature_reduction(15) Data scaling and running the model >>> x_train, x_test, y_train, y_test = D.data_scaling( ... 0.1, scalers=[RobustScaler(), RobustScaler()], scaling=True ... ) >>> model = D.build_Sequential_ANN(4, [50, 50, 50, 50]) >>> model, predictions = D.model_run(batch_size=300, epochs=200) # doctest: +ELLIPSIS Epoch 1/200... Get the model configurations to change it afterwards. These evaluations are important to decide whether the neural network meets or not the user requirements. >>> # model.get_config() >>> # fig = D.model_history() >>> # D.metrics() >>> df_test = D.hypothesis_test() Post-processing Data >>> results = PostProcessing(D.train, D.test, name) >>> fig = results.plot_overall_results() >>> fig = results.plot_confidence_bounds(a = 0.01) >>> fig = results.plot_standardized_error() >>> fig = results.plot_qq() Saving a model >>> # D.save() Displays the HTML report >>> # url = 'results' >>> # results.report(url) Loading a neural network model >>> # model = rsml.Model("Model") >>> # X = Pipeline(df).set_features(1, 21) >>> # results = model.predict(X) Removing models >>> rsml.remove_model('Model') """ def __init__(self, df, name="Model"): path_model = Path(__file__).parent / f"models/{name}" path_img = Path(__file__).parent / f"models/{name}/img" path_table = Path(__file__).parent / f"models/{name}/tables" if not path_model.exists(): path_model.mkdir() path_img.mkdir() path_table.mkdir() self.df = df self.df.dropna(inplace=True) self.name = name self.best = None def set_features(self, start, end): """Select the features from the input DataFrame. This methods takes the DataFrame and selects all the columns from "start" to "end" values to indicate which columns should be treated as features. 
Parameters ---------- start : int Start column of dataframe features end : int End column of dataframe features Returns ------- x : pd.DataFrame DataFrame with features parameters Example ------- """ self.x = self.df[self.df.columns[start:end]] self.columns = self.x.columns return self.x def set_labels(self, start, end): """Select the labels from the input DataFrame. This methods takes the DataFrame and selects all the columns from "start" to "end" values to indicate which columns should be treated as labels. Parameters ---------- start : int Start column of dataframe labels end : int End column of dataframe labels Returns ------- y : pd.DataFrame DataFrame with labels parameters Example ------- """ self.y = self.df[self.df.columns[start:end]] return self.y def feature_reduction(self, n): """Feature reduction using Decision Tree Regression method. This function uses Decision Tree Regression to select the "n" best features. Due it's random aspects, the selected best features may not be the same every time this function is called. Parameters ---------- n : int Number of relevant features. Returns ------- x : pd.DataFrame Minimum number of features that satisfies "n" for each label. """ # define the model model = DecisionTreeRegressor() # fit the model model.fit(self.x, self.y) # get importance importance = model.feature_importances_ # summarize feature importance featureScores = pd.concat( [pd.DataFrame(self.x.columns), pd.DataFrame(importance)], axis=1 ) featureScores.columns = ["Specs", "Score"] self.best = featureScores.nlargest(n, "Score")["Specs"].values self.x = self.x[self.best] return self.x def data_scaling(self, test_size, scaling=False, scalers=None): """Perform data scalling. This function scales the input and output data. The DataFrame provided may have multiple variables with different units and leading to completely distinguished magnitude values. Unscaled input variables can result in a slow or unstable learning process, whereas unscaled target variables on regression problems can result in exploding gradients causing the learning process to fail. Parameters ---------- test_size : float Percentage of data destined for testing. scaling : boolean, optional Choose between scaling the data or not. The default is False. scalers : scikit-learn object scikit-learn scalers method. Check sklearn.preprocessing for more informations about each scaler method. The default is None. Returns ------- x_train : array Features destined for training. x_test : array Features destined for test. y_train : array Labels destined for training. y_test : array Labels destined for test. """ if scalers is None: scalers = [] self.x_train, self.x_test, self.y_train, self.y_test = train_test_split( self.x, self.y, test_size=test_size ) if scaling: if len(scalers) >= 1: self.scaler1 = scalers[0] self.x_train = self.scaler1.fit_transform(self.x_train) self.x_test = self.scaler1.transform(self.x_test) if len(scalers) == 2: self.scaler2 = scalers[1] self.y_train = self.scaler2.fit_transform(self.y_train) self.y_test = self.scaler2.transform(self.y_test) else: self.x_train = self.x_train.values self.x_test = self.x_test.values self.y_train = self.y_train.values self.y_test = self.y_test.values self.scaler1 = None self.scaler2 = None return self.x_train, self.x_test, self.y_train, self.y_test def build_Sequential_ANN(self, hidden, neurons, dropout_layers=None, dropout=None): """Construct a sequential Artificial Neural Network from Keras models. Parameters ---------- hidden : int Number of hidden layers. 
neurons : list Number of neurons per layer. dropout_layers : list, optional Dropout layers position. The default is []. dropout : list, optional List with dropout values. The default is []. Returns ------- model : keras neural network """ if dropout_layers is None: dropout_layers = [] if dropout is None: dropout = [] self.model = Sequential() self.model.add(Dense(len(self.x.columns), activation="relu")) j = 0 # Dropout counter for i in range(hidden): if i in dropout_layers: self.model.add(Dropout(dropout[j])) j += 1 self.model.add(Dense(neurons[i], activation="relu")) self.model.add(Dense(len(self.y.columns))) self.config = self.model.get_config() return self.model def model_run(self, optimizer="adam", loss="mse", batch_size=16, epochs=500): """Run the neural network model. Parameters ---------- optimizer : string, optional Choose a optimizer. The default is 'adam'. loss : string, optional Choose a loss. The default is 'mse'. batch_size : int, optional batch_size . The default is 16. epochs : int, optional Choose number of epochs. The default is 500. Returns ------- model : keras neural network """ self.model.compile(optimizer=optimizer, loss=loss) self.history = self.model.fit( x=self.x_train, y=self.y_train, validation_data=(self.x_test, self.y_test), batch_size=batch_size, epochs=epochs, ) self.predictions = self.model.predict(self.x_test) if self.scaler2 is None: self.train = pd.DataFrame( self.scaler2.inverse_transform(self.predictions), columns=self.y.columns ) self.test = pd.DataFrame( self.scaler2.inverse_transform(self.y_test), columns=self.y.columns ) else: self.train =
pd.DataFrame(self.predictions, columns=self.y.columns)
pandas.DataFrame
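As a reference for the completion above, wrapping a 2-D prediction array in a DataFrame keyed by the label column names can be sketched independently; the array and the column names below (loosely styled after rotordynamic coefficients) are made up and do not come from the rossml objects.

import numpy as np
import pandas as pd

label_columns = ['Kxx', 'Kxy', 'Cxx', 'Cxy']          # hypothetical label names
predictions = np.random.rand(5, len(label_columns))   # stand-in for model.predict(x_test)
train = pd.DataFrame(predictions, columns=label_columns)
print(train.head())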
""" Tests for DatetimeIndex timezone-related methods """ from datetime import date, datetime, time, timedelta, tzinfo import dateutil from dateutil.tz import gettz, tzlocal import numpy as np import pytest import pytz from pandas._libs.tslibs import conversion, timezones import pandas.util._test_decorators as td import pandas as pd from pandas import ( DatetimeIndex, Index, Timestamp, bdate_range, date_range, isna, to_datetime, ) import pandas._testing as tm class FixedOffset(tzinfo): """Fixed offset in minutes east from UTC.""" def __init__(self, offset, name): self.__offset = timedelta(minutes=offset) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return timedelta(0) fixed_off = FixedOffset(-420, "-07:00") fixed_off_no_name = FixedOffset(-330, None) class TestDatetimeIndexTimezones: # ------------------------------------------------------------- # DatetimeIndex.tz_convert def test_tz_convert_nat(self): # GH#5546 dates = [pd.NaT] idx = DatetimeIndex(dates) idx = idx.tz_localize("US/Pacific") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern")) idx = idx.tz_convert("UTC") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC")) dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT] idx = DatetimeIndex(dates) idx = idx.tz_localize("US/Pacific") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) idx = idx + pd.offsets.Hour(5) expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) idx = idx.tz_convert("US/Pacific") expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) idx = idx + np.timedelta64(3, "h") expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) @pytest.mark.parametrize("prefix", ["", "dateutil/"]) def test_dti_tz_convert_compat_timestamp(self, prefix): strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") conv = idx[0].tz_convert(prefix + "US/Pacific") expected = idx.tz_convert(prefix + "US/Pacific")[0] assert conv == expected def test_dti_tz_convert_hour_overflow_dst(self): # Regression test for: # https://github.com/pandas-dev/pandas/issues/13306 # sorted case US/Eastern -> UTC ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] tt = DatetimeIndex(ts).tz_localize("US/Eastern") ut = tt.tz_convert("UTC") expected = Index([13, 14, 13]) tm.assert_index_equal(ut.hour, expected) # sorted case UTC -> US/Eastern ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] tt = DatetimeIndex(ts).tz_localize("UTC") ut = tt.tz_convert("US/Eastern") expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) # unsorted case US/Eastern -> UTC ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] tt = DatetimeIndex(ts).tz_localize("US/Eastern") ut = tt.tz_convert("UTC") expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
pandas._testing.assert_index_equal
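A minimal illustration of the assertion pattern used in the test above: tz-convert a DatetimeIndex and compare it against an expected index using pandas' private testing helper. The timestamp is arbitrary; 09:50 US/Eastern on 2008-05-12 is 13:50 UTC under daylight saving.

import pandas as pd
import pandas._testing as tm  # private helper module used throughout pandas' own test suite

idx = pd.DatetimeIndex(["2008-05-12 09:50:00"], tz="US/Eastern")
expected = pd.DatetimeIndex(["2008-05-12 13:50:00"], tz="UTC")
tm.assert_index_equal(idx.tz_convert("UTC"), expected)  # passes silently, raises AssertionError on mismatch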
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin


class OneHotEncoder(TransformerMixin):
    def __init__(self, covariates):
        self.covariates = covariates
        self.transformed_covariates = []

    def fit(self, df, *args, **kwargs):
        from sklearn.preprocessing import OneHotEncoder
        self.encoder = OneHotEncoder()
        self.dummies = self.encoder.fit_transform(df[self.covariates]).toarray()
        return self

    def transform(self, df, *args, **kwargs):
        temp = df.copy()
        temp.drop(self.covariates, axis=1, inplace=True)
        columns = np.array([])
        for i, covariate in enumerate(self.covariates):
            columns = np.append(columns, covariate + '_' + self.encoder.categories_[i])
        self.dummies =
pd.DataFrame(self.dummies, columns=columns, index=df.index)
pandas.DataFrame
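The step the transformer is building up to, turning the encoder's dense dummy array back into a labelled DataFrame aligned with the original index, can be sketched on toy data as follows; the column and category names are invented.

import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df = pd.DataFrame({'color': ['red', 'blue', 'red']}, index=[10, 11, 12])
encoder = OneHotEncoder()
dummies = encoder.fit_transform(df[['color']]).toarray()
columns = [f'color_{cat}' for cat in encoder.categories_[0]]  # ['color_blue', 'color_red']
dummies = pd.DataFrame(dummies, columns=columns, index=df.index)
print(dummies)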
# encoding: utf-8 import itertools import random from datetime import date from typing import List, Tuple import pandas as pd class DataFrameMock: @staticmethod def df_generic(sample_size): """ Create a generic DataFrame with ``sample_size`` samples and 2 columns. The 2 columns of the returned DataFrame contain numerical and string values, respectively. Parameters ---------- sample_size: Number of samples in the returned DataFrame. Returns ------- pd.DataFrame Pandas DataFrame instance with ``sample_size`` samples and 2 columns: one with numerical values and the other with string values only. """ return pd.DataFrame( { "metadata_num_col": list(range(sample_size)), "metadata_str_col": [f"value_{i}" for i in range(sample_size)], "exam_num_col_0": list(range(sample_size)), "exam_num_col_1": list(range(sample_size)), "exam_str_col_0": [f"value_{i}" for i in range(sample_size)], } ) @staticmethod def df_many_nans(nan_ratio: float, n_columns: int) -> pd.DataFrame: """ Create pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of NaNs. DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns contain less than ``nan_ratio`` ratio of NaNs. Parameters ---------- nan_ratio : float Ratio of NaNs that will be present in ``n_columns`` of the DataFrame. n_columns : int Number of columns that will contain ``nan_ratio`` ratio of NaNs. Returns ------- pd.DataFrame Pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of NaNs and 5 columns with a lower ratio of NaNs. """ many_nan_dict = {} sample_count = 100 # Create n_columns columns with NaN nan_sample_count = int(sample_count * nan_ratio) for i in range(n_columns): many_nan_dict[f"nan_{i}"] = [pd.NA] * nan_sample_count + [1] * ( sample_count - nan_sample_count ) # Create not_nan_columns with less than nan_ratio ratio of NaNs not_nan_columns = 5 for j in range(not_nan_columns): nan_ratio_per_column = nan_ratio - 0.01 * (j + 1) # If nan_ratio_per_column < 0, set 0 samples to NaN (to avoid negative # sample counts) if nan_ratio_per_column < 0: nan_sample_count = 0 else: nan_sample_count = int(sample_count * nan_ratio_per_column) many_nan_dict[f"not_nan_{j}"] = [pd.NA] * nan_sample_count + [1] * ( sample_count - nan_sample_count ) return pd.DataFrame(many_nan_dict) @staticmethod def df_nans_filled(columns: List[str]) -> pd.DataFrame: """Starting from the df returned by ``.df_many_nans``, set ``columns`` to 1s. Parameters ---------- columns : List[str] Name of the columns to set to 1s Returns ------- pd.DataFrame DataFrame with the ``columns`` set to 1s """ df = DataFrameMock.df_many_nans(nan_ratio=0.5, n_columns=3) for column in columns: df[column] = pd.Series(pd.Series([1] * 100)) return df @staticmethod def df_same_value(n_columns: int) -> pd.DataFrame: """ Create pandas DataFrame with ``n_columns`` containing the same repeated value. DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns contain different valid values (and a variable count of a repeated value). Parameters ---------- n_columns : int Number of columns that will contain the same repeated value. Returns ------- pd.DataFrame Pandas DataFrame with ``n_columns`` containing the same repeated value and 5 columns with some different values. 
""" random.seed(42) constant_value_dict = {} sample_count = 100 # Create n_columns columns with same repeated value for i in range(n_columns): constant_value_dict[f"same_{i}"] = [4] * sample_count # Create not_constant_columns with repeated values and random values not_constant_columns = 5 for j in range(not_constant_columns): constant_value_sample_count = int(sample_count * (1 - 0.1 * (j + 1))) constant_value_dict[f"not_same_{j}"] = [4] * constant_value_sample_count + [ random.random() for _ in range(sample_count - constant_value_sample_count) ] return pd.DataFrame(constant_value_dict) @staticmethod def df_trivial(n_columns: int) -> pd.DataFrame: """ Create pandas DataFrame with ``n_columns`` containing trivial values. Half of the trivial columns contains lots of NaN, and the other half contains repeated values. DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns contain random values and a variable count of a repeated value and NaNs. Parameters ---------- n_columns : int Number of columns that will contain the same repeated value. Returns ------- pd.DataFrame Pandas DataFrame with ``n_columns`` containing the same repeated value and 5 columns with some different values. """ random.seed(42) trivial_dict = {} sample_count = 100 nan_columns = n_columns // 2 constant_value_columns = n_columns - nan_columns # Create half of n_columns columns with NaN for i in range(nan_columns): trivial_dict[f"nan_{i}"] = [pd.NA] * sample_count # Create half of n_columns columns with repeated value for j in range(constant_value_columns): trivial_dict[f"same_{j}"] = [4] * sample_count # Create 5 more columns with valid values (with NaN, repeated and random values) valid_values_columns = 5 for k in range(valid_values_columns): constant_value_sample_count = int(sample_count * (1 - 0.05 * (k + 1)) / 2) nan_sample_count = int(sample_count * (1 - 0.05 * (k + 1)) / 2) random_samples = [ random.random() * 100 for _ in range( sample_count - constant_value_sample_count - nan_sample_count ) ] trivial_dict[f"not_nan_not_same_{k}"] = ( [4] * constant_value_sample_count + [pd.NA] * nan_sample_count + random_samples ) return pd.DataFrame(trivial_dict) @staticmethod def df_multi_type(sample_size: int) -> pd.DataFrame: """ Create pandas DataFrame with columns containing values of different types. The returned DataFrame has a number of rows equal to the biggest value V such that: a) V < ``sample_size`` b) V is divisible by 10. The DataFrame has columns as follows: 1. One column containing boolean values 2. One column containing string values 3. One column containing string repeated values ("category" dtype) 4. One column containing string values (it is meant to simulate metadata) 5. One column containing numerical values 6. One column containing numerical repeated values ("category" dtype) 7. One column containing datetime values 8. One column containing 'interval' typed values 9. One column containing values of mixed types 10. One column containing repeated values 11. One column containing NaN values (+ 1 numerical value) Parameters ---------- sample_size: int Number of samples that the returned DataFrame will contain. Returns ------- pd.DataFrame Pandas DataFrame with ``sample_size`` samples and 5 columns containing values of different types. 
""" random.seed(42) # Get only the part that is divisible by 2 and 5 sample_size = sample_size // 10 * 10 bool_col = [True, False, True, True, False] * (sample_size // 5) random.shuffle(bool_col) df_multi_type_dict = { "metadata_num_col": list(range(sample_size)), "bool_col": bool_col, "string_col": [f"value_{i}" for i in range(sample_size)], "str_forced_categorical_col": pd.Series( ["category_0", "category_1", "category_2", "category_3", "category_4"] * (sample_size // 5), dtype="category", ), "str_categorical_col": pd.Series( ["category_0", "category_1", "category_2", "category_3", "category_4"] * (sample_size // 5) ), "int_forced_categorical_col": pd.Series( [0, 1, 2, 3, 4] * (sample_size // 5), dtype="category" ), "int_categorical_col": pd.Series([0, 1, 2, 3, 4] * (sample_size // 5)), "float_col": [0.05 * i for i in range(sample_size)], "int_col": list(range(sample_size)), "datetime_col": [date(2000 + i, 8, 1) for i in range(sample_size)], "interval_col": pd.arrays.IntervalArray( [pd.Interval(0, i) for i in range(sample_size)], ), "mixed_type_col": list(range(sample_size // 2)) + [f"value_{i}" for i in range(sample_size // 2)], "same_col": [2] * sample_size, "nan_col": [pd.NA] * (sample_size - 1) + [3], } return pd.DataFrame(df_multi_type_dict) @staticmethod def df_column_names_by_type() -> pd.DataFrame: """ Create DataFrame sample that contains column name and types of a generic DataFrame. DataFrame has 11 rows and 2 columns. One column called "col_name" contains some strings (that represent the column names of another DataFrame sample "df2"). Another column called "col_type" contains some possible outputs from ``trousse.dataset._find_single_column_type`` function that describe the type of values contained in the column of "df2". Returns ------- pd.DataFrame Pandas DataFrame with 2 columns containing strings and types respectively. """ return pd.DataFrame( [ {"col_name": "bool_col_0", "col_type": "bool_col"}, {"col_name": "bool_col_1", "col_type": "bool_col"}, {"col_name": "string_col_0", "col_type": "string_col"}, {"col_name": "string_col_1", "col_type": "string_col"}, {"col_name": "string_col_2", "col_type": "string_col"}, {"col_name": "numerical_col_0", "col_type": "numerical_col"}, {"col_name": "other_col_0", "col_type": "other_col"}, {"col_name": "mixed_type_col_0", "col_type": "mixed_type_col"}, {"col_name": "mixed_type_col_1", "col_type": "mixed_type_col"}, {"col_name": "mixed_type_col_2", "col_type": "mixed_type_col"}, {"col_name": "mixed_type_col_3", "col_type": "mixed_type_col"}, ] ) @staticmethod def df_categorical_cols(sample_size: int) -> pd.DataFrame: """ Create pandas DataFrame with columns containing categorical values The returned DataFrame will contain ``sample_size`` samples and 12 columns. The columns will be distinguished based on value types (sample_type) and number of unique values (unique_value_count). 
Parameters ---------- sample_size: int Number of samples in the returned DataFrame Returns ------- pd.DataFrame Pandas DataFrame containing ``sample_size`` samples and 12 columns with various sample types and number of unique values """ random.seed(42) unique_value_counts = (3, 5, 8, 40) categ_cols_dict = {} mixed_list = [f"string_{i}" for i in range(20)] + [i * 20 for i in range(21)] random.shuffle(mixed_list) value_per_sample_type = { "numerical": [i * 20 for i in range(41)], "string": [f"string_{i}" for i in range(41)], "mixed": mixed_list, } for unique_value_count, sample_type in itertools.product( unique_value_counts, value_per_sample_type.keys() ): if sample_size < unique_value_count: # Cannot have more unique values than samples unique_value_count = sample_size # how many times every value will be repeated to fill up the column repetitions_per_value = sample_size // unique_value_count # This is to always have the same sample_size last_value_repetitions = sample_size - repetitions_per_value * ( unique_value_count - 1 ) # Create list of lists containing the same repeated values (category) repeated_categories_list = [] for i in range(unique_value_count - 1): repeated_categories_list.append( [value_per_sample_type[sample_type][i]] * repetitions_per_value ) repeated_categories_list.append( [value_per_sample_type[sample_type][unique_value_count]] * last_value_repetitions ) # Combine lists into one column of the DataFrame categ_cols_dict[f"{sample_type}_{unique_value_count}"] = list( itertools.chain.from_iterable(repeated_categories_list) ) return pd.DataFrame(categ_cols_dict) @staticmethod def df_multi_nan_ratio(sample_size: int) -> pd.DataFrame: """ Create pandas DataFrame with columns containing variable ratios of NaN values. The returned DataFrame has a number of rows equal to the biggest value V such that: a) V < ``sample_size`` b) V is divisible by 10. The DataFrame has columns as follows: 1. One column containing numerical values 2. One column containing 50% of NaN values 3. One column containing NaN values + 1 numerical value 4. One column containing 100% of NaN values Parameters ---------- sample_size: int Number of samples that the returned DataFrame will contain Returns ------- pd.DataFrame Pandas DataFrame with ``sample_size`` samples and 5 columns containing variable ratios of NaN values. """ sample_size = sample_size // 10 * 10 ratio_50 = int(sample_size * 0.5) num_values = [0.05 * i for i in range(sample_size)] df_multi_type_dict = { "0nan_col": [0.05 * i for i in range(sample_size)], "50nan_col": ([pd.NA] * ratio_50) + num_values[:ratio_50], "99nan_col": [pd.NA] * (sample_size - 1) + [3], "100nan_col": ([pd.NA] * sample_size), } return pd.DataFrame(df_multi_type_dict) @staticmethod def df_duplicated_columns(duplicated_cols_count: int) -> pd.DataFrame: """ Create DataFrame with ``duplicated_cols_count`` duplicated columns. The returned DataFrame has 5 rows and ``duplicated_cols_count`` + 2 columns. Two columns are considered duplicated if they have the same column name Parameters ---------- duplicated_cols_count: int Number of columns with duplicated name. Returns ------- pd.DataFrame Pandas DataFrame with 5 rows and ``duplicated_cols_count`` duplicated columns (+ 2 generic columns). 
""" df_duplicated = pd.DataFrame({"col_0": list(range(5)), "col_3": list(range(5))}) for _ in range(duplicated_cols_count): single_col_df = pd.DataFrame({"duplic_col": list(range(5))}) df_duplicated = pd.concat([df_duplicated, single_col_df], axis=1) return pd.DataFrame(df_duplicated) @staticmethod def df_with_private_info(private_cols: Tuple[str]): """ Create DataFrame with private info columns along with data columns The returned DataFrame mock contains (len(private_cols) + 2) columns and 5 rows. Particularly it contains the columns listed in ``private_cols`` with string values, and 2 data columns containing integer values. Two of these rows have same values in ``private_cols`` columns, but different values in the other 2 data columns (this could be simulating a DataFrame with multiple rows related to the same customer/patient). Parameters ---------- private_cols: Tuple[str] List of columns that will be created as private columns Returns ------- pd.DataFrame DataFrame mock containing (len(private_cols) + 2) columns and 5 rows. Particularly it contains the columns listed in ``private_cols`` with generic string values, and 2 data columns containing integer values. """ df_private_info_dict = {} sample_size = 5 for i, col in enumerate(private_cols): df_private_info_dict[col] = [ f"col_{i}_value_{k}" for k in range(sample_size - 1) ] # Add a duplicated row (it may be associated to the same customer) df_private_info_dict[col].append(f"col_{i}_value_{sample_size-2}") df_private_info_dict["data_col_0"] = list(range(sample_size)) df_private_info_dict["data_col_1"] = list(range(sample_size)) return pd.DataFrame(df_private_info_dict) class SeriesMock: @staticmethod # noqa: C901 def series_by_type(series_type: str): col_name = "column_name" if "bool" in series_type: return pd.Series([True, False, True, pd.NA, False], name=col_name) elif "str" in series_type: return pd.Series( ["value_0", "value_1", "value_2", pd.NA, "value_4"], name=col_name ) elif "float" in series_type: return pd.Series([0.05 * i for i in range(4)] + [pd.NA], name=col_name) elif "int" in series_type: return pd.Series(list(range(4)) + [pd.NA], name=col_name) elif "float_int" in series_type: return pd.Series([2, 3, 0.1, 4, 5], name=col_name) elif "date" in series_type: return pd.Series([date.today() for i in range(4)] + [pd.NA], name=col_name) elif "category" in series_type: return pd.Series( ["category_1", "category_1", "category_0", "category_1", "category_0"], name=col_name, dtype="category", ) elif "interval" in series_type: return pd.Series( pd.arrays.IntervalArray( [ pd.Interval(0, 1),
pd.Interval(1, 5)
pandas.Interval
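For reference, the pd.Interval objects and the IntervalArray wrapper used for the mock interval column can be constructed in isolation like this; the bounds are arbitrary.

import pandas as pd

intervals = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
col = pd.Series(intervals, name='interval_col')
print(col.dtype)                               # an IntervalDtype
print(intervals[1].left, intervals[1].right)   # 1 5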
import numpy as np
from numpy import sqrt
from scipy.integrate import solve_bvp
import math
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
import sympy as sp
from IPython.display import display

#%% Process parameters
r1,r2=[0.7,0.6]
k1,k2=[3.33,3.35]
a1,a2,a3,a4=[0.071,0.057,0.071,0.057]
A1,A2,A3,A4=[28,32,28,32]
x10,x20,x30,x40=[12.4,12.7,1.8,1.4]
u10,u20=[3,3]
g=981
X0=[x10,x20,x30,x40]

#%% Control parameters
C=np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
Qc1=0.001
Qc2=1
Rc1=0.01
Q=(np.transpose(C)@C+Qc1*np.eye(4))*Qc2
Q[0,0]=10
Q[1,1]=10
R=Rc1*np.eye(2)

#%%
U0=np.array([u10,u20]).reshape((2,1))  #initial input
Xs=np.array([13,13.5,1.72,1.5]).reshape((4,1))  #set point

#%%% symbolic calculations
t=sp.symbols('t', real=True)
x1,x2,x3,x4,u1,u2,p1,p2,p3,p4=sp.symbols('x1,x2,x3,x4,u1,u2,p1,p2,p3,p4')
dx1=r1*k1*u1/A1+a3*(sp.sqrt(2*g*x3))/A1-a1*(sp.sqrt(2*g*x1))/A1
dx2=r2*k2*u2/A2+a4*(sp.sqrt(2*g*x4))/A2-a2*(sp.sqrt(2*g*x2))/A2
dx3=(1-r2)*k2*u2/A3-a3*(sp.sqrt(2*g*x3))/A3
dx4=(1-r1)*k1*u1/A4-a4*(sp.sqrt(2*g*x4))/A4
f_m = sp.Matrix([dx1,dx2,dx3,dx4])
x=sp.Matrix([x1,x2,x3,x4])
u=sp.Matrix([u1,u2])

#%%% Hamiltonian calculation
# note: this reassigns g from the gravitational constant (981) to the stage cost;
# the state equations above have already been built, so the numeric value is no longer needed
g=np.transpose(x-Xs)@Q@(x-Xs)+np.transpose(u-U0)@R@(u-U0)
p_m = sp.Matrix([p1,p2,p3,p4])
H=g+np.transpose(p_m)@f_m

#%% state and costate equations
p1_s=-sp.diff(H,x1)[0]
p2_s=-sp.diff(H,x2)[0]
p3_s=-sp.diff(H,x3)[0]
p4_s=-sp.diff(H,x4)[0]
u1_s=sp.diff(H,u1)[0]
u2_s=sp.diff(H,u2)[0]
sol_u=sp.solve((u1_s,u2_s),(u1,u2)) #solving for u

#%% saving substituted equations for bvp
df=
pd.DataFrame([dx1,dx2,dx3,dx4,p1_s,p2_s,p3_s,p4_s,sol_u[u1],sol_u[u2]])
pandas.DataFrame
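A stripped-down sketch of that save step: sympy expressions can be collected into a one-column pandas DataFrame and written to disk for a later boundary-value-problem script. The expressions and the filename here are placeholders, not the four-tank equations above.

import pandas as pd
import sympy as sp

x1, u1, p1 = sp.symbols('x1 u1 p1')
exprs = [sp.sqrt(1962 * x1) / 28, -2 * p1 * x1, u1 - 3]   # placeholder expressions
df = pd.DataFrame(exprs)                                   # one object column, one row per expression
df.to_csv('bvp_equations_example.csv', index=False)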
#!/usr/bin/env python3
import urllib.request
import subprocess
import os
import sys
from pathlib import Path
from datetime import datetime, timedelta
import pandas as pd


def contains_future_date(series):
    try:
        return (series > datetime.now()).any()
    except TypeError as err:
        raise TypeError('series must have datetime compatible dtype') from err


def main():
    # Download data
    cases_data_url = 'https://data.nsw.gov.au/data/dataset/97ea2424-abaf-4f3e-a9f2-b5c883f42b6a/resource/2776dbb8-f807-4fb2-b1ed-184a6fc2c8aa/download/covid-19-cases-by-notification-date-location-and-likely-source-of-infection.csv'
    tests_data_url = 'https://data.nsw.gov.au/data/dataset/60616720-3c60-4c52-b499-751f31e3b132/resource/fb95de01-ad82-4716-ab9a-e15cf2c78556/download/covid-19-tests-by-date-and-postcode-local-health-district-and-local-government-area-aggregated.csv'
    urls = [cases_data_url, tests_data_url]
    files = [url.split('/')[-1] for url in urls]
    url_filename_pairs = zip(urls, files)
    for url, filename in url_filename_pairs:
        urllib.request.urlretrieve(url, filename)

    # Test for invalid dates
    date_columns = {'case': 'notification_date', 'test': 'test_date'}
    names = date_columns.keys()
    name_file_pairs = zip(names, files)
    dfs = {}
    for name, file in name_file_pairs:
        df =
pd.read_csv(file, dtype=str)
pandas.read_csv
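The completion reads each downloaded CSV with dtype=str so that pandas does not guess column types, which matters for postcode-like fields. A self-contained version using an in-memory CSV instead of the NSW download:

import io
import pandas as pd

csv_text = "notification_date,postcode\n2020-03-01,2000\n2020-03-02,0800\n"
df = pd.read_csv(io.StringIO(csv_text), dtype=str)  # every column kept as plain strings
print(df.dtypes.tolist())                           # [dtype('O'), dtype('O')]
print(df.loc[1, 'postcode'])                        # '0800', leading zero preserved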
""" Plotting helpers ================ """ import os import joblib import logging import sklearn.metrics import pandas as pd from .list_runs import ListRuns import seaborn as sns import matplotlib.pyplot as plt import numpy as np import math logger = logging.getLogger(__name__) def plot_confusion_matrix( run, log_scale, normalize_sum, normalize_test, stylesheet, figsize_x, figsize_y, vmin, vmax, vmin_norm, vmax_norm, plot_formats ): f_path = os.path.join(os.getcwd(), 'output', run) if not os.path.isdir(f_path): raise FileNotFoundError(f'Could not find run directory {f_path}') test_output_file = os.path.join(os.getcwd(), 'output', run, 'test_output.csv') if not os.path.isfile(test_output_file): raise FileNotFoundError(f'No file {test_output_file} found for run {run}. Pass the option `write_test_output: true` when training the model.') if stylesheet: plt.style.use(stylesheet) df = pd.read_csv(test_output_file) labels = sorted(set(df.label).union(set(df.prediction))) cnf_matrix = sklearn.metrics.confusion_matrix(df.label, df.prediction) df = pd.DataFrame(cnf_matrix, columns=labels, index=labels) # Plotting fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y)) fmt = 'd' f_name = run df_sum = df.sum().sum() vmin = df.min().min() if vmin is None else vmin vmax = df.max().max() if vmax is None else vmax if normalize_sum: df = df.div(df.sum().sum().astype(float)) * 1000 vmin = df.min().min() if vmin_norm is None else vmin_norm vmax = df.max().max() if vmax_norm is None else vmax_norm fmt = '3.0f' f_name += '_normalized_sum' elif normalize_test: df = df.divide(df.sum(axis=1), axis=0) * 1000 vmin = df.min().min() if vmax_norm is None else vmax_norm vmax = df.max().max() if vmin_norm is None else vmin_norm fmt = '3.0f' f_name += '_normalized_test' df_annot = df.copy() if log_scale: vmin = np.log(vmin) if vmin >= 1 else 0.0 vmax = np.log(vmax) df = np.log(df + 1) f_name += '_log_scale' ax = sns.heatmap( df, ax=ax, annot=df_annot, fmt=fmt, annot_kws={"fontsize": 8}, vmin=vmin, vmax=vmax) if log_scale: # Set colorbar ticks cbar = ax.collections[0].colorbar ticks = list(range(math.floor(vmin), math.ceil(vmax))) cbar.set_ticks(ticks) exp_0 = lambda x: np.exp(x) if x > 0 else 0.0 cbar.set_ticklabels(np.vectorize(exp_0)(ticks).astype(int)) ax.set(xlabel='predicted label', ylabel='true label') if normalize_sum or normalize_test: ax.set_title(f'Total samples: {df_sum}') save_fig(fig, 'confusion_matrix', f_name, plot_formats=plot_formats) def plot_compare_runs(runs, performance_scores, order_by): df = [] run_dict = {} for run in runs: if ':' in run: run_name, alt_name = run.split(':') run_dict[run_name] = alt_name else: run_dict[run] = run for run, alt_name in run_dict.items(): _df = ListRuns.collect_results(run=run) _df['name'] = alt_name if len(_df) == 0: raise FileNotFoundError(f'Could not find the run "{run}" in ./output/') elif len(_df) > 1: raise ValueError(f'Run name "{run}" is not unique. Found {len(_df):,} matching runs for this pattern.') df.append(_df) df =
pd.concat(df)
pandas.concat
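pd.concat here stacks the list of per-run result frames into a single DataFrame; a toy stand-alone version of that call, with invented run names and scores rather than ListRuns output:

import pandas as pd

frames = [
    pd.DataFrame({'name': ['run_a'], 'f1_macro': [0.81]}),
    pd.DataFrame({'name': ['run_b'], 'f1_macro': [0.84]}),
]
df = pd.concat(frames)              # stacks rows, keeping each frame's original index
df = df.reset_index(drop=True)      # optional: rebuild a clean RangeIndex
print(df)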
'''
Authors: <NAME> (@anabab1999) and <NAME> (@felipezara2013)
'''
from calendars import DayCounts
import pandas as pd
from pandas.tseries.offsets import DateOffset
from bloomberg import BBG
import numpy as np

bbg = BBG()

# Fetching the tickers for the zero curve
tickers_zero_curve = ['S0023Z 1Y BLC2 Curncy', 'S0023Z 1D BLC2 Curncy', 'S0023Z 3M BLC2 Curncy', 'S0023Z 1W BLC2 Curncy',
                      'S0023Z 10Y BLC2 Curncy', 'S0023Z 1M BLC2 Curncy', 'S0023Z 2Y BLC2 Curncy', 'S0023Z 6M BLC2 Curncy',
                      'S0023Z 2M BLC2 Curncy', 'S0023Z 5Y BLC2 Curncy', 'S0023Z 4M BLC2 Curncy', 'S0023Z 2D BLC2 Curncy',
                      'S0023Z 9M BLC2 Curncy', 'S0023Z 3Y BLC2 Curncy', 'S0023Z 4Y BLC2 Curncy', 'S0023Z 50Y BLC2 Curncy',
                      'S0023Z 12Y BLC2 Curncy', 'S0023Z 18M BLC2 Curncy', 'S0023Z 7Y BLC2 Curncy', 'S0023Z 5M BLC2 Curncy',
                      'S0023Z 6Y BLC2 Curncy', 'S0023Z 2W BLC2 Curncy', 'S0023Z 11M BLC2 Curncy', 'S0023Z 15M BLC2 Curncy',
                      'S0023Z 21M BLC2 Curncy', 'S0023Z 15Y BLC2 Curncy', 'S0023Z 25Y BLC2 Curncy', 'S0023Z 8Y BLC2 Curncy',
                      'S0023Z 10M BLC2 Curncy', 'S0023Z 20Y BLC2 Curncy', 'S0023Z 33M BLC2 Curncy', 'S0023Z 7M BLC2 Curncy',
                      'S0023Z 8M BLC2 Curncy', 'S0023Z 11Y BLC2 Curncy', 'S0023Z 14Y BLC2 Curncy', 'S0023Z 18Y BLC2 Curncy',
                      'S0023Z 19Y BLC2 Curncy', 'S0023Z 23D BLC2 Curncy', 'S0023Z 9Y BLC2 Curncy', 'S0023Z 17M BLC2 Curncy',
                      'S0023Z 1I BLC2 Curncy', 'S0023Z 22Y BLC2 Curncy', 'S0023Z 28Y BLC2 Curncy', 'S0023Z 2I BLC2 Curncy',
                      'S0023Z 30Y BLC2 Curncy', 'S0023Z 31Y BLC2 Curncy', 'S0023Z 32Y BLC2 Curncy', 'S0023Z 38Y BLC2 Curncy',
                      'S0023Z 39Y BLC2 Curncy', 'S0023Z 40Y BLC2 Curncy', 'S0023Z 42D BLC2 Curncy', 'S0023Z 48Y BLC2 Curncy']

df_bbg = bbg.fetch_series(tickers_zero_curve, "PX_LAST", startdate = pd.to_datetime('today'), enddate = pd.to_datetime('today'))
df_bbg = df_bbg.transpose()
df_bbg_m = bbg.fetch_contract_parameter(tickers_zero_curve, "MATURITY")

'''
The zero curve will be used in the interpolation to find the rate for a specific term.
'''
# building the zero curve
zero_curve = pd.concat([df_bbg, df_bbg_m], axis=1, sort=True).set_index('MATURITY').sort_index()
zero_curve = zero_curve.astype(float)
zero_curve = zero_curve.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='backward', limit_area=None, downcast=None)
zero_curve.index =
pd.to_datetime(zero_curve.index)
pandas.to_datetime
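The final step above converts the maturity index returned by Bloomberg into a proper DatetimeIndex; a minimal illustration with invented maturities and rates:

import pandas as pd

zero_curve = pd.DataFrame({'PX_LAST': [2.1, 2.4]}, index=['2025-06-30', '2030-06-30'])
zero_curve.index = pd.to_datetime(zero_curve.index)  # object/string index -> DatetimeIndex
print(zero_curve.index)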
""" Some of the runs were super unstable with smallest epsilon, so need to make some exceptions """ import pandas as pd import pickle import numpy as np from itertools import product import os script_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.split(script_dir)[0] + "/" ## Load DPVI fits epsilons = [0.74, 1.99, 3.92] #epsilons = [1.99, 3.92] epsilons = np.array(epsilons) seeds = range(1234,1244) n_runs = len(seeds)*10 ## No stratification ## For females syn_no_strat_coef_female_dict = {} syn_no_strat_p_value_female_dict = {} for eps in epsilons: female_coefs = [] for seed in seeds: for rep in range(10): try: female_coef_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/female_coef_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0) female_p_value_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/female_p_value_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0) if len(female_coefs)==0: female_coefs = female_coef_df female_p_values = female_p_value_df else: female_coefs = pd.concat([female_coefs, female_coef_df], axis=1) female_p_values = pd.concat([female_p_values, female_p_value_df], axis=1) except: pass syn_no_strat_coef_female_df = pd.DataFrame(female_coefs.values.T, columns=female_coefs.index) syn_no_strat_p_value_female_df = pd.DataFrame(female_p_values.values.T, columns=female_p_values.index) syn_no_strat_coef_female_dict[eps] = syn_no_strat_coef_female_df syn_no_strat_p_value_female_dict[eps] = syn_no_strat_p_value_female_df # For males syn_no_strat_coef_male_dict = {} syn_no_strat_p_value_male_dict = {} for eps in epsilons: male_coefs = [] for seed in seeds: for rep in range(10): try: male_coef_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/male_coef_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0) male_p_value_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/male_p_value_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0) if len(male_coefs)==0: male_coefs = male_coef_df male_p_values = male_p_value_df else: male_coefs = pd.concat([male_coefs, male_coef_df], axis=1) male_p_values = pd.concat([male_p_values, male_p_value_df], axis=1) except: pass syn_no_strat_coef_male_df =
pd.DataFrame(male_coefs.values.T, columns=male_coefs.index)
pandas.DataFrame
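The completion transposes the accumulated coefficient matrix (one row per coefficient, one column per repeat) into a frame with one column per coefficient; sketched here on a toy matrix with hypothetical coefficient names:

import pandas as pd

male_coefs = pd.DataFrame(
    [[0.12, 0.10], [1.30, 1.35]],
    index=['age', 'bmi'],        # hypothetical coefficient names
    columns=['rep_0', 'rep_1'],
)
per_run = pd.DataFrame(male_coefs.values.T, columns=male_coefs.index)
print(per_run)                   # one row per repeat, one column per coefficient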
# python -m unittest tests/test_ml_training.py import copy import numpy as np import pandas as pd import os import shutil import unittest from collections import OrderedDict from subroutines.exceptions import AlgorithmError, create_generator from subroutines.train import ( make_separate_subclass_splits, bootstrap_data, make_feat_importance_plots, check_arguments, RunML ) class TestClass(unittest.TestCase): def test_make_separate_subclass_splits(self): """ Tests make_separate_subclass_splits in train.py """ print('Testing make_separate_subclass_splits') exp_input_dict = { 1: [['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], np.array([['A', 'C'], ['B', 'D']], dtype=object)], 2: [np.array([['A', 'B', 'C', 'D'], ['B', 'A', 'D', 'C'], ['C', 'A', 'D', 'B']], dtype=object), np.array([['A', 'C'], ['B', 'D']], dtype=object)], 3: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', np.nan, 'D', 'B'], dtype=object), np.array([['A', 'C'], ['B', 'D']], dtype=object)], 4: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object), [['A', 'C'], ['B', 'D']]], 5: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object), np.array([[np.nan, 'C'], ['B', 'D']], dtype=object)], 6: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object), np.array([['A', 'C'], ['B', 'A']], dtype=object)], 7: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object), np.array([['A', 'C'], ['B', 'D']], dtype=object)], 8: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object), np.array([['A', 'C'], ['B', 'D'], ['E', 'F']], dtype=object)], 9: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object), np.array([['A', 'C'], ['B', 'D']], dtype=object)] } for num in exp_input_dict.keys(): subclasses = exp_input_dict[num][0] subclass_splits = exp_input_dict[num][1] if num == 1: with self.assertRaises(TypeError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'Expect "subclasses" to be a (1D) ' 'array of subclass values' ) elif num == 2: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'Expect "subclasses" to be a 1D array' ) elif num == 3: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'NaN value(s) detected in ' '"subclasses" array' ) elif num == 4: with self.assertRaises(TypeError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'Expect "subclass_splits" to be a ' '(2D) array of subclass values' ) elif num == 5: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'NaN value(s) detected in ' '"subclass_splits" array' ) elif num == 6: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'Repeated subclass labels detected ' 'in "subclass_splits"' ) elif num == 7: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( 
str(message.exception), 'Subclass E is found in ' '"subclasses" but not "subclass_splits"' ) elif num == 8: with self.assertRaises(ValueError) as message: splits = make_separate_subclass_splits(subclasses, subclass_splits) next(splits) self.assertEqual( str(message.exception), 'Subclass F is found in ' '"subclass_splits" but not "subclasses"' ) elif num == 9: exp_split = (sub_list for sub_list in [np.array([0, 2, 5, 7, 8, 9]), np.array([1, 3, 4, 6, 10, 11])]) act_split = make_separate_subclass_splits(subclasses, subclass_splits) for i, split_1 in enumerate(list(exp_split)): for j, split_2 in enumerate(list(act_split)): if i == j: np.testing.assert_equal(split_1, split_2) def test_bootstrap_data(self): """ Tests bootstrap_data in train.py """ print('Testing bootstrap_data') exp_input_dict = { 1: [[[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]], np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), ['1', '2', '3'], True], 2: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], ['1', '2', '3'], True], 3: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array([['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']]), ['1', '2', '3'], True], 4: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']), ['1', '2', '3'], True], 5: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), np.array(['1', '2', '3']), True], 6: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), ['1', '2', '3', '4'], True], 7: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), ['1', '2', '3'], 1.0], 8: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), ['1', '2', '3'], False], 9: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1], [1.6, 0.5, 1.0]]), np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), ['1', '2', '3'], True] } for num in exp_input_dict.keys(): x = exp_input_dict[num][0] y = exp_input_dict[num][1] features = exp_input_dict[num][2] scale = exp_input_dict[num][3] if num == 1: with self.assertRaises(TypeError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect 
"x" to be a (2D) array of x' ' values' ) if num == 2: with self.assertRaises(TypeError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect "y" to be a (1D) array of y' ' values' ) if num == 3: with self.assertRaises(ValueError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect "y" to be a 1D array of y ' 'values' ) if num == 4: with self.assertRaises(ValueError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Different numbers of rows in ' 'arrays "x" and "y"' ) if num == 5: with self.assertRaises(TypeError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect "features" to be a list' ) if num == 6: with self.assertRaises(ValueError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect entries in "features" list ' 'to correspond to the columns in "x"' ) if num == 7: with self.assertRaises(TypeError) as message: bootstrap_data( x, y, features, scale, True ) self.assertEqual( str(message.exception), 'Expect "scale" to be a Boolean ' 'value (either True or False)' ) if num == 8: exp_out_x = pd.DataFrame( np.array([[1.0, 1.5, 1.2], [3.4, 2.5, 1.4], [4.6, 2.3, 2.1], [1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6], [4.0, 4.0, 3.1], [1.0, 1.5, 1.2], [3.4, 2.5, 1.4], [4.1, 3.3, 2.6]]), index=None, columns=features ) exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f'] act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True) pd.testing.assert_frame_equal(exp_out_x, act_out_x) self.assertEqual(exp_out_y, act_out_y) if num == 9: exp_out_x = pd.DataFrame( np.array([[-0.83478261, -0.5625, -0.15686275], [0., 0.0625, 0.], [0.4173913, -0.0625, 0.54901961], [-0.55652174, -0.8125, -0.62745098], [-0.93913043, -0.9375, -0.54901961], [0.24347826, 0.5625, 0.94117647], [0.20869565, 1., 1.33333333], [-0.83478261, -0.5625, -0.15686275], [0., 0.0625, 0.], [0.24347826, 0.5625, 0.94117647]]), index=None, columns=features ) exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f'] act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True) pd.testing.assert_frame_equal(exp_out_x, act_out_x) self.assertEqual(exp_out_y, act_out_y) def test_make_feat_importance_plots(self): """ Tests make_feat_importance_plots in train.py """ print('Testing make_feat_importance_plots') input_feat_importances = { 'Feature_1': [7.8, 8.7, 0.1, 8.1, 0.4], 'Feature_2': [6.4, 0.1, 0.6, 8.3, 5.2], 'Feature_3': [7.1, 8.4, 0.0, 9.3, 2.5], 'Feature_4': [3.4, 2.1, 1.6, 5.6, 9.4], 'Feature_5': [8.5, 3.4, 6.6, 6.4, 9.0], 'Feature_6': [3.5, 4.3, 8.9, 2.3, 4.1], 'Feature_7': [6.5, 8.4, 2.1, 3.2, 7.8], 'Feature_8': [8.2, 4.7, 4.3, 1.0, 4.3], 'Feature_9': [8.2, 5.6, 5.0, 0.8, 0.9], 'Feature_10': [1.9, 4.0, 0.5, 6.0, 7.8] } input_results_dir = 'tests/Temp_output' input_plt_name = 'PlaceHolder' for num in range(1, 7): if num == 1: with self.assertRaises(FileNotFoundError) as message: make_feat_importance_plots( input_feat_importances, input_results_dir, input_plt_name, True ) self.assertEqual( str(message.exception), 'Directory {} does not exist'.format(input_results_dir) ) elif num == 2: os.mkdir(input_results_dir) with open('{}/{}_feat_importance_percentiles.svg'.format( input_results_dir, input_plt_name ), 'w') as f: f.write('PlaceHolder') with self.assertRaises(FileExistsError) as message: make_feat_importance_plots( input_feat_importances, 
input_results_dir, input_plt_name, True ) self.assertEqual( str(message.exception), 'File {}/{}_feat_importance_percentiles.svg already exists ' '- please rename this file so it is not overwritten by ' 'running this function'.format(input_results_dir, input_plt_name) ) shutil.rmtree(input_results_dir) elif num == 3: os.mkdir(input_results_dir) with open('{}/{}_feat_importance_all_data.svg'.format( input_results_dir, input_plt_name ), 'w') as f: f.write('PlaceHolder') with self.assertRaises(FileExistsError) as message: make_feat_importance_plots( input_feat_importances, input_results_dir, input_plt_name, True ) self.assertEqual( str(message.exception), 'File {}/{}_feat_importance_all_data.svg already exists - ' 'please rename this file so it is not overwritten by ' 'running this function'.format(input_results_dir, input_plt_name) ) shutil.rmtree(input_results_dir) elif num == 4: os.mkdir(input_results_dir) with self.assertRaises(TypeError) as message: make_feat_importance_plots( pd.DataFrame({}), input_results_dir, input_plt_name, True ) self.assertEqual( str(message.exception), 'Expect "feature_importances" to be a dictionary of ' 'importance scores' ) shutil.rmtree(input_results_dir) elif num == 5: os.mkdir(input_results_dir) with self.assertRaises(TypeError) as message: make_feat_importance_plots( input_feat_importances, input_results_dir, 1.0, True ) self.assertEqual( str(message.exception), 'Expect "plt_name" to a string to append to the start of ' 'the names of the saved plots' ) shutil.rmtree(input_results_dir) elif num == 6: os.mkdir(input_results_dir) exp_importance_df = pd.DataFrame({ 'Feature': ['Feature_1', 'Feature_3', 'Feature_5', 'Feature_7', 'Feature_2', 'Feature_9', 'Feature_8', 'Feature_6', 'Feature_10', 'Feature_4'], 'Score': [7.8, 7.1, 6.6, 6.5, 5.2, 5.0, 4.3, 4.1, 4.0, 3.4], 'Lower conf limit': [0.13, 0.25, 3.7, 2.21, 0.15, 0.81, 1.33, 2.42, 0.64, 1.65], 'Upper conf limit': [8.64, 9.21, 8.95, 8.34, 8.11, 7.94, 7.85, 8.44, 7.62, 9.02] }) exp_cols = [ 'Feature_1', 'Feature_2', 'Feature_3', 'Feature_4', 'Feature_5', 'Feature_6', 'Feature_7', 'Feature_8', 'Feature_9', 'Feature_10' ] exp_cols_all = [ 'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1', 'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2', 'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3', 'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4', 'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5', 'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6', 'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7', 'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8', 'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9', 'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10' ] exp_all_vals = [ 7.8, 8.7, 0.1, 8.1, 0.4, 6.4, 0.1, 0.6, 8.3, 5.2, 7.1, 8.4, 0.0, 9.3, 2.5, 3.4, 2.1, 1.6, 5.6, 9.4, 8.5, 3.4, 6.6, 6.4, 9.0, 3.5, 4.3, 8.9, 2.3, 4.1, 6.5, 8.4, 2.1, 3.2, 7.8, 8.2, 4.7, 4.3, 1.0, 4.3, 8.2, 5.6, 5.0, 0.8, 0.9, 1.9, 4.0, 0.5, 6.0, 7.8] exp_median_vals = [7.8, 5.2, 7.1, 3.4, 6.6, 4.1, 6.5, 4.3, 5.0, 4.0] exp_lower_conf_limit_vals = [ 0.13, 0.15, 0.25, 1.65, 3.7, 2.42, 2.21, 1.33, 0.81, 0.64 ] exp_upper_conf_limit_vals = [ 8.64, 8.11, 9.21, 9.02, 8.95, 8.44, 8.34, 7.85, 7.94, 7.62 ] ( act_importance_df, act_cols, act_cols_all, act_all_vals, act_median_vals, act_lower_conf_limit_vals, act_upper_conf_limit_vals ) = make_feat_importance_plots( input_feat_importances, input_results_dir, input_plt_name, True ) 
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df) self.assertEqual(exp_cols, act_cols) self.assertEqual(exp_cols_all, act_cols_all) np.testing.assert_almost_equal(exp_all_vals, act_all_vals, 7) np.testing.assert_almost_equal( exp_median_vals, act_median_vals, 7 ) np.testing.assert_almost_equal( exp_lower_conf_limit_vals, act_lower_conf_limit_vals, 7 ) np.testing.assert_almost_equal( exp_upper_conf_limit_vals, act_upper_conf_limit_vals, 7 ) shutil.rmtree(input_results_dir) def test_check_arguments(self): """ Tests check_arguments in train.py """ print('Testing check_arguments') # Sets "recognised" parameter values that will not raise an exception x_train = np.array([]) y_train = np.array([]) train_groups = np.array([]) x_test = np.array([]) y_test = np.array([]) selected_features = [] splits = [(y_train, np.array([]))] const_split = True resampling_method = 'no_balancing' n_components_pca = None run = 'randomsearch' fixed_params = {} tuned_params = {} train_scoring_metric = 'accuracy' test_scoring_funcs = {} n_iter = None cv_folds_inner_loop = 5 cv_folds_outer_loop = 5 draw_conf_mat = True plt_name = '' # "Recognised" parameter values should not raise an exception output_str = check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual(output_str, 'All checks passed') # "Unrecognised" parameter values should raise an exception # Tests x_train type x_train_str = '' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train_str, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "x_train" to be a numpy array of ' 'training data fluorescence readings' ) # Tests y_train type y_train_str = '' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train_str, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "y_train" to be a numpy array of ' 'training data class labels' ) # Tests train_groups type train_groups_str = '' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups_str, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "train_groups" to be a numpy array ' 'of training data subclass labels' ) # Tests x_test type x_test_str = '' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test_str, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, 
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "x_test" to be a numpy array of ' 'test data fluorescence readings' ) # Tests y_test type y_test_str = '' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test_str, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "y_test" to be a numpy array of ' 'test data class labels' ) # Tests y_train is a 1D array x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([[2, 2], [2, 2], [2, 2], [2, 2]]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "y_train" to be a 1D array' ) # Tests mismatch in x_train and y_train shape x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2, 2]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Different number of entries (rows) in ' '"x_train" and "y_train"' ) # Tests train_groups is a 1D array x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([[3], [3], [3], [3]]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "train_groups" to be a 1D array' ) # Tests mismatch in x_train and train_groups shape x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Different number of entries (rows) in ' '"x_train" and "train_groups"' ) # Tests y_test is a 1D array x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]]) y_test_array = np.array([[5, 5], [5, 5], [5, 5], [5, 5]]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test_array, y_test_array, 
selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "y_test" to be a 1D array' ) # Tests mismatch in x_test and y_test shape x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]]) y_test_array = np.array([5, 5, 5, 5, 5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Different number of entries (rows) in ' '"x_test" and "y_test"' ) # Tests mismatch in x_train and x_test shape x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4, 4], [4, 4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Different number of features incorporated ' 'in the training and test data' ) # Tests no NaN in x_train x_train_array = np.array([[1, np.nan], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'NaN value(s) detected in "x_train" data' ) # Tests no non-numeric entries in x_train x_train_array = np.array([[1, 1], [1, 'X'], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Non-numeric value(s) in "x_train" - expect' ' all values in "x_train" to be integers / floats' ) # Tests no NaN in y_train x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, np.nan, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 
'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'NaN value(s) detected in "y_train" data' ) # Tests no NaN in train_groups x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([np.nan, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'NaN value(s) detected in "train_groups" data' ) # Tests no NaN in x_test x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[np.nan, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'NaN value(s) detected in "x_test" data' ) # Tests no non-numeric values in x_test x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 'X']]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Non-numeric value(s) in "x_test" - expect ' 'all values in "x_test" to be integers / floats' ) # Tests no NaN in y_test x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, np.nan]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'NaN value(s) detected in "y_test" data' ) # Test selected_features is a list or a positive integer selected_features_str = 'X' with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, 
y_train, train_groups, x_test, y_test, selected_features_str, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "selected_features" to be either a ' 'list of features to retain in the analysis, or an integer number ' 'of features (to be selected via permutation analysis)' ) # Test selected_features is a list or a positive integer selected_features_str = 0 with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features_str, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'The number of selected_features must be a ' 'positive integer' ) # Test length of selected_features list is less than or equal to the # number of columns in x_train selected_features_list = ['X', 'X', 'X'] x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'There is a greater number of features ' 'in "selected_features" than there are columns in the ' '"x_train" input arrays' ) # Test length of selected_features list is less than or equal to the # number of columns in x_test (when x_train is not defined) selected_features_list = ['X', 'X', 'X'] x_train_array = np.array([]) y_train_array = np.array([]) train_groups_array = None x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'There is a greater number of features ' 'in "selected_features" than there are columns in the "x_test" ' 'input arrays' ) # Tests splits type x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) selected_features_list = ['X', 'X'] splits_gen = create_generator(x_train_array.shape[0]) with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits_gen, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, 
draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "splits" to be a list of train/test' ' splits' ) # Tests splits list matches dimensions of x_train x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) selected_features_list = ['X', 'X'] splits_list = [(np.array([6, 6, 6]), np.array([6])), (np.array([]), np.array([6, 6, 6, 6])), (np.array([6]), np.array([6, 6, 6, 6]))] with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits_list, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Size of train test splits generated by ' '"splits" does not match the number of rows in the input array ' '"x_train"' ) # Tests const_split type const_split_int = 1 with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split_int, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "const_split" to be a Boolean (True' ' or False)' ) # Tests resampling_method is recognised resampling_method_str = '' with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method_str, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), '"resampling_method" unrecognised - expect ' 'value to be one of the following list entries:\n[\'no_balancing\',' ' \'max_sampling\', \'smote\', \'smoteenn\', \'smotetomek\']' ) # Test n_components_pca is an integer n_components_pca_str = 2.0 with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca_str, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "n_components_pca" to be set either' ' to None or to a positive integer value between 1 and the number ' 'of features' ) # Test n_components_pca is an integer in the range of 1 - number of # features x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) selected_features_list = ['X', 'X'] splits_list = [(np.array([6, 6, 6]), np.array([6])), (np.array([]), np.array([6, 6, 6, 6])), (np.array([6]), np.array([6, 6, 6]))] n_components_pca_int = x_train_array.shape[1] + 1 with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train_array, y_train_array, train_groups_array, x_test_array, 
y_test_array, selected_features_list, splits_list, const_split, resampling_method, n_components_pca_int, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "n_components_pca" to be set either' ' to None or to a positive integer value between 1 and the number ' 'of features' ) # Tests requirement for run to be "randomsearch", "gridsearch" or # "train" when func_name is set to "run_ml" x_train_array = np.array([]) y_train_array = np.array([]) train_groups_array = np.array([]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) selected_features_list = ['X', 'X'] splits_list = [(np.array([]), np.array([])), (np.array([]), np.array([])), (np.array([]), np.array([]))] n_components_pca_int = x_test_array.shape[1] run_str = 'random search' with self.assertRaises(ValueError) as message: check_arguments( 'run_ml', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits_list, const_split, resampling_method, n_components_pca_int, run_str, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "run" to be set to either ' '"randomsearch", "gridsearch" or "train"' ) # Tests requirement for run to be "randomsearch" or "gridsearch" when # when func_name is set to "run_nested_CV" run_str = 'train' with self.assertRaises(ValueError) as message: check_arguments( 'run_nested_CV', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run_str, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "run" to be set to either ' '"randomsearch" or "gridsearch"' ) # Tests fixed_params type fixed_params_df = pd.DataFrame({}) with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params_df, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "fixed_params" to be a dictionary ' 'of parameter values with which to run the selected classifier ' 'algorithm' ) # Test tuned_params type x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]]) y_train_array = np.array([2, 2, 2, 2]) train_groups_array = np.array([3, 3, 3, 3]) x_test_array = np.array([[4, 4], [4, 4]]) y_test_array = np.array([5, 5]) selected_features_list = ['X', 'X'] splits_list = [(np.array([6, 6, 6]), np.array([6])), (np.array([]), np.array([6, 6, 6, 6])), (np.array([6]), np.array([6, 6, 6]))] n_components_pca_int = x_train_array.shape[1] run_str = 'train' fixed_params_dict = {'dual': False} tuned_params_list = [] with self.assertRaises(TypeError) as message: check_arguments( 'run_ml', x_train_array, y_train_array, train_groups_array, x_test_array, y_test_array, selected_features_list, splits_list, const_split, resampling_method, n_components_pca_int, run_str, fixed_params_dict, tuned_params_list, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, 
cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "tuned_params" to be a dictionary ' 'of parameter names (keys) and ranges of values to optimise ' '(values) using either random or grid search' ) # Test train_scoring_metric is string in list of recognised scoring # metrics in sklearn train_scoring_metric_str = 'mutual_info_score' # Scoring metric used # for clustering, not classification with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric_str, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), '"train_scoring_metric" not recogised - ' 'please specify a string corresponding to the name of the metric ' 'you would like to use in the sklearn.metrics module, e.g. ' '"accuracy".\nExpect metric to be in the following list:\n' '[\'accuracy\', \'balanced_accuracy\', \'top_k_accuracy\', ' '\'average_precision\', \'neg_brier_score\', \'f1\', \'f1_micro\', ' '\'f1_macro\', \'f1_weighted\', \'f1_samples\', \'neg_log_loss\', ' '\'precision\', \'precision_micro\', \'precision_macro\', ' '\'precision_weighted\', \'precision_samples\', \'recall\', ' '\'recall_micro\', \'recall_macro\', \'recall_weighted\', ' '\'recall_samples\', \'jaccard\', \'jaccard_micro\', ' '\'jaccard_macro\', \'jaccard_weighted\', \'jaccard_samples\', ' '\'roc_auc\', \'roc_auc_ovr\', \'roc_auc_ovo\', ' '\'roc_auc_ovr_weighted\', \'roc_auc_ovo_weighted\']' ) # Test test_scoring_funcs is a dictionary of scoring functions (keys) # and dictionaries of parameter values to run these functions with from sklearn.metrics import accuracy_score, jaccard_score, make_scorer train_scoring_metric_func = make_scorer(accuracy_score) test_scoring_funcs_dict = {accuracy_score: {'normalize': True}, jaccard_score: {'average': 'weighted'}} with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric_func, test_scoring_funcs_dict, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Scoring function jaccard_score not ' 'recognised.\nExpect scoring functions to be in the following ' 'list:\n[\'accuracy_score\', \'f1_score\', \'precision_score\', ' '\'recall_score\', \'roc_auc_score\', \'cohen_kappa_score\']' ) # Test n_iter type is an integer n_iter_float = 3.0 with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter_float, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), '"n_iter" should be set to a positive ' 'integer value' ) # Test n_iter is a positive integer n_iter_int = -2 with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, 
n_iter_int, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), '"n_iter" should be set to a positive ' 'integer value' ) # Test cv_folds_inner_loop type is an integer cv_folds_inner_loop_dict = OrderedDict() with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop_dict, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "cv_folds_inner_loop" to be a ' 'positive integer value in the range of 2 - 20' ) # Test cv_folds_inner_loop is an integer in the range of 2 - 20 cv_folds_inner_loop_int = 21 with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop_int, cv_folds_outer_loop, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "cv_folds_inner_loop" to be a ' 'positive integer value in the range of 2 - 20' ) # Test cv_folds_outer_loop type is set to 'loocv' or an integer value cv_folds_outer_loop_float = 2.3 with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop_float, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "cv_folds_outer_loop" to be set to ' 'either "loocv" (leave-one-out cross-validation) or a positive ' 'integer in the range of 2 - 20' ) # Test cv_folds_outer_loop type is set to 'loocv' or an integer value cv_folds_outer_loop_str = '' with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop_str, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "cv_folds_outer_loop" to be set to ' 'either "loocv" (leave-one-out cross-validation) or a positive ' 'integer in the range of 2 - 20' ) # Test cv_folds_outer_loop is an integer in the range of 2 - 20 cv_folds_outer_loop_int = 1 with self.assertRaises(ValueError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop_int, draw_conf_mat, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "cv_folds_outer_loop" to be set to ' 'either "loocv" (leave-one-out cross-validation) or a positive ' 'integer in the range of 2 - 20' ) # Test draw_conf_mat type is a Boolean draw_conf_mat_float = 0.0 with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, 
resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat_float, plt_name, True ) self.assertEqual( str(message.exception), 'Expect "draw_conf_mat" to be a Boolean ' 'value (True or False)' ) # Test plt_name plt_name_bool = False with self.assertRaises(TypeError) as message: check_arguments( 'PlaceHolder', x_train, y_train, train_groups, x_test, y_test, selected_features, splits, const_split, resampling_method, n_components_pca, run, fixed_params, tuned_params, train_scoring_metric, test_scoring_funcs, n_iter, cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name_bool, True ) self.assertEqual( str(message.exception), 'Expect "plt_name" to be a string' ) # Test passes with more complex default values from sklearn.metrics import precision_score x_train_ext = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) y_train_ext = np.array([2, 2, 2, 2, 2]) train_groups_ext = np.array([3, 3, 3, 3, 3]) x_test_ext = np.array([[4, 4, 4, 4]]) y_test_ext = np.array([5]) selected_features_ext = ['A', 'C', 'B'] splits_ext = [(np.array([6, 6, 6, 6, 6]), np.array([]))] const_split_ext = False resampling_method_ext = 'smote' n_components_pca_ext = 4 run_ext = 'train' fixed_params_ext = {'randomstate': 0} tuned_params_ext = {'n_estimators': np.linspace(5, 50, 10)} train_scoring_metric_ext = 'precision' test_scoring_funcs_ext = {precision_score: {'average': 'macro'}} n_iter_ext = 100 cv_folds_inner_loop_ext = 10 cv_folds_outer_loop_ext = 'loocv' draw_conf_mat_ext = False plt_name_ext = 'run_ml' output_str = check_arguments( 'PlaceHolder', x_train_ext, y_train_ext, train_groups_ext, x_test_ext, y_test_ext, selected_features_ext, splits_ext, const_split_ext, resampling_method_ext, n_components_pca_ext, run_ext, fixed_params_ext, tuned_params_ext, train_scoring_metric_ext, test_scoring_funcs_ext, n_iter_ext, cv_folds_inner_loop_ext, cv_folds_outer_loop_ext, draw_conf_mat_ext, plt_name_ext, True ) self.assertEqual(output_str, 'All checks passed') def test_class_initialisation(self): """ Tests initialisation of RunML class """ print('Testing RunML class') results_dir = 'tests/Temp_output' fluor_data = pd.DataFrame({}) classes = None subclasses = None shuffle = True # Test recognises that output directory already exists os.mkdir(results_dir) with self.assertRaises(FileExistsError) as message: test_ml_train = RunML( results_dir, fluor_data, classes, subclasses, shuffle, True ) self.assertEqual( str(message.exception), 'Directory {} already found in {}'.format(results_dir, os.getcwd()) ) shutil.rmtree('tests/Temp_output') # Test "classes" must be None or a list if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(TypeError) as message: test_ml_train = RunML( results_dir, fluor_data, np.array([]), subclasses, shuffle, True ) self.assertEqual( str(message.exception), 'Expect "classes" argument to be set either to None or to a list' ) # Test "subclasses" must be None or a list if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(TypeError) as message: test_ml_train = RunML( results_dir, fluor_data, [], np.array([]), shuffle, True ) self.assertEqual( str(message.exception), 'Expect "subclasses" argument to be set either to None or to a list' ) # Test that if "subclasses" is set to a value other than None, classes # cannot be set to None if os.path.isdir(results_dir): shutil.rmtree(results_dir) with 
self.assertRaises(TypeError) as message: test_ml_train = RunML( results_dir, fluor_data, classes, np.array([]), shuffle, True ) self.assertEqual( str(message.exception), 'If "subclasses" is set to a value other than None, then "classes" ' 'must also be set to a value other than None' ) # Tests that if subclasses list is defined, the entries in the list # are formatted as "class_subclass" if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, fluor_data, ['A', 'B', 'A', 'B'], ['A_1', 'B_1_', 'A_2', 'B_2'], shuffle, True ) self.assertEqual( str(message.exception), 'Entries in subclass list should be formatted as "class_subclass" ' '(in which neither "class" nor "subclass" contains the character ' '"_")' ) if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, fluor_data, ['A', 'B', 'A', 'B'], ['A_1', 'C_1', 'A_2', 'B_2'], shuffle, True ) self.assertEqual( str(message.exception), 'Entries in subclass list should be formatted as "class_subclass" ' '(in which neither "class" nor "subclass" contains the character ' '"_")' ) # Test requires "Analyte" column in fluor_data if classes is set to None if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(KeyError) as message: test_ml_train = RunML( results_dir, fluor_data, classes, subclasses, shuffle, True ) self.assertEqual( str(message.exception), '\'No "Analyte" column detected in input dataframe - if you ' 'do not define the "classes" argument, the input dataframe' ' must contain an "Analyte" column\'' ) # Tests that number of entries in "classes" and "subclasses" lists are # equal to one another and to the number of rows in "fluor_data" # dataframe if os.path.isdir(results_dir): shutil.rmtree(results_dir) fluor_data_df = pd.DataFrame({'Feature_1': [1, 3, 2, 4], 'Feature_2': [2, 4, 3, 1]}) classes_list = ['A', 'B', 'A', 'B'] subclasses_list = ['A_1', 'B_1', 'A_2', 'B_2'] with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, fluor_data_df, ['A', 'B', 'A', 'B', 'A'], subclasses_list, shuffle, True ) self.assertEqual( str(message.exception), 'Mismatch between number of entries in the input dataframe and ' 'the "classes" list' ) if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, fluor_data_df, classes_list, ['A_1', 'B_1', 'B_2'], shuffle, True ) self.assertEqual( str(message.exception), 'Mismatch between number of entries in the input dataframe and ' 'the "subclasses" list' ) # Tests prevents overwriting of "Classes" or "Subclasses" columns in # "fluor_data" dataframe if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(NameError) as message: test_ml_train = RunML( results_dir, pd.DataFrame({'Feature_1': [1, 3, 2, 4], 'Subclasses': [2, 4, 3, 1]}), classes_list, subclasses_list, shuffle, True ) self.assertEqual( str(message.exception), 'Please rename any columns in input dataframe labelled either ' '"Classes" or "Subclasses", as these columns are added to the ' 'dataframe by the code during data processing' ) # Tests no NaN or non-numeric values in "fluor_data" dataframe if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, np.nan, 1]}), ['A', 'B', 'A', 'B'], 
[np.nan, np.nan, np.nan, np.nan], shuffle, True ) self.assertEqual( str(message.exception), 'NaN detected in input dataframe' ) if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, pd.DataFrame({'Feature_1': [1, 3.0, 2, '4'], 'Feature_2': [2, 4, 3, 1]}), ['A', 'B', 'A', 'B'], [np.nan, np.nan, np.nan, np.nan], shuffle, True ) self.assertEqual( str(message.exception), 'Non-numeric value detected in input dataframe' ) # Tests no NaN values in "classes" list if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]}), ['A', 'B', 'A', np.nan], [np.nan, np.nan, np.nan, np.nan], shuffle, True ) self.assertEqual( str(message.exception), 'NaN detected in class values' ) # Tests "subclasses" list is not a mixture of NaN and other values if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(ValueError) as message: test_ml_train = RunML( results_dir, pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]}), ['A', 'B', 'A', 'B'], [np.nan, 1.0, np.nan, np.nan], shuffle, True ) self.assertEqual( str(message.exception), 'NaN detected in subclass values' ) # Tests that "shuffle" is a Boolean if os.path.isdir(results_dir): shutil.rmtree(results_dir) with self.assertRaises(TypeError) as message: test_ml_train = RunML( results_dir,
completion: pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]})
api: pandas.DataFrame
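For orientation, here is a minimal sketch of how the bootstrap_data function exercised in the test suite above appears to be called. The import path (train), the dummy arrays, and the meaning of the trailing positional flag are assumptions inferred from the tests, not a documented API.

import numpy as np
import pandas as pd

from train import bootstrap_data  # hypothetical import path; the tests only name the function

# Dummy data mirroring the shapes used in the tests: a 2D array of readings,
# a 1D array of class labels, and one feature name per column of x.
x = np.array([[1.0, 1.5, 1.2],
              [4.6, 2.3, 2.1],
              [1.0, 2.2, 1.8]])
y = np.array(['a', 'b', 'c'])
features = ['1', '2', '3']

# Per the num == 8 test case above, scale=False yields a pandas DataFrame of
# bootstrapped rows (columns named after "features") together with the
# matching list of resampled labels. The final True flag simply mirrors the
# calls in the tests; its role is not documented here.
boot_x, boot_y = bootstrap_data(x, y, features, False, True)
print(boot_x.shape, boot_y)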
import os from unittest import mock import responses from unittest.mock import patch, MagicMock from nextcode import jupyter from nextcode.exceptions import InvalidToken, InvalidProfile, ServerError from nextcode.services.query import jupyter from nextcode.services.query.exceptions import MissingRelations, QueryError from tests import BaseTestCase, REFRESH_TOKEN, AUTH_RESP, AUTH_URL from tests.test_query import QUERY_URL, ROOT_RESP import pandas as pd def setup_responses(): responses.add(responses.POST, AUTH_URL, json=AUTH_RESP) responses.add(responses.GET, QUERY_URL, json=ROOT_RESP) class JupyterTest(BaseTestCase): @responses.activate def setUp(self): super(JupyterTest, self).setUp() setup_responses() self.magics = jupyter.GorMagics() self.magics.shell = MagicMock() @responses.activate def test_basic_gor_magics(self): setup_responses() m = jupyter.GorMagics() m.handle_exception() os.environ["NEXTCODE_PROFILE"] = "notfound" with self.assertRaises(InvalidProfile): jupyter.get_service() del os.environ["NEXTCODE_PROFILE"] os.environ["GOR_API_KEY"] = REFRESH_TOKEN jupyter.get_service() @responses.activate def test_replace_vars(self): setup_responses() with self.assertRaises(Exception) as ex: self.magics.replace_vars("hello $not_found;") self.assertIn("Variable 'not_found' not found", str(ex.exception)) self.magics.shell.user_ns = {"found": 1} string = self.magics.replace_vars("hello $found;") self.assertEqual("hello 1;", string) @responses.activate def test_load_relations(self): with self.assertRaises(Exception) as ex: _ = self.magics.load_relations(["[not_found]"]) self.assertIn("Variable 'not_found' not found", str(ex.exception)) self.magics.shell.user_ns = {"found": 1} with self.assertRaises(Exception) as ex: string = self.magics.load_relations(["var:found", "var:alsofound"]) self.assertIn("found must be a pandas DataFrame object", str(ex.exception)) from pandas import DataFrame self.magics.shell.user_ns = {"found": DataFrame(), "alsofound": DataFrame()} _ = self.magics.load_relations(["var:found", "var:alsofound"]) def test_print_error(self): jupyter.print_error("test") def test_load_extension(self): setup_responses() m = MagicMock() with mock.patch("nextcode.services.query.jupyter.get_service"): jupyter.load_ipython_extension(m) with mock.patch( "nextcode.services.query.jupyter.get_service", side_effect=InvalidToken, ): jupyter.load_ipython_extension(m) with mock.patch( "nextcode.services.query.jupyter.get_service", side_effect=ServerError("Error"), ): jupyter.load_ipython_extension(m) class GorCommandTest(JupyterTest): @responses.activate def test_singleline(self): setup_responses() df = self.magics.gor("Hello") self.assertTrue(df is None) def mock_execute(*args, **kwargs): m = MagicMock() m.status = "DONE" m.line_count = 100 m.dataframe.return_value = pd.DataFrame() m.running.return_value = False return m m = MagicMock() m.execute = mock_execute with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello") self.assertTrue(isinstance(df, pd.DataFrame)) @responses.activate def test_not_done(self): setup_responses() df = self.magics.gor("Hello") self.assertTrue(df is None) def mock_execute(*args, **kwargs): m = MagicMock() m.status = "PENDING" m.line_count = 100 m.dataframe.return_value = pd.DataFrame() m.error = None m.running.return_value = False return m m = MagicMock() m.execute = mock_execute with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello") self.assertTrue(df is None) 
@responses.activate def test_multiline(self): setup_responses() def mock_execute(*args, **kwargs): m = MagicMock() m.status = "DONE" m.line_count = 999999999 m.dataframe.return_value = pd.DataFrame() m.running.return_value = False return m m = MagicMock() m.execute = mock_execute with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello", "World\nAnother world") self.assertTrue(isinstance(df, pd.DataFrame)) @responses.activate def test_operator(self): setup_responses() def mock_execute(*args, **kwargs): m = MagicMock() m.status = "DONE" m.line_count = 100 m.running.return_value = False return m m = MagicMock() m.execute = mock_execute with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("myvar <<", "gor #dbsnp#\nAnother line") self.assertTrue(df is None) dt = self.magics.gor("user_data/file.gorz <<", "gor #dbsnp#") self.assertTrue(df is None) @responses.activate def test_relations(self): setup_responses() m = MagicMock() with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello") self.assertTrue(df is None) def mock_execute(*args, **kwargs): m = MagicMock() m.status = "PENDING" m.line_count = 100 m.dataframe.return_value = pd.DataFrame() m.error = None m.running.return_value = False return m m = MagicMock() m.execute.side_effect = MissingRelations(relations=["a", "b"]) with mock.patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello") self.assertTrue(df is None) @responses.activate def test_keyboard_interrupt(self): setup_responses() df = self.magics.gor("Hello") self.assertTrue(df is None) def mock_execute(*args, **kwargs): m = MagicMock() m.status = "PENDING" m.dataframe.return_value = pd.DataFrame() m.error = None m.wait.side_effect = KeyboardInterrupt m.cancel.side_effect = QueryError("") m.running.return_value = True return m m = MagicMock() m.execute = mock_execute with patch("nextcode.services.query.jupyter.get_service", return_value=m): df = self.magics.gor("Hello") self.assertTrue(df is None) @responses.activate def test_gorls(self): setup_responses() def mock_execute(*args, **kwargs): m = MagicMock() m.status = "DONE" m.line_count = 100 m.dataframe.return_value =
completion: pd.DataFrame()
api: pandas.DataFrame
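The GorCommandTest cases above all rely on the same stubbing pattern: nextcode.services.query.jupyter.get_service is patched to return a mock service whose execute() yields a finished query exposing its result as a pandas DataFrame. A condensed sketch of that pattern follows; the helper name and the DataFrame contents are illustrative only.

from unittest import mock
from unittest.mock import MagicMock
import pandas as pd

def make_fake_service(df):
    # A "DONE" query object whose dataframe() returns the supplied frame,
    # matching the attributes set in the mock_execute helpers above.
    query = MagicMock()
    query.status = "DONE"
    query.line_count = len(df)
    query.dataframe.return_value = df
    query.running.return_value = False

    svc = MagicMock()
    svc.execute.return_value = query
    return svc

fake = make_fake_service(pd.DataFrame({"Chrom": ["chr1"], "Pos": [1]}))
with mock.patch("nextcode.services.query.jupyter.get_service", return_value=fake):
    # Inside this context a call such as magics.gor("gor #dbsnp#") would
    # receive the stubbed DataFrame instead of hitting a live query service.
    pass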
# -*- coding: utf-8 -*- import csv import os import platform import codecs import re import sys from datetime import datetime import pytest import numpy as np from pandas._libs.lib import Timestamp import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Series, Index, MultiIndex from pandas import compat from pandas.compat import (StringIO, BytesIO, PY3, range, lrange, u) from pandas.errors import DtypeWarning, EmptyDataError, ParserError from pandas.io.common import URLError from pandas.io.parsers import TextFileReader, TextParser class ParserTests(object): """ Want to be able to test either C+Cython or Python+Cython parsers """ data1 = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ def test_empty_decimal_marker(self): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = 'Only length-1 decimal markers supported' with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), decimal='') def test_bad_stream_exception(self): # Issue 13652: # This test validates that both python engine # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') # stream must be binary UTF8 stream = codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter) if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" with tm.assert_raises_regex(UnicodeDecodeError, msg): self.read_csv(stream) stream.close() def test_read_csv(self): if not compat.PY3: if compat.is_platform_windows(): prefix = u("file:///") else: prefix = u("file://") fname = prefix + compat.text_type(self.csv1) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334, 13], 'C': [5, 10.] 
}) df = self.read_csv(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) def test_squeeze(self): data = """\ a,1 b,2 c,3 """ idx = Index(['a', 'b', 'c'], name=0) expected = Series([1, 2, 3], name=1, index=idx) result = self.read_table(StringIO(data), sep=',', index_col=0, header=None, squeeze=True) assert isinstance(result, Series) tm.assert_series_equal(result, expected) def test_squeeze_no_view(self): # see gh-8217 # Series should not be a view data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13""" result = self.read_csv(StringIO(data), index_col='time', squeeze=True) assert not result._is_view def test_malformed(self): # see gh-6607 # all data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#') # first chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(5) # middle chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(3) # last chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read() # skipfooter is not supported with the C parser yet if self.engine == 'python': # skipfooter data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 footer """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#', skipfooter=1) def test_quoting(self): bad_line_small = """printer\tresult\tvariant_name Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten"" Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa pytest.raises(Exception, self.read_table, StringIO(bad_line_small), sep='\t') good_line_small = bad_line_small + '"' df = self.read_table(StringIO(good_line_small), sep='\t') assert len(df) == 3 def test_unnamed_columns(self): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ expected = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64) df = self.read_table(StringIO(data), sep=',') tm.assert_almost_equal(df.values, expected) tm.assert_index_equal(df.columns, Index(['A', 'B', 'C', 'Unnamed: 3', 'Unnamed: 4'])) def test_csv_mixed_type(self): data = """A,B,C a,1,2 b,3,4 c,4,5 """ expected = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 3, 4], 'C': [2, 4, 5]}) out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, 
parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D'])) assert df.index.name == 'index' assert isinstance( df.index[0], (datetime, np.datetime64, Timestamp)) assert df.values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_csv_no_index_name(self): df = self.read_csv(self.csv2, index_col=0, parse_dates=True) df2 = self.read_table(self.csv2, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D', 'E'])) assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp)) assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_table_unicode(self): fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8')) df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None) assert isinstance(df1[0].values[0], compat.text_type) def test_read_table_wrong_num_columns(self): # too few! data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ pytest.raises(ValueError, self.read_csv, StringIO(data)) def test_read_duplicate_index_explicit(self): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ result = self.read_csv(StringIO(data), index_col=0) expected = self.read_csv(StringIO(data)).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) result = self.read_table(StringIO(data), sep=',', index_col=0) expected = self.read_table(StringIO(data), sep=',', ).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(self): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ # make sure an error isn't thrown self.read_csv(StringIO(data)) self.read_table(StringIO(data), sep=',') def test_parse_bools(self): data = """A,B True,1 False,2 True,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B YES,1 no,2 yes,3 No,3 Yes,3 """ data = self.read_csv(StringIO(data), true_values=['yes', 'Yes', 'YES'], false_values=['no', 'NO', 'No']) assert data['A'].dtype == np.bool_ data = """A,B TRUE,1 FALSE,2 TRUE,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B foo,bar bar,foo""" result = self.read_csv(StringIO(data), true_values=['foo'], false_values=['bar']) expected = DataFrame({'A': [True, False], 'B': [False, True]}) tm.assert_frame_equal(result, expected) def test_int_conversion(self): data = """A,B 1.0,1 2.0,2 3.0,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.float64 assert data['B'].dtype == np.int64 def test_read_nrows(self): expected = self.read_csv(StringIO(self.data1))[:3] df = self.read_csv(StringIO(self.data1), nrows=3) tm.assert_frame_equal(df, expected) # see gh-10476 df = self.read_csv(StringIO(self.data1), nrows=3.0) tm.assert_frame_equal(df, expected) msg = r"'nrows' must be an integer >=0" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=1.2) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=-1) def test_read_chunksize(self): reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(reader) 
tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) # with invalid chunksize value: msg = r"'chunksize' must be an integer >=1" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=1.3) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=0) def test_read_chunksize_and_nrows(self): # gh-15755 # With nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # chunksize > nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # with changing "size": reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5]) with pytest.raises(StopIteration): reader.get_chunk(size=3) def test_read_chunksize_named(self): reader = self.read_csv( StringIO(self.data1), index_col='index', chunksize=2) df = self.read_csv(StringIO(self.data1), index_col='index') chunks = list(reader) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) def test_get_chunk_passed_chunksize(self): data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" result = self.read_csv(StringIO(data), chunksize=2) piece = result.get_chunk() assert len(piece) == 2 def test_read_chunksize_generated_index(self): # GH 12185 reader = self.read_csv(StringIO(self.data1), chunksize=2) df = self.read_csv(StringIO(self.data1)) tm.assert_frame_equal(pd.concat(reader), df) reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0) df = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(pd.concat(reader), df) def test_read_text_list(self): data = """A,B,C\nfoo,1,2,3\nbar,4,5,6""" as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar', '4', '5', '6']] df = self.read_csv(StringIO(data), index_col=0) parser = TextParser(as_list, index_col=0, chunksize=2) chunk = parser.read(None) tm.assert_frame_equal(chunk, df) def test_iterator(self): # See gh-6607 reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True) df = self.read_csv(StringIO(self.data1), index_col=0) chunk = reader.read(3) tm.assert_frame_equal(chunk, df[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, df[3:]) # pass list lines = list(csv.reader(StringIO(self.data1))) parser = TextParser(lines, index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) # pass skiprows parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[1:3]) treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, iterator=True) assert isinstance(treader, TextFileReader) # gh-3967: stopping iteration when chunksize is specified data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = self.read_csv(StringIO(data), iterator=True) result = 
list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) tm.assert_frame_equal(result[0], expected) # chunksize = 1 reader = self.read_csv(StringIO(data), chunksize=1) result = list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) assert len(result) == 3 tm.assert_frame_equal(pd.concat(result), expected) # skipfooter is not supported with the C parser yet if self.engine == 'python': # test bad parameter (skipfooter) reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True, skipfooter=1) pytest.raises(ValueError, reader.read, 3) def test_pass_names_with_index(self): lines = self.data1.split('\n') no_header = '\n'.join(lines[1:]) # regular index names = ['index', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=0, names=names) expected = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(df, expected) # multi index data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['index1', 'index2', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=[0, 1], names=names) expected = self.read_csv(StringIO(data), index_col=[0, 1]) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(data), index_col=['index1', 'index2']) tm.assert_frame_equal(df, expected) def test_multi_index_no_level_names(self): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ data2 = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=[0, 1], header=None, names=names) expected = self.read_csv(
StringIO(data)
pandas.compat.StringIO
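# The tests above drive read_csv/read_table through the parser test mixin
# (self.read_csv dispatches to whichever engine is under test).  As a rough
# standalone sketch -- assuming a recent pandas where io.StringIO stands in
# for the old pandas.compat.StringIO shim -- the same in-memory parsing and
# chunked reading looks like this:
from io import StringIO

import pandas as pd

data = "index,A,B\nfoo,1,2\nbar,3,4\nbaz,5,6\n"

# Parse the whole buffer at once ...
df = pd.read_csv(StringIO(data), index_col=0)

# ... or stream it in chunks of two rows; each chunk is a regular DataFrame.
chunks = list(pd.read_csv(StringIO(data), index_col=0, chunksize=2))
assert len(chunks) == 2
assert chunks[0].equals(df.iloc[:2])
assert pd.concat(chunks).equals(df)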
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 18:07:56 2020

@author: Fabio
"""
import pandas as pd
import matplotlib.pyplot as plt


def df_filterbydate(df, dataLB, dataUB):
    # keep only the registrations between the two dates (inclusive)
    df['Data_Registrazione'] = pd.to_datetime(df['Data_Registrazione'],
                                              infer_datetime_format=True).dt.date
    df = df[(df['Data_Registrazione'] >= dataLB) & (df['Data_Registrazione'] <= dataUB)]
    return df


def get_df_classi(df, soglia=180):
    # split into the profit classes: not profitable == 0 / profitable == 1 / highly profitable == 2
    df0 = df[df['profitto'] <= 0]
    df1 = df[(df['profitto'] > 0) & (df['profitto'] <= soglia)]
    df2 = df[df['profitto'] > soglia]
    return df0, df1, df2


def get_classi_cliente(cliente, df, soglia):
    # extract the single client's frame, considering all of its producers
    df_cliente = df[df['Produttore'] == cliente]
    visit_num = len(df_cliente.index)
    df0, df1, df2 = get_df_classi(df_cliente, soglia)
    class_0c = len(df0.index)  # number of non-profitable orders
    class_1c = len(df1.index)  # number of profitable orders
    class_2c = len(df2.index)  # number of highly profitable orders
    class_c = [class_0c, class_1c, class_2c]
    class_0p = class_0c / visit_num
    class_1p = class_1c / visit_num
    class_2p = class_2c / visit_num
    class_p = [class_0p, class_1p, class_2p]
    class_0s = df0['profitto'].sum()  # total profit of the non-profitable orders
    class_1s = df1['profitto'].sum()  # total profit of the profitable orders
    class_2s = df2['profitto'].sum()  # total profit of the highly profitable orders
    class_s = [class_0s, class_1s, class_2s]
    if (class_0s >= class_1s + class_2s) or (class_0p > class_1p and class_0p > class_2p):
        color = 'red'  # not profitable
        classe = 0
    elif (class_1p >= class_0p) and (class_1p >= class_2p):
        color = 'lightblue'  # profitable
        classe = 1
    elif (class_2p >= class_0p) and (class_2p >= class_1p):
        color = 'blue'  # highly profitable
        classe = 2
    return class_c, class_p, class_s, color, classe


def get_classi_produttore(cliente, lat, long, df, soglia):
    # frame of the single client restricted to the producer at the given coordinates
    df_produttore = df[(df['Produttore'] == cliente) & (df['lat_P'] == lat) & (df['long_P'] == long)]
    visit_num = len(df_produttore.index)
    df0, df1, df2 = get_df_classi(df_produttore, soglia)
    class_0c = df0['profitto'].count()  # number of non-profitable orders
    class_1c = df1['profitto'].count()  # number of profitable orders
    class_2c = df2['profitto'].count()  # number of highly profitable orders
    class_c = [class_0c, class_1c, class_2c]
    class_0p = class_0c / visit_num
    class_1p = class_1c / visit_num
    class_2p = class_2c / visit_num
    class_p = [class_0p, class_1p, class_2p]
    class_0s = df0['profitto'].sum()  # total profit of the non-profitable orders
    class_1s = df1['profitto'].sum()  # total profit of the profitable orders
    class_2s = df2['profitto'].sum()  # total profit of the highly profitable orders
    class_s = [class_0s, class_1s, class_2s]
    if (class_0s >= class_1s + class_2s) or (class_0p > class_1p and class_0p > class_2p):
        color = 'red'  # not profitable
        classe = 0
    elif (class_1p >= class_0p) and (class_1p >= class_2p):
        color = 'blue'  # profitable
        classe = 1
    elif (class_2p >= class_0p) and (class_2p >= class_1p):
        color = 'darkblue'  # highly profitable
        classe = 2
    return class_c, class_p, class_s, color, classe


def filtro_ordine_peggiore(df, df2):
    # return each client's worst orders within the class given by the input df; df2 is the full frame
    produttori = df['Produttore'].drop_duplicates().tolist()
    ordini_peggiori = []
    for produttore in produttori:
        # pick the client's worst order by profit
        peggiore = min(df[df['Produttore'] == produttore]['profitto'])
        # indices of the worst orders
        peggiore_index = df[df['profitto'] == peggiore].index
        # their fiscal number
        num_ordine = df2.iloc[peggiore_index]['NumFiscale'].values
        # collect producer, fiscal number and profit of the worst orders
        ordini_peggiori.append([produttore, num_ordine, peggiore])
    # turn the list into a DataFrame
    df_ordini = pd.DataFrame(data=ordini_peggiori, columns=['Produttore', 'NumFiscale', 'profitto'])
    return df_ordini


def filtro_classifica(df):
    # for df_non_prof / df_prof / df_max_prof: rank the clients by total profit, descending
    classifica = df.sort_values(by='profitto', ascending=False)
    profitto = classifica['profitto']
    produttori = classifica['Produttore']
    df_classifica = pd.DataFrame()
    df_classifica['Produttore'] = produttori
    df_classifica['profitto'] = profitto
    return df_classifica


def best_n(df, n):
    df_best = df.nlargest(n, 'profitto')
    produttori_list = df_best['Produttore'].tolist()
    return df_best, produttori_list


def worst_n(df, n):
    df_worst = df.nsmallest(n, 'profitto')
    produttori_list = df_worst['Produttore'].tolist()
    return df_worst, produttori_list


def grafici(df, df2, dfn, dfp, dfo, hist, pie, best, worst):
    # inputs: df plus the three class frames dfn/dfp/dfo; df2 is the producers frame.
    # Pass a value for hist/pie (or both) and None for the one not wanted; the same
    # goes for best/worst, but not both at once.
    ax_i = []
    ax_t = []
    # which chart type was requested
    if (pd.isna(hist) == False) & (pd.isna(pie) == False):
        # best or worst clients?  (best_n/worst_n return (frame, names): keep the names)
        if pd.isna(best) == False:
            produttori = best_n(df2, 10)[1]
        elif pd.isna(worst) == False:
            produttori = worst_n(df2, 10)[1]
        else:
            produttori = []
        for produttore in produttori:
            # histogram: take the producer's orders series from df and plot it
            serie = df[df['Produttore'] == produttore]['profitto']
            figure, axes = plt.subplots(1, 1)
            plt.title(produttore)
            ax = serie.plot.hist()
            plt.ylabel('numero commesse')
            ax_i.append([produttore, ax])
            # pie chart: take the producer's orders series from dfn, dfp and dfo
            serie_neg = dfn[dfn['Produttore'] == produttore]['profitto']
            serie_prof = dfp[dfp['Produttore'] == produttore]['profitto']
            serie_max = dfo[dfo['Produttore'] == produttore]['profitto']
            # plot how many times the producer falls in each class
            y = [len(serie_neg), len(serie_prof), len(serie_max)]
            label = ['non profittevoli', 'profittevoli', 'ottimo profitto']
            figure, ax = plt.subplots()
            plt.title(produttore)
            ax.pie(y)
            plt.legend(label, loc="best")
            ax_t.append([produttore, ax])
    elif pd.isna(hist) == False:
        if pd.isna(best) == False:
            produttori = best_n(df2, 10)[1]
        elif
pd.isna(worst)
pandas.isna
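# A small usage sketch for the helpers above (hypothetical toy data; it assumes
# get_classi_cliente from the script above is in scope and that the frame has
# the 'Produttore' / 'profitto' columns it expects):
import pandas as pd

toy = pd.DataFrame({
    "Produttore": ["ACME", "ACME", "ACME", "Rossi"],
    "profitto": [-50, 120, 400, 30],
})

counts, shares, sums, color, classe = get_classi_cliente("ACME", toy, soglia=180)
print(counts)           # [1, 1, 1] -> one ACME order per class
print(classe, color)    # 1 'lightblue': equal shares, so the "profitable" branch wins
print(pd.isna(None))    # True -- grafici() uses pd.isna on its hist/pie/best/worst flags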
# -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 import itertools import warnings from warnings import catch_warnings from datetime import datetime from pandas.types.common import (is_integer_dtype, is_float_dtype, is_scalar) from pandas.compat import range, lrange, lzip, StringIO, lmap from pandas.tslib import NaT from numpy import nan from numpy.random import randn import numpy as np import pandas as pd from pandas import option_context from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice from pandas.core.api import (DataFrame, Index, Series, Panel, isnull, MultiIndex, Timestamp, Timedelta, UInt64Index) from pandas.formats.printing import pprint_thing from pandas import concat from pandas.core.common import PerformanceWarning from pandas.tests.indexing.common import _mklbl import pandas.util.testing as tm from pandas import date_range _verbose = False # ------------------------------------------------------------------------ # Indexing test cases def _generate_indices(f, values=False): """ generate the indicies if values is True , use the axis values is False, use the range """ axes = f.axes if values: axes = [lrange(len(a)) for a in axes] return itertools.product(*axes) def _get_value(f, i, values=False): """ return the value for the location i """ # check agains values if values: return f.values[i] # this is equiv of f[col][row]..... # v = f # for a in reversed(i): # v = v.__getitem__(a) # return v with catch_warnings(record=True): return f.ix[i] def _get_result(obj, method, key, axis): """ return the result for this obj with this key and this axis """ if isinstance(key, dict): key = key[axis] # use an artifical conversion to map the key as integers to the labels # so ix can work for comparisions if method == 'indexer': method = 'ix' key = obj._get_axis(axis)[key] # in case we actually want 0 index slicing try: xp = getattr(obj, method).__getitem__(_axify(obj, key, axis)) except: xp = getattr(obj, method).__getitem__(key) return xp def _axify(obj, key, axis): # create a tuple accessor axes = [slice(None)] * obj.ndim axes[axis] = key return tuple(axes) class TestIndexing(tm.TestCase): _objs = set(['series', 'frame', 'panel']) _typs = set(['ints', 'uints', 'labels', 'mixed', 'ts', 'floats', 'empty', 'ts_rev']) def setUp(self): self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2)) self.frame_ints = DataFrame(np.random.randn(4, 4), index=lrange(0, 8, 2), columns=lrange(0, 12, 3)) self.panel_ints = Panel(np.random.rand(4, 4, 4), items=lrange(0, 8, 2), major_axis=lrange(0, 12, 3), minor_axis=lrange(0, 16, 4)) self.series_uints = Series(np.random.rand(4), index=UInt64Index(lrange(0, 8, 2))) self.frame_uints = DataFrame(np.random.randn(4, 4), index=UInt64Index(lrange(0, 8, 2)), columns=UInt64Index(lrange(0, 12, 3))) self.panel_uints = Panel(np.random.rand(4, 4, 4), items=UInt64Index(lrange(0, 8, 2)), major_axis=UInt64Index(lrange(0, 12, 3)), minor_axis=UInt64Index(lrange(0, 16, 4))) self.series_labels = Series(np.random.randn(4), index=list('abcd')) self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD')) self.panel_labels = Panel(np.random.randn(4, 4, 4), items=list('abcd'), major_axis=list('ABCD'), minor_axis=list('ZYXW')) self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8]) self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, 'null', 8]) self.panel_mixed = Panel(np.random.randn(4, 4, 4), items=[2, 4, 'null', 8]) self.series_ts = Series(np.random.randn(4), 
index=date_range('20130101', periods=4)) self.frame_ts = DataFrame(np.random.randn(4, 4), index=date_range('20130101', periods=4)) self.panel_ts = Panel(np.random.randn(4, 4, 4), items=date_range('20130101', periods=4)) dates_rev = (date_range('20130101', periods=4) .sort_values(ascending=False)) self.series_ts_rev = Series(np.random.randn(4), index=dates_rev) self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev) self.panel_ts_rev = Panel(np.random.randn(4, 4, 4), items=dates_rev) self.frame_empty = DataFrame({}) self.series_empty = Series({}) self.panel_empty = Panel({}) # form agglomerates for o in self._objs: d = dict() for t in self._typs: d[t] = getattr(self, '%s_%s' % (o, t), None) setattr(self, o, d) def check_values(self, f, func, values=False): if f is None: return axes = f.axes indicies = itertools.product(*axes) for i in indicies: result = getattr(f, func)[i] # check agains values if values: expected = f.values[i] else: expected = f for a in reversed(i): expected = expected.__getitem__(a) tm.assert_almost_equal(result, expected) def check_result(self, name, method1, key1, method2, key2, typs=None, objs=None, axes=None, fails=None): def _eq(t, o, a, obj, k1, k2): """ compare equal for these 2 keys """ if a is not None and a > obj.ndim - 1: return def _print(result, error=None): if error is not None: error = str(error) v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s," "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name, result, t, o, method1, method2, a, error or '')) if _verbose: pprint_thing(v) try: rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a)) try: xp = _get_result(obj, method2, k2, a) except: result = 'no comp' _print(result) return detail = None try: if is_scalar(rs) and is_scalar(xp): self.assertEqual(rs, xp) elif xp.ndim == 1: tm.assert_series_equal(rs, xp) elif xp.ndim == 2: tm.assert_frame_equal(rs, xp) elif xp.ndim == 3: tm.assert_panel_equal(rs, xp) result = 'ok' except AssertionError as e: detail = str(e) result = 'fail' # reverse the checks if fails is True: if result == 'fail': result = 'ok (fail)' _print(result) if not result.startswith('ok'): raise AssertionError(detail) except AssertionError: raise except Exception as detail: # if we are in fails, the ok, otherwise raise it if fails is not None: if isinstance(detail, fails): result = 'ok (%s)' % type(detail).__name__ _print(result) return result = type(detail).__name__ raise AssertionError(_print(result, error=detail)) if typs is None: typs = self._typs if objs is None: objs = self._objs if axes is not None: if not isinstance(axes, (tuple, list)): axes = [axes] else: axes = list(axes) else: axes = [0, 1, 2] # check for o in objs: if o not in self._objs: continue d = getattr(self, o) for a in axes: for t in typs: if t not in self._typs: continue obj = d[t] if obj is not None: obj = obj.copy() k2 = key2 _eq(t, o, a, obj, key1, k2) def test_ix_deprecation(self): # GH 15114 df = DataFrame({'A': [1, 2, 3]}) with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): df.ix[1, 'A'] def test_indexer_caching(self): # GH5727 # make sure that indexers are in the _internal_names_set n = 1000001 arrays = [lrange(n), lrange(n)] index = MultiIndex.from_tuples(lzip(*arrays)) s = Series(np.zeros(n), index=index) str(s) # setitem expected = Series(np.ones(n), index=index) s = Series(np.zeros(n), index=index) s[s == 0] = 1 tm.assert_series_equal(s, expected) def test_at_and_iat_get(self): def _check(f, func, values=False): if f is not None: indicies = _generate_indices(f, values) 
for i in indicies: result = getattr(f, func)[i] expected = _get_value(f, i, values) tm.assert_almost_equal(result, expected) for o in self._objs: d = getattr(self, o) # iat for f in [d['ints'], d['uints']]: _check(f, 'iat', values=True) for f in [d['labels'], d['ts'], d['floats']]: if f is not None: self.assertRaises(ValueError, self.check_values, f, 'iat') # at for f in [d['ints'], d['uints'], d['labels'], d['ts'], d['floats']]: _check(f, 'at') def test_at_and_iat_set(self): def _check(f, func, values=False): if f is not None: indicies = _generate_indices(f, values) for i in indicies: getattr(f, func)[i] = 1 expected = _get_value(f, i, values) tm.assert_almost_equal(expected, 1) for t in self._objs: d = getattr(self, t) # iat for f in [d['ints'], d['uints']]: _check(f, 'iat', values=True) for f in [d['labels'], d['ts'], d['floats']]: if f is not None: self.assertRaises(ValueError, _check, f, 'iat') # at for f in [d['ints'], d['uints'], d['labels'], d['ts'], d['floats']]: _check(f, 'at') def test_at_iat_coercion(self): # as timestamp is not a tuple! dates = date_range('1/1/2000', periods=8) df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) s = df['A'] result = s.at[dates[5]] xp = s.values[5] self.assertEqual(result, xp) # GH 7729 # make sure we are boxing the returns s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]') expected = Timestamp('2014-02-02') for r in [lambda: s.iat[1], lambda: s.iloc[1]]: result = r() self.assertEqual(result, expected) s = Series(['1 days', '2 days'], dtype='timedelta64[ns]') expected = Timedelta('2 days') for r in [lambda: s.iat[1], lambda: s.iloc[1]]: result = r() self.assertEqual(result, expected) def test_iat_invalid_args(self): pass def test_imethods_with_dups(self): # GH6493 # iat/iloc with dups s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64') result = s.iloc[2] self.assertEqual(result, 2) result = s.iat[2] self.assertEqual(result, 2) self.assertRaises(IndexError, lambda: s.iat[10]) self.assertRaises(IndexError, lambda: s.iat[-10]) result = s.iloc[[2, 3]] expected = Series([2, 3], [2, 2], dtype='int64') tm.assert_series_equal(result, expected) df = s.to_frame() result = df.iloc[2] expected = Series(2, index=[0], name=2) tm.assert_series_equal(result, expected) result = df.iat[2, 0] expected = 2 self.assertEqual(result, 2) def test_repeated_getitem_dups(self): # GH 5678 # repeated gettitems on a dup index returing a ndarray df = DataFrame( np.random.random_sample((20, 5)), index=['ABCDE' [x % 5] for x in range(20)]) expected = df.loc['A', 0] result = df.loc[:, 0].loc['A'] tm.assert_series_equal(result, expected) def test_iloc_exceeds_bounds(self): # GH6296 # iloc should allow indexers that exceed the bounds df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE')) expected = df # lists of positions should raise IndexErrror! 
with tm.assertRaisesRegexp(IndexError, 'positional indexers are out-of-bounds'): df.iloc[:, [0, 1, 2, 3, 4, 5]] self.assertRaises(IndexError, lambda: df.iloc[[1, 30]]) self.assertRaises(IndexError, lambda: df.iloc[[1, -30]]) self.assertRaises(IndexError, lambda: df.iloc[[100]]) s = df['A'] self.assertRaises(IndexError, lambda: s.iloc[[100]]) self.assertRaises(IndexError, lambda: s.iloc[[-100]]) # still raise on a single indexer msg = 'single positional indexer is out-of-bounds' with tm.assertRaisesRegexp(IndexError, msg): df.iloc[30] self.assertRaises(IndexError, lambda: df.iloc[-30]) # GH10779 # single positive/negative indexer exceeding Series bounds should raise # an IndexError with tm.assertRaisesRegexp(IndexError, msg): s.iloc[30] self.assertRaises(IndexError, lambda: s.iloc[-30]) # slices are ok result = df.iloc[:, 4:10] # 0 < start < len < stop expected = df.iloc[:, 4:] tm.assert_frame_equal(result, expected) result = df.iloc[:, -4:-10] # stop < 0 < start < len expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down) expected = df.iloc[:, :4:-1] tm.assert_frame_equal(result, expected) result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down) expected = df.iloc[:, 4::-1] tm.assert_frame_equal(result, expected) result = df.iloc[:, -10:4] # start < 0 < stop < len expected = df.iloc[:, :4] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:4] # 0 < stop < len < start expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down) expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:11] # 0 < len < start < stop expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) # slice bounds exceeding is ok result = s.iloc[18:30] expected = s.iloc[18:] tm.assert_series_equal(result, expected) result = s.iloc[30:] expected = s.iloc[:0] tm.assert_series_equal(result, expected) result = s.iloc[30::-1] expected = s.iloc[::-1] tm.assert_series_equal(result, expected) # doc example def check(result, expected): str(result) result.dtypes tm.assert_frame_equal(result, expected) dfl = DataFrame(np.random.randn(5, 2), columns=list('AB')) check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index)) check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]]) check(dfl.iloc[4:6], dfl.iloc[[4]]) self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]]) self.assertRaises(IndexError, lambda: dfl.iloc[:, 4]) def test_iloc_getitem_int(self): # integer self.check_result('integer', 'iloc', 2, 'ix', {0: 4, 1: 6, 2: 8}, typs=['ints', 'uints']) self.check_result('integer', 'iloc', 2, 'indexer', 2, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) def test_iloc_getitem_neg_int(self): # neg integer self.check_result('neg int', 'iloc', -1, 'ix', {0: 6, 1: 9, 2: 12}, typs=['ints', 'uints']) self.check_result('neg int', 'iloc', -1, 'indexer', -1, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) def test_iloc_getitem_list_int(self): # list of ints self.check_result('list int', 'iloc', [0, 1, 2], 'ix', {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]}, typs=['ints', 'uints']) self.check_result('list int', 'iloc', [2], 'ix', {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints']) self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2], typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) # array of ints (GH5006), make sure that a single indexer is returning # the correct type self.check_result('array int', 
'iloc', np.array([0, 1, 2]), 'ix', {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]}, typs=['ints', 'uints']) self.check_result('array int', 'iloc', np.array([2]), 'ix', {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints']) self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer', [0, 1, 2], typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) def test_iloc_getitem_neg_int_can_reach_first_index(self): # GH10547 and GH10779 # negative integers should be able to reach index 0 df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]}) s = df['A'] expected = df.iloc[0] result = df.iloc[-3] tm.assert_series_equal(result, expected) expected = df.iloc[[0]] result = df.iloc[[-3]] tm.assert_frame_equal(result, expected) expected = s.iloc[0] result = s.iloc[-3] self.assertEqual(result, expected) expected = s.iloc[[0]] result = s.iloc[[-3]] tm.assert_series_equal(result, expected) # check the length 1 Series case highlighted in GH10547 expected = pd.Series(['a'], index=['A']) result = expected.iloc[[-1]] tm.assert_series_equal(result, expected) def test_iloc_getitem_dups(self): # no dups in panel (bug?) self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix', {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]}, objs=['series', 'frame'], typs=['ints', 'uints']) # GH 6766 df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}]) df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}]) df = concat([df1, df2], axis=1) # cross-sectional indexing result = df.iloc[0, 0] self.assertTrue(isnull(result)) result = df.iloc[0, :] expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'], name=0) tm.assert_series_equal(result, expected) def test_iloc_getitem_array(self): # array like s = Series(index=lrange(1, 4)) self.check_result('array like', 'iloc', s.index, 'ix', {0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]}, typs=['ints', 'uints']) def test_iloc_getitem_bool(self): # boolean indexers b = [True, False, True, False, ] self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints']) self.check_result('bool', 'iloc', b, 'ix', b, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) def test_iloc_getitem_slice(self): # slices self.check_result('slice', 'iloc', slice(1, 3), 'ix', {0: [2, 4], 1: [3, 6], 2: [4, 8]}, typs=['ints', 'uints']) self.check_result('slice', 'iloc', slice(1, 3), 'indexer', slice(1, 3), typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) def test_iloc_getitem_slice_dups(self): df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B']) df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2), columns=['A', 'C']) # axis=1 df = concat([df1, df2], axis=1) tm.assert_frame_equal(df.iloc[:, :4], df1) tm.assert_frame_equal(df.iloc[:, 4:], df2) df = concat([df2, df1], axis=1) tm.assert_frame_equal(df.iloc[:, :2], df2) tm.assert_frame_equal(df.iloc[:, 2:], df1) exp = concat([df2, df1.iloc[:, [0]]], axis=1) tm.assert_frame_equal(df.iloc[:, 0:3], exp) # axis=0 df = concat([df, df], axis=0) tm.assert_frame_equal(df.iloc[0:10, :2], df2) tm.assert_frame_equal(df.iloc[0:10, 2:], df1) tm.assert_frame_equal(df.iloc[10:, :2], df2) tm.assert_frame_equal(df.iloc[10:, 2:], df1) def test_iloc_setitem(self): df = self.frame_ints df.iloc[1, 1] = 1 result = df.iloc[1, 1] self.assertEqual(result, 1) df.iloc[:, 2:3] = 0 expected = df.iloc[:, 2:3] result = df.iloc[:, 2:3] tm.assert_frame_equal(result, expected) # GH5771 s = Series(0, index=[4, 5, 6]) s.iloc[1:2] += 1 expected = Series([0, 1, 0], index=[4, 5, 6]) tm.assert_series_equal(s, expected) def 
test_loc_setitem_slice(self): # GH10503 # assigning the same type should not change the type df1 = DataFrame({'a': [0, 1, 1], 'b': Series([100, 200, 300], dtype='uint32')}) ix = df1['a'] == 1 newb1 = df1.loc[ix, 'b'] + 1 df1.loc[ix, 'b'] = newb1 expected = DataFrame({'a': [0, 1, 1], 'b': Series([100, 201, 301], dtype='uint32')}) tm.assert_frame_equal(df1, expected) # assigning a new type should get the inferred type df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]}, dtype='uint64') ix = df1['a'] == 1 newb2 = df2.loc[ix, 'b'] df1.loc[ix, 'b'] = newb2 expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]}, dtype='uint64') tm.assert_frame_equal(df2, expected) def test_ix_loc_setitem_consistency(self): # GH 5771 # loc with slice and series s = Series(0, index=[4, 5, 6]) s.loc[4:5] += 1 expected = Series([1, 1, 0], index=[4, 5, 6]) tm.assert_series_equal(s, expected) # GH 5928 # chained indexing assignment df = DataFrame({'a': [0, 1, 2]}) expected = df.copy() with catch_warnings(record=True): expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a'] with catch_warnings(record=True): df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]] tm.assert_frame_equal(df, expected) df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]}) with catch_warnings(record=True): df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype( 'float64') + 0.5 expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]}) tm.assert_frame_equal(df, expected) # GH 8607 # ix setitem consistency df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580], 'delta': [1174, 904, 161], 'elapsed': [7673, 9277, 1470]}) expected = DataFrame({'timestamp': pd.to_datetime( [1413840976, 1413842580, 1413760580], unit='s'), 'delta': [1174, 904, 161], 'elapsed': [7673, 9277, 1470]}) df2 = df.copy() df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s') tm.assert_frame_equal(df2, expected) df2 = df.copy() df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s') tm.assert_frame_equal(df2, expected) df2 = df.copy() with catch_warnings(record=True): df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s') tm.assert_frame_equal(df2, expected) def test_ix_loc_consistency(self): # GH 8613 # some edge cases where ix/loc should return the same # this is not an exhaustive case def compare(result, expected): if is_scalar(expected): self.assertEqual(result, expected) else: self.assertTrue(expected.equals(result)) # failure cases for .loc, but these work for .ix df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD')) for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]), tuple([slice(0, 2), df.columns[0:2]])]: for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeDateIndex, tm.makePeriodIndex, tm.makeTimedeltaIndex]: df.index = index(len(df.index)) with catch_warnings(record=True): df.ix[key] self.assertRaises(TypeError, lambda: df.loc[key]) df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'), index=pd.date_range('2012-01-01', periods=5)) for key in ['2012-01-03', '2012-01-31', slice('2012-01-03', '2012-01-03'), slice('2012-01-03', '2012-01-04'), slice('2012-01-03', '2012-01-06', 2), slice('2012-01-03', '2012-01-31'), tuple([[True, True, True, False, True]]), ]: # getitem # if the expected raises, then compare the exceptions try: with catch_warnings(record=True): expected = df.ix[key] except KeyError: self.assertRaises(KeyError, lambda: df.loc[key]) continue result = df.loc[key] compare(result, expected) # setitem df1 = df.copy() df2 = df.copy() with catch_warnings(record=True): df1.ix[key] = 10 
df2.loc[key] = 10 compare(df2, df1) # edge cases s = Series([1, 2, 3, 4], index=list('abde')) result1 = s['a':'c'] with catch_warnings(record=True): result2 = s.ix['a':'c'] result3 = s.loc['a':'c'] tm.assert_series_equal(result1, result2) tm.assert_series_equal(result1, result3) # now work rather than raising KeyError s = Series(range(5), [-2, -1, 1, 2, 3]) with catch_warnings(record=True): result1 = s.ix[-10:3] result2 = s.loc[-10:3] tm.assert_series_equal(result1, result2) with catch_warnings(record=True): result1 = s.ix[0:3] result2 = s.loc[0:3] tm.assert_series_equal(result1, result2) def test_loc_setitem_dups(self): # GH 6541 df_orig = DataFrame( {'me': list('rttti'), 'foo': list('aaade'), 'bar': np.arange(5, dtype='float64') * 1.34 + 2, 'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me') indexer = tuple(['r', ['bar', 'bar2']]) df = df_orig.copy() df.loc[indexer] *= 2.0 tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer]) indexer = tuple(['r', 'bar']) df = df_orig.copy() df.loc[indexer] *= 2.0 self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer]) indexer = tuple(['t', ['bar', 'bar2']]) df = df_orig.copy() df.loc[indexer] *= 2.0 tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer]) def test_iloc_setitem_dups(self): # GH 6766 # iloc with a mask aligning from another iloc df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}]) df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}]) df = concat([df1, df2], axis=1) expected = df.fillna(3) expected['A'] = expected['A'].astype('float64') inds = np.isnan(df.iloc[:, 0]) mask = inds[inds].index df.iloc[mask, 0] = df.iloc[mask, 2] tm.assert_frame_equal(df, expected) # del a dup column across blocks expected = DataFrame({0: [1, 2], 1: [3, 4]}) expected.columns = ['B', 'B'] del df['A'] tm.assert_frame_equal(df, expected) # assign back to self df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] tm.assert_frame_equal(df, expected) # reversed x 2 df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index( drop=True) df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index( drop=True) tm.assert_frame_equal(df, expected) def test_chained_getitem_with_lists(self): # GH6394 # Regression in chained getitem indexing with embedded list-like from # 0.12 def check(result, expected): tm.assert_numpy_array_equal(result, expected) tm.assertIsInstance(result, np.ndarray) df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]}) expected = df['A'].iloc[2] result = df.loc[2, 'A'] check(result, expected) result2 = df.iloc[2]['A'] check(result2, expected) result3 = df['A'].loc[2] check(result3, expected) result4 = df['A'].iloc[2] check(result4, expected) def test_loc_getitem_int(self): # int label self.check_result('int label', 'loc', 2, 'ix', 2, typs=['ints', 'uints'], axes=0) self.check_result('int label', 'loc', 3, 'ix', 3, typs=['ints', 'uints'], axes=1) self.check_result('int label', 'loc', 4, 'ix', 4, typs=['ints', 'uints'], axes=2) self.check_result('int label', 'loc', 2, 'ix', 2, typs=['label'], fails=KeyError) def test_loc_getitem_label(self): # label self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'], axes=0) self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'], axes=0) self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0) self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1, typs=['ts'], axes=0) self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'], fails=KeyError) def test_loc_getitem_label_out_of_range(self): # out of 
range label self.check_result('label range', 'loc', 'f', 'ix', 'f', typs=['ints', 'uints', 'labels', 'mixed', 'ts'], fails=KeyError) self.check_result('label range', 'loc', 'f', 'ix', 'f', typs=['floats'], fails=TypeError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ints', 'uints', 'mixed'], fails=KeyError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['labels'], fails=TypeError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'], axes=0, fails=TypeError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'], axes=0, fails=TypeError) def test_loc_getitem_label_list(self): # list of labels self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0) self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1) self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2) self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix', ['a', 'b', 'd'], typs=['labels'], axes=0) self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix', ['A', 'B', 'C'], typs=['labels'], axes=1) self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix', ['Z', 'Y', 'W'], typs=['labels'], axes=2) self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix', [2, 8, 'null'], typs=['mixed'], axes=0) self.check_result('list lbl', 'loc', [Timestamp('20130102'), Timestamp('20130103')], 'ix', [Timestamp('20130102'), Timestamp('20130103')], typs=['ts'], axes=0) self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2], typs=['empty'], fails=KeyError) self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3], typs=['ints', 'uints'], axes=0, fails=KeyError) self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7], typs=['ints', 'uints'], axes=1, fails=KeyError) self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10], typs=['ints', 'uints'], axes=2, fails=KeyError) def test_loc_getitem_label_list_fails(self): # fails self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40], typs=['ints', 'uints'], axes=1, fails=KeyError) self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40], typs=['ints', 'uints'], axes=2, fails=KeyError) def test_loc_getitem_label_array_like(self): # array like self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index, 'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0) self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index, 'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1) self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index, 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2) def test_loc_getitem_bool(self): # boolean indexers b = [True, False, True, False] self.check_result('bool', 'loc', b, 'ix', b, typs=['ints', 'uints', 'labels', 'mixed', 'ts', 'floats']) self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'], fails=KeyError) def test_loc_getitem_int_slice(self): # ok self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4], typs=['ints', 'uints'], axes=0) self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6], typs=['ints', 'uints'], axes=1) self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8], typs=['ints', 'uints'], axes=2) # GH 3053 # loc should treat integer slices like label slices from itertools import product index = MultiIndex.from_tuples([t for t in product( [6, 7, 8], ['a', 'b'])]) df = DataFrame(np.random.randn(6, 6), index, index) result = df.loc[6:8, :] with catch_warnings(record=True): 
expected = df.ix[6:8, :] tm.assert_frame_equal(result, expected) index = MultiIndex.from_tuples([t for t in product( [10, 20, 30], ['a', 'b'])]) df = DataFrame(np.random.randn(6, 6), index, index) result = df.loc[20:30, :] with catch_warnings(record=True): expected = df.ix[20:30, :] tm.assert_frame_equal(result, expected) # doc examples result = df.loc[10, :] with catch_warnings(record=True): expected = df.ix[10, :] tm.assert_frame_equal(result, expected) result = df.loc[:, 10] # expected = df.ix[:,10] (this fails) expected = df[10] tm.assert_frame_equal(result, expected) def test_loc_to_fail(self): # GH3449 df = DataFrame(np.random.random((3, 3)), index=['a', 'b', 'c'], columns=['e', 'f', 'g']) # raise a KeyError? self.assertRaises(KeyError, df.loc.__getitem__, tuple([[1, 2], [1, 2]])) # GH 7496 # loc should not fallback s = Series() s.loc[1] = 1 s.loc['a'] = 2 self.assertRaises(KeyError, lambda: s.loc[-1]) self.assertRaises(KeyError, lambda: s.loc[[-1, -2]]) self.assertRaises(KeyError, lambda: s.loc[['4']]) s.loc[-1] = 3 result = s.loc[[-1, -2]] expected = Series([3, np.nan], index=[-1, -2]) tm.assert_series_equal(result, expected) s['a'] = 2 self.assertRaises(KeyError, lambda: s.loc[[-2]]) del s['a'] def f(): s.loc[[-2]] = 0 self.assertRaises(KeyError, f) # inconsistency between .loc[values] and .loc[values,:] # GH 7999 df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value']) def f(): df.loc[[3], :] self.assertRaises(KeyError, f) def f(): df.loc[[3]] self.assertRaises(KeyError, f) def test_at_to_fail(self): # at should not fallback # GH 7814 s = Series([1, 2, 3], index=list('abc')) result = s.at['a'] self.assertEqual(result, 1) self.assertRaises(ValueError, lambda: s.at[0]) df = DataFrame({'A': [1, 2, 3]}, index=list('abc')) result = df.at['a', 'A'] self.assertEqual(result, 1) self.assertRaises(ValueError, lambda: df.at['a', 0]) s = Series([1, 2, 3], index=[3, 2, 1]) result = s.at[1] self.assertEqual(result, 3) self.assertRaises(ValueError, lambda: s.at['a']) df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1]) result = df.at[1, 0] self.assertEqual(result, 3) self.assertRaises(ValueError, lambda: df.at['a', 0]) # GH 13822, incorrect error string with non-unique columns when missing # column is accessed df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]}) df.columns = ['x', 'x', 'z'] # Check that we get the correct value in the KeyError self.assertRaisesRegexp(KeyError, r"\['y'\] not in index", lambda: df[['x', 'y', 'z']]) def test_loc_getitem_label_slice(self): # label slices (with ints) self.check_result('lab slice', 'loc', slice(1, 3), 'ix', slice(1, 3), typs=['labels', 'mixed', 'empty', 'ts', 'floats'], fails=TypeError) # real label slices self.check_result('lab slice', 'loc', slice('a', 'c'), 'ix', slice('a', 'c'), typs=['labels'], axes=0) self.check_result('lab slice', 'loc', slice('A', 'C'), 'ix', slice('A', 'C'), typs=['labels'], axes=1) self.check_result('lab slice', 'loc', slice('W', 'Z'), 'ix', slice('W', 'Z'), typs=['labels'], axes=2) self.check_result('ts slice', 'loc', slice('20130102', '20130104'), 'ix', slice('20130102', '20130104'), typs=['ts'], axes=0) self.check_result('ts slice', 'loc', slice('20130102', '20130104'), 'ix', slice('20130102', '20130104'), typs=['ts'], axes=1, fails=TypeError) self.check_result('ts slice', 'loc', slice('20130102', '20130104'), 'ix', slice('20130102', '20130104'), typs=['ts'], axes=2, fails=TypeError) # GH 14316 self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'), 'indexer', [0, 1, 2], typs=['ts_rev'], axes=0) 
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8), typs=['mixed'], axes=0, fails=TypeError) self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8), typs=['mixed'], axes=1, fails=KeyError) self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8), typs=['mixed'], axes=2, fails=KeyError) self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice( 2, 4, 2), typs=['mixed'], axes=0, fails=TypeError) def test_loc_general(self): df = DataFrame( np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'], index=['A', 'B', 'C', 'D']) # want this to work result = df.loc[:, "A":"B"].iloc[0:2, :] self.assertTrue((result.columns == ['A', 'B']).all()) self.assertTrue((result.index == ['A', 'B']).all()) # mixed type result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0] expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0) tm.assert_series_equal(result, expected) self.assertEqual(result.dtype, object) def test_loc_setitem_consistency(self): # GH 6149 # coerce similary for setitem and loc when rows have a null-slice expected = DataFrame({'date': Series(0, index=range(5), dtype=np.int64), 'val': Series(range(5), dtype=np.int64)}) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series( range(5), dtype=np.int64)}) df.loc[:, 'date'] = 0 tm.assert_frame_equal(df, expected) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series(range(5), dtype=np.int64)}) df.loc[:, 'date'] = np.array(0, dtype=np.int64) tm.assert_frame_equal(df, expected) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series(range(5), dtype=np.int64)}) df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64) tm.assert_frame_equal(df, expected) expected = DataFrame({'date': Series('foo', index=range(5)), 'val': Series(range(5), dtype=np.int64)}) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series(range(5), dtype=np.int64)}) df.loc[:, 'date'] = 'foo' tm.assert_frame_equal(df, expected) expected = DataFrame({'date': Series(1.0, index=range(5)), 'val': Series(range(5), dtype=np.int64)}) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series(range(5), dtype=np.int64)}) df.loc[:, 'date'] = 1.0 tm.assert_frame_equal(df, expected) def test_loc_setitem_consistency_empty(self): # empty (essentially noops) expected = DataFrame(columns=['x', 'y']) expected['x'] = expected['x'].astype(np.int64) df = DataFrame(columns=['x', 'y']) df.loc[:, 'x'] = 1 tm.assert_frame_equal(df, expected) df = DataFrame(columns=['x', 'y']) df['x'] = 1 tm.assert_frame_equal(df, expected) def test_loc_setitem_consistency_slice_column_len(self): # .loc[:,column] setting with slice == len of the column # GH10408 data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse Region,Site,RespondentID,,,,, Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes, Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes, Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No""" df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2]) df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, ( 'Respondent', 'StartDate')]) df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, ( 'Respondent', 'EndDate')]) df.loc[:, ('Respondent', 'Duration')] = df.loc[:, ( 'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 
'StartDate')] df.loc[:, ('Respondent', 'Duration')] = df.loc[:, ( 'Respondent', 'Duration')].astype('timedelta64[s]') expected = Series([1380, 720, 840, 2160.], index=df.index, name=('Respondent', 'Duration')) tm.assert_series_equal(df[('Respondent', 'Duration')], expected) def test_loc_setitem_frame(self): df = self.frame_labels result = df.iloc[0, 0] df.loc['a', 'A'] = 1 result = df.loc['a', 'A'] self.assertEqual(result, 1) result = df.iloc[0, 0] self.assertEqual(result, 1) df.loc[:, 'B':'D'] = 0 expected = df.loc[:, 'B':'D'] with catch_warnings(record=True): result = df.ix[:, 1:] tm.assert_frame_equal(result, expected) # GH 6254 # setting issue df = DataFrame(index=[3, 5, 4], columns=['A']) df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64') expected = DataFrame(dict(A=Series( [1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4]) tm.assert_frame_equal(df, expected) # GH 6252 # setting with an empty frame keys1 = ['@' + str(i) for i in range(5)] val1 = np.arange(5, dtype='int64') keys2 = ['@' + str(i) for i in range(4)] val2 = np.arange(4, dtype='int64') index = list(set(keys1).union(keys2)) df = DataFrame(index=index) df['A'] = nan df.loc[keys1, 'A'] = val1 df['B'] = nan df.loc[keys2, 'B'] = val2 expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series( val2, index=keys2))).reindex(index=index) tm.assert_frame_equal(df, expected) # GH 8669 # invalid coercion of nan -> int df = DataFrame({'A': [1, 2, 3], 'B': np.nan}) df.loc[df.B > df.A, 'B'] = df.A expected = DataFrame({'A': [1, 2, 3], 'B': np.nan}) tm.assert_frame_equal(df, expected) # GH 6546 # setting with mixed labels df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']}) result = df.loc[0, [1, 2]] expected = Series([1, 3], index=[1, 2], dtype=object, name=0) tm.assert_series_equal(result, expected) expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']}) df.loc[0, [1, 2]] = [5, 6] tm.assert_frame_equal(df, expected) def test_loc_setitem_frame_multiples(self): # multiple setting df = DataFrame({'A': ['foo', 'bar', 'baz'], 'B': Series( range(3), dtype=np.int64)}) rhs = df.loc[1:2] rhs.index = df.index[0:2] df.loc[0:1] = rhs expected = DataFrame({'A': ['bar', 'baz', 'baz'], 'B': Series( [1, 2, 2], dtype=np.int64)}) tm.assert_frame_equal(df, expected) # multiple setting with frame on rhs (with M8) df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'), 'val': Series( range(5), dtype=np.int64)}) expected = DataFrame({'date': [Timestamp('20000101'), Timestamp( '20000102'), Timestamp('20000101'), Timestamp('20000102'), Timestamp('20000103')], 'val': Series( [0, 1, 0, 1, 2], dtype=np.int64)}) rhs = df.loc[0:2] rhs.index = df.index[2:5] df.loc[2:4] = rhs tm.assert_frame_equal(df, expected) def test_iloc_getitem_frame(self): df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2), columns=lrange(0, 8, 2)) result = df.iloc[2] with catch_warnings(record=True): exp = df.ix[4] tm.assert_series_equal(result, exp) result = df.iloc[2, 2] with catch_warnings(record=True): exp = df.ix[4, 4] self.assertEqual(result, exp) # slice result = df.iloc[4:8] with catch_warnings(record=True): expected = df.ix[8:14] tm.assert_frame_equal(result, expected) result = df.iloc[:, 2:3] with catch_warnings(record=True): expected = df.ix[:, 4:5] tm.assert_frame_equal(result, expected) # list of integers result = df.iloc[[0, 1, 3]] with catch_warnings(record=True): expected = df.ix[[0, 2, 6]] tm.assert_frame_equal(result, expected) result = df.iloc[[0, 1, 3], [0, 1]] with catch_warnings(record=True): expected = df.ix[[0, 2, 6], [0, 2]] 
tm.assert_frame_equal(result, expected) # neg indicies result = df.iloc[[-1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # dups indicies result = df.iloc[[-1, -1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # with index-like s = Series(index=lrange(1, 5)) result = df.iloc[s.index] with catch_warnings(record=True): expected = df.ix[[2, 4, 6, 8]] tm.assert_frame_equal(result, expected) def test_iloc_getitem_labelled_frame(self): # try with labelled frame df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD')) result = df.iloc[1, 1] exp = df.loc['b', 'B'] self.assertEqual(result, exp) result = df.iloc[:, 2:3] expected = df.loc[:, ['C']] tm.assert_frame_equal(result, expected) # negative indexing result = df.iloc[-1, -1] exp = df.loc['j', 'D'] self.assertEqual(result, exp) # out-of-bounds exception self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5])) # trying to use a label self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D'])) def test_iloc_getitem_doc_issue(self): # multi axis slicing issue with single block # surfaced in GH 6059 arr = np.random.randn(6, 4) index = date_range('20130101', periods=6) columns = list('ABCD') df = DataFrame(arr, index=index, columns=columns) # defines ref_locs df.describe() result = df.iloc[3:5, 0:2] str(result) result.dtypes expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2]) tm.assert_frame_equal(result, expected) # for dups df.columns = list('aaaa') result = df.iloc[3:5, 0:2] str(result) result.dtypes expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list('aa')) tm.assert_frame_equal(result, expected) # related arr = np.random.randn(6, 4) index = list(range(0, 12, 2)) columns = list(range(0, 8, 2)) df = DataFrame(arr, index=index, columns=columns) df._data.blocks[0].mgr_locs result = df.iloc[1:5, 2:4] str(result) result.dtypes expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4]) tm.assert_frame_equal(result, expected) def test_setitem_ndarray_1d(self): # GH5508 # len of indexer vs length of the 1d ndarray df = DataFrame(index=Index(lrange(1, 11))) df['foo'] = np.zeros(10, dtype=np.float64) df['bar'] = np.zeros(10, dtype=np.complex) # invalid def f(): with catch_warnings(record=True): df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2]) self.assertRaises(ValueError, f) def f(): df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) self.assertRaises(ValueError, f) # valid df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) result = df.loc[df.index[2:6], 'bar'] expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name='bar') tm.assert_series_equal(result, expected) # dtype getting changed? 
df = DataFrame(index=Index(lrange(1, 11))) df['foo'] = np.zeros(10, dtype=np.float64) df['bar'] = np.zeros(10, dtype=np.complex) def f(): df[2:5] = np.arange(1, 4) * 1j self.assertRaises(ValueError, f) def test_iloc_setitem_series(self): df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD')) df.iloc[1, 1] = 1 result = df.iloc[1, 1] self.assertEqual(result, 1) df.iloc[:, 2:3] = 0 expected = df.iloc[:, 2:3] result = df.iloc[:, 2:3] tm.assert_frame_equal(result, expected) s = Series(np.random.randn(10), index=lrange(0, 20, 2)) s.iloc[1] = 1 result = s.iloc[1] self.assertEqual(result, 1) s.iloc[:4] = 0 expected = s.iloc[:4] result = s.iloc[:4] tm.assert_series_equal(result, expected) s = Series([-1] * 6) s.iloc[0::2] = [0, 2, 4] s.iloc[1::2] = [1, 3, 5] result = s expected = Series([0, 1, 2, 3, 4, 5]) tm.assert_series_equal(result, expected) def test_iloc_setitem_list_of_lists(self): # GH 7551 # list-of-list is set incorrectly in mixed vs. single dtyped frames df = DataFrame(dict(A=np.arange(5, dtype='int64'), B=np.arange(5, 10, dtype='int64'))) df.iloc[2:4] = [[10, 11], [12, 13]] expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9])) tm.assert_frame_equal(df, expected) df = DataFrame( dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64'))) df.iloc[2:4] = [['x', 11], ['y', 13]] expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'], B=[5, 6, 11, 13, 9])) tm.assert_frame_equal(df, expected) def test_ix_general(self): # ix general issues # GH 2817 data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444}, 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0}, 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}} df = DataFrame(data).set_index(keys=['col', 'year']) key = 4.0, 2012 # emits a PerformanceWarning, ok with self.assert_produces_warning(PerformanceWarning): tm.assert_frame_equal(df.loc[key], df.iloc[2:]) # this is ok df.sort_index(inplace=True) res = df.loc[key] # col has float dtype, result should be Float64Index index = MultiIndex.from_arrays([[4.] 
* 3, [2012] * 3], names=['col', 'year']) expected = DataFrame({'amount': [222, 333, 444]}, index=index) tm.assert_frame_equal(res, expected) def test_ix_weird_slicing(self): # http://stackoverflow.com/q/17056560/1240268 df = DataFrame({'one': [1, 2, 3, np.nan, np.nan], 'two': [1, 2, 3, 4, 5]}) df.loc[df['one'] > 1, 'two'] = -df['two'] expected = DataFrame({'one': {0: 1.0, 1: 2.0, 2: 3.0, 3: nan, 4: nan}, 'two': {0: 1, 1: -2, 2: -3, 3: 4, 4: 5}}) tm.assert_frame_equal(df, expected) def test_loc_coerceion(self): # 12411 df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'), pd.NaT]}) expected = df.dtypes result = df.iloc[[0]] tm.assert_series_equal(result.dtypes, expected) result = df.iloc[[1]] tm.assert_series_equal(result.dtypes, expected) # 12045 import datetime df = DataFrame({'date': [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}) expected = df.dtypes result = df.iloc[[0]] tm.assert_series_equal(result.dtypes, expected) result = df.iloc[[1]] tm.assert_series_equal(result.dtypes, expected) # 11594 df = DataFrame({'text': ['some words'] + [None] * 9}) expected = df.dtypes result = df.iloc[0:2] tm.assert_series_equal(result.dtypes, expected) result = df.iloc[3:] tm.assert_series_equal(result.dtypes, expected) def test_setitem_dtype_upcast(self): # GH3216 df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) df['c'] = np.nan self.assertEqual(df['c'].dtype, np.float64) df.loc[0, 'c'] = 'foo' expected = DataFrame([{"a": 1, "c": 'foo'}, {"a": 3, "b": 2, "c": np.nan}]) tm.assert_frame_equal(df, expected) # GH10280 df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3), index=list('ab'), columns=['foo', 'bar', 'baz']) for val in [3.14, 'wxyz']: left = df.copy() left.loc['a', 'bar'] = val right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'), columns=['foo', 'bar', 'baz']) tm.assert_frame_equal(left, right) self.assertTrue(is_integer_dtype(left['foo'])) self.assertTrue(is_integer_dtype(left['baz'])) left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0, index=list('ab'), columns=['foo', 'bar', 'baz']) left.loc['a', 'bar'] = 'wxyz' right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'), columns=['foo', 'bar', 'baz']) tm.assert_frame_equal(left, right) self.assertTrue(is_float_dtype(left['foo'])) self.assertTrue(is_float_dtype(left['baz'])) def test_setitem_iloc(self): # setitem with an iloc list df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) df.iloc[[0, 1], [1, 2]] df.iloc[[0, 1], [1, 2]] += 100 expected = DataFrame( np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) tm.assert_frame_equal(df, expected) def test_dups_fancy_indexing(self): # GH 3455 from pandas.util.testing import makeCustomDataframe as mkdf df = mkdf(10, 3) df.columns = ['a', 'a', 'b'] result = df[['b', 'a']].columns expected = Index(['b', 'a', 'a']) self.assert_index_equal(result, expected) # across dtypes df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']], columns=list('aaaaaaa')) df.head() str(df) result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']]) result.columns = list('aaaaaaa') # TODO(wesm): unused? 
df_v = df.iloc[:, 4] # noqa res_v = result.iloc[:, 4] # noqa tm.assert_frame_equal(df, result) # GH 3561, dups not in selected order df = DataFrame( {'test': [5, 7, 9, 11], 'test1': [4., 5, 6, 7], 'other': list('abcd')}, index=['A', 'A', 'B', 'C']) rows = ['C', 'B'] expected = DataFrame( {'test': [11, 9], 'test1': [7., 6], 'other': ['d', 'c']}, index=rows) result = df.loc[rows] tm.assert_frame_equal(result, expected) result = df.loc[Index(rows)] tm.assert_frame_equal(result, expected) rows = ['C', 'B', 'E'] expected = DataFrame( {'test': [11, 9, np.nan], 'test1': [7., 6, np.nan], 'other': ['d', 'c', np.nan]}, index=rows) result = df.loc[rows] tm.assert_frame_equal(result, expected) # see GH5553, make sure we use the right indexer rows = ['F', 'G', 'H', 'C', 'B', 'E'] expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan], 'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan], 'other': [np.nan, np.nan, np.nan, 'd', 'c', np.nan]}, index=rows) result = df.loc[rows] tm.assert_frame_equal(result, expected) # inconsistent returns for unique/duplicate indices when values are # missing df = DataFrame(randn(4, 3), index=list('ABCD')) expected = df.ix[['E']] dfnu = DataFrame(randn(5, 3), index=list('AABCD')) result = dfnu.ix[['E']] tm.assert_frame_equal(result, expected) # ToDo: check_index_type can be True after GH 11497 # GH 4619; duplicate indexer with missing label df = DataFrame({"A": [0, 1, 2]}) result = df.ix[[0, 8, 0]] expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0]) tm.assert_frame_equal(result, expected, check_index_type=False) df = DataFrame({"A": list('abc')}) result = df.ix[[0, 8, 0]] expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0]) tm.assert_frame_equal(result, expected, check_index_type=False) # non unique with non unique selector df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C']) expected = DataFrame( {'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E']) result = df.ix[['A', 'A', 'E']] tm.assert_frame_equal(result, expected) # GH 5835 # dups on index and missing values df = DataFrame( np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A']) expected = pd.concat( [df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'], index=df.index)], axis=1) result = df.ix[:, ['A', 'B', 'C']] tm.assert_frame_equal(result, expected) # GH 6504, multi-axis indexing df = DataFrame(np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b']) expected = df.iloc[0:6] result = df.loc[[1, 2]] tm.assert_frame_equal(result, expected) expected = df result = df.loc[:, ['a', 'b']] tm.assert_frame_equal(result, expected) expected = df.iloc[0:6, :] result = df.loc[[1, 2], ['a', 'b']] tm.assert_frame_equal(result, expected) def test_indexing_mixed_frame_bug(self): # GH3492 df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'}, 'b': {1: 111, 2: 222, 3: 333}}) # this works, new column is created correctly df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x) # this does not work, ie column test is not changed idx = df['test'] == '_' temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x) df.ix[idx, 'test'] = temp self.assertEqual(df.iloc[0, 2], '-----') # if I look at df, then element [0,2] equals '_'. If instead I type # df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I # get '_'. 
def test_multitype_list_index_access(self): # GH 10610 df = pd.DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23]) with self.assertRaises(KeyError): df[[22, 26, -8]] self.assertEqual(df[21].shape[0], df.shape[0]) def test_set_index_nan(self): # GH 3586 df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13', 24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'}, 'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan, 26: nan, 27: nan, 28: nan, 29: nan, 30: nan}, 'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999, 21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996, 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008, 29: 0.80140849999999997, 30: 0.81307740000000006}, 'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985, 24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986}}).reset_index() result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex( columns=df.columns) tm.assert_frame_equal(result, df) def test_multi_nan_indexing(self): # GH 3588 df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'], 'b': ["C1", "C2", "C3", "C4"], "c": [10, 15, np.nan, 20]}) result = df.set_index(['a', 'b'], drop=False) expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'], 'b': ["C1", "C2", "C3", "C4"], "c": [10, 15, np.nan, 20]}, index=[Index(['R1', 'R2', np.nan, 'R4'], name='a'), Index(['C1', 'C2', 'C3', 'C4'], name='b')]) tm.assert_frame_equal(result, expected) def test_multi_assign(self): # GH 3626, an assignement of a sub-df to a df df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'], 'PF': [0, 0, 0, 0, 1, 1], 'col1': lrange(6), 'col2': lrange(6, 12)}) df.ix[1, 0] = np.nan df2 = df.copy() mask = ~df2.FC.isnull() cols = ['col1', 'col2'] dft = df2 * 2 dft.ix[3, 3] = np.nan expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'], 'PF': [0, 0, 0, 0, 1, 1], 'col1': Series([0, 1, 4, 6, 8, 10]), 'col2': [12, 7, 16, np.nan, 20, 22]}) # frame on rhs df2.ix[mask, cols] = dft.ix[mask, cols] tm.assert_frame_equal(df2, expected) df2.ix[mask, cols] = dft.ix[mask, cols] tm.assert_frame_equal(df2, expected) # with an ndarray on rhs df2 = df.copy() df2.ix[mask, cols] = dft.ix[mask, cols].values tm.assert_frame_equal(df2, expected) df2.ix[mask, cols] = dft.ix[mask, cols].values tm.assert_frame_equal(df2, expected) # broadcasting on the rhs is required df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[ 0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7])) expected = df.copy() mask = expected['A'] == 0 for col in ['A', 'B']: expected.loc[mask, col] = df['D'] df.loc[df['A'] == 0, ['A', 'B']] = df['D'] tm.assert_frame_equal(df, expected) def test_ix_assign_column_mixed(self): # GH #1142 df = DataFrame(tm.getSeriesData()) df['foo'] = 'bar' orig = df.ix[:, 'B'].copy() df.ix[:, 'B'] = df.ix[:, 'B'] + 1 tm.assert_series_equal(df.B, orig + 1) # GH 3668, mixed frame with series value df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'}) expected = df.copy() for i in range(5): indexer = i * 2 v = 1000 + i * 200 expected.ix[indexer, 'y'] = v self.assertEqual(expected.ix[indexer, 'y'], v) df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100 tm.assert_frame_equal(df, expected) # GH 4508, making sure consistency of assignments df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]}) df.ix[[0, 2, ], 'b'] = [100, -100] 
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]}) tm.assert_frame_equal(df, expected) df = pd.DataFrame({'a': lrange(4)}) df['b'] = np.nan df.ix[[1, 3], 'b'] = [100, -100] expected = DataFrame({'a': [0, 1, 2, 3], 'b': [np.nan, 100, np.nan, -100]}) tm.assert_frame_equal(df, expected) # ok, but chained assignments are dangerous # if we turn off chained assignement it will work with option_context('chained_assignment', None): df = pd.DataFrame({'a': lrange(4)}) df['b'] = np.nan df['b'].ix[[1, 3]] = [100, -100] tm.assert_frame_equal(df, expected) def test_ix_get_set_consistency(self): # GH 4544 # ix/loc get/set not consistent when # a mixed int/string index df = DataFrame(np.arange(16).reshape((4, 4)), columns=['a', 'b', 8, 'c'], index=['e', 7, 'f', 'g']) self.assertEqual(df.ix['e', 8], 2) self.assertEqual(df.loc['e', 8], 2) df.ix['e', 8] = 42 self.assertEqual(df.ix['e', 8], 42) self.assertEqual(df.loc['e', 8], 42) df.loc['e', 8] = 45 self.assertEqual(df.ix['e', 8], 45) self.assertEqual(df.loc['e', 8], 45) def test_setitem_list(self): # GH 6043 # ix with a list df = DataFrame(index=[0, 1], columns=[0]) df.ix[1, 0] = [1, 2, 3] df.ix[1, 0] = [1, 2] result = DataFrame(index=[0, 1], columns=[0]) result.ix[1, 0] = [1, 2] tm.assert_frame_equal(result, df) # ix with an object class TO(object): def __init__(self, value): self.value = value def __str__(self): return "[{0}]".format(self.value) __repr__ = __str__ def __eq__(self, other): return self.value == other.value def view(self): return self df = DataFrame(index=[0, 1], columns=[0]) df.ix[1, 0] = TO(1) df.ix[1, 0] = TO(2) result = DataFrame(index=[0, 1], columns=[0]) result.ix[1, 0] = TO(2) tm.assert_frame_equal(result, df) # remains object dtype even after setting it back df = DataFrame(index=[0, 1], columns=[0]) df.ix[1, 0] = TO(1) df.ix[1, 0] = np.nan result = DataFrame(index=[0, 1], columns=[0]) tm.assert_frame_equal(result, df) def test_iloc_mask(self): # GH 3631, iloc with a mask (of a series) should raise df = DataFrame(lrange(5), list('ABCDE'), columns=['a']) mask = (df.a % 2 == 0) self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask])) mask.index = lrange(len(mask)) self.assertRaises(NotImplementedError, df.iloc.__getitem__, tuple([mask])) # ndarray ok result = df.iloc[np.array([True] * len(mask), dtype=bool)] tm.assert_frame_equal(result, df) # the possibilities locs = np.arange(4) nums = 2 ** locs reps = lmap(bin, nums) df = DataFrame({'locs': locs, 'nums': nums}, reps) expected = { (None, ''): '0b1100', (None, '.loc'): '0b1100', (None, '.iloc'): '0b1100', ('index', ''): '0b11', ('index', '.loc'): '0b11', ('index', '.iloc'): ('iLocation based boolean indexing ' 'cannot use an indexable as a mask'), ('locs', ''): 'Unalignable boolean Series provided as indexer ' '(index of the boolean Series and of the indexed ' 'object do not match', ('locs', '.loc'): 'Unalignable boolean Series provided as indexer ' '(index of the boolean Series and of the ' 'indexed object do not match', ('locs', '.iloc'): ('iLocation based boolean indexing on an ' 'integer type is not available'), } # UserWarnings from reindex of a boolean mask with warnings.catch_warnings(record=True): result = dict() for idx in [None, 'index', 'locs']: mask = (df.nums > 2).values if idx: mask = Series(mask, list(reversed(getattr(df, idx)))) for method in ['', '.loc', '.iloc']: try: if method: accessor = getattr(df, method[1:]) else: accessor = df ans = str(bin(accessor[mask]['nums'].sum())) except Exception as e: ans = str(e) key = tuple([idx, method]) r = 
expected.get(key) if r != ans: raise AssertionError( "[%s] does not match [%s], received [%s]" % (key, ans, r)) def test_ix_slicing_strings(self): # GH3836 data = {'Classification': ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'], 'Random': [1, 2, 3, 4, 5], 'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']} df = DataFrame(data) x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF' ])] df.ix[x.index, 'X'] = df['Classification'] expected = DataFrame({'Classification': {0: 'SA EQUITY CFD', 1: 'bbb', 2: 'SA EQUITY', 3: 'SA SSF', 4: 'aaa'}, 'Random': {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}, 'X': {0: 'correct', 1: 'bbb', 2: 'correct', 3: 'correct', 4: 'aaa'}}) # bug was 4: 'bbb' tm.assert_frame_equal(df, expected) def test_non_unique_loc(self): # GH3659 # non-unique indexer with loc slice # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs # these are going to raise becuase the we are non monotonic df = DataFrame({'A': [1, 2, 3, 4, 5, 6], 'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, None)])) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0, None)])) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)])) # monotonic are ok df = DataFrame({'A': [1, 2, 3, 4, 5, 6], 'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0) result = df.loc[1:] expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]}, index=[1, 1, 2, 3]) tm.assert_frame_equal(result, expected) result = df.loc[0:] tm.assert_frame_equal(result, df) result = df.loc[1:2] expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]}, index=[1, 1, 2]) tm.assert_frame_equal(result, expected) def test_loc_name(self): # GH 3880 df = DataFrame([[1, 1], [1, 1]]) df.index.name = 'index_name' result = df.iloc[[0, 1]].index.name self.assertEqual(result, 'index_name') result = df.ix[[0, 1]].index.name self.assertEqual(result, 'index_name') result = df.loc[[0, 1]].index.name self.assertEqual(result, 'index_name') def test_iloc_non_unique_indexing(self): # GH 4017, non-unique indexing (on the axis) df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000}) idx = np.array(lrange(30)) * 99 expected = df.iloc[idx] df3 = pd.concat([df, 2 * df, 3 * df]) result = df3.iloc[idx] tm.assert_frame_equal(result, expected) df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000}) df2 = pd.concat([df2, 2 * df2, 3 * df2]) sidx = df2.index.to_series() expected = df2.iloc[idx[idx <= sidx.max()]] new_list = [] for r, s in expected.iterrows(): new_list.append(s) new_list.append(s * 2) new_list.append(s * 3) expected = DataFrame(new_list) expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()]) ]) result = df2.loc[idx] tm.assert_frame_equal(result, expected, check_index_type=False) def test_string_slice(self): # GH 14424 # string indexing against datetimelike with object # dtype should properly raises KeyError df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')], dtype=object)) self.assertTrue(df.index.is_all_dates) with tm.assertRaises(KeyError): df['2011'] with tm.assertRaises(KeyError): df.loc['2011', 0] df = pd.DataFrame() self.assertFalse(df.index.is_all_dates) with tm.assertRaises(KeyError): df['2011'] with tm.assertRaises(KeyError): df.loc['2011', 0] def test_mi_access(self): # GH 4145 data = """h1 main h3 sub h5 0 a A 1 A1 1 1 b B 2 B1 2 2 c B 3 A1 3 3 d A 4 B2 4 4 e A 5 B2 5 5 f B 6 A2 6 """ df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0) df2 = df.set_index(['main', 'sub']).T.sort_index(1) 
index = Index(['h1', 'h3', 'h5']) columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub']) expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T result = df2.loc[:, ('A', 'A1')] tm.assert_frame_equal(result, expected) result = df2[('A', 'A1')] tm.assert_frame_equal(result, expected) # GH 4146, not returning a block manager when selecting a unique index # from a duplicate index # as of 4879, this returns a Series (which is similar to what happens # with a non-unique) expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1') result = df2['A']['A1'] tm.assert_series_equal(result, expected) # selecting a non_unique from the 2nd level expected = DataFrame([['d', 4, 4], ['e', 5, 5]], index=Index(['B2', 'B2'], name='sub'), columns=['h1', 'h3', 'h5'], ).T result = df2['A']['B2'] tm.assert_frame_equal(result, expected) def test_non_unique_loc_memory_error(self): # GH 4280 # non_unique index with a large selection triggers a memory error columns = list('ABCDEFG') def gen_test(l, l2): return pd.concat([DataFrame(randn(l, len(columns)), index=lrange(l), columns=columns), DataFrame(np.ones((l2, len(columns))), index=[0] * l2, columns=columns)]) def gen_expected(df, mask): l = len(mask) return pd.concat([df.take([0], convert=False), DataFrame(np.ones((l, len(columns))), index=[0] * l, columns=columns), df.take(mask[1:], convert=False)]) df = gen_test(900, 100) self.assertFalse(df.index.is_unique) mask = np.arange(100) result = df.loc[mask] expected = gen_expected(df, mask) tm.assert_frame_equal(result, expected) df = gen_test(900000, 100000) self.assertFalse(df.index.is_unique) mask = np.arange(100000) result = df.loc[mask] expected = gen_expected(df, mask) tm.assert_frame_equal(result, expected) def test_astype_assignment(self): # GH4312 (iloc) df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']], columns=list('ABCDEFG')) df = df_orig.copy() df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64) expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']], columns=list('ABCDEFG')) tm.assert_frame_equal(df, expected) df = df_orig.copy() df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True) expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']], columns=list('ABCDEFG')) tm.assert_frame_equal(df, expected) # GH5702 (loc) df = df_orig.copy() df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64) expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']], columns=list('ABCDEFG')) tm.assert_frame_equal(df, expected) df = df_orig.copy() df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64) expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']], columns=list('ABCDEFG')) tm.assert_frame_equal(df, expected) # full replacements / no nans df = DataFrame({'A': [1., 2., 3., 4.]}) df.iloc[:, 0] = df['A'].astype(np.int64) expected = DataFrame({'A': [1, 2, 3, 4]}) tm.assert_frame_equal(df, expected) df = DataFrame({'A': [1., 2., 3., 4.]}) df.loc[:, 'A'] = df['A'].astype(np.int64) expected = DataFrame({'A': [1, 2, 3, 4]}) tm.assert_frame_equal(df, expected) def test_astype_assignment_with_dups(self): # GH 4686 # assignment with dups that has a dtype change cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')]) df = DataFrame(np.arange(3).reshape((1, 3)), columns=cols, dtype=object) index = df.index.copy() df['A'] = df['A'].astype(np.float64) self.assert_index_equal(df.index, index) # TODO(wesm): unused variables # result = df.get_dtype_counts().sort_index() # expected = Series({'float64': 2, 'object': 1}).sort_index() def test_dups_loc(self): # GH4726 # 
dup indexing with iloc/loc df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]], columns=['a', 'a', 'a', 'a', 'a'], index=[1]) expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')], index=['a', 'a', 'a', 'a', 'a'], name=1) result = df.iloc[0] tm.assert_series_equal(result, expected) result = df.loc[1] tm.assert_series_equal(result, expected) def test_partial_setting(self): # GH2578, allow ix and friends to partially set # series s_orig = Series([1, 2, 3]) s = s_orig.copy() s[5] = 5 expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5]) tm.assert_series_equal(s, expected) s = s_orig.copy() s.loc[5] = 5 expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5]) tm.assert_series_equal(s, expected) s = s_orig.copy() s[5] = 5. expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5]) tm.assert_series_equal(s, expected) s = s_orig.copy() s.loc[5] = 5. expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5]) tm.assert_series_equal(s, expected) # iloc/iat raise s = s_orig.copy() def f(): s.iloc[3] = 5. self.assertRaises(IndexError, f) def f(): s.iat[3] = 5. self.assertRaises(IndexError, f) # ## frame ## df_orig = DataFrame( np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64') # iloc/iat raise df = df_orig.copy() def f(): df.iloc[4, 2] = 5. self.assertRaises(IndexError, f) def f(): df.iat[4, 2] = 5. self.assertRaises(IndexError, f) # row setting where it exists expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]})) df = df_orig.copy() df.iloc[1] = df.iloc[2] tm.assert_frame_equal(df, expected) expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]})) df = df_orig.copy() df.loc[1] = df.loc[2] tm.assert_frame_equal(df, expected) # like 2578, partial setting with dtype preservation expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]})) df = df_orig.copy() df.loc[3] = df.loc[2] tm.assert_frame_equal(df, expected) # single dtype frame, overwrite expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]})) df = df_orig.copy() df.ix[:, 'B'] = df.ix[:, 'A'] tm.assert_frame_equal(df, expected) # mixed dtype frame, overwrite expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])})) df = df_orig.copy() df['B'] = df['B'].astype(np.float64) df.ix[:, 'B'] = df.ix[:, 'A'] tm.assert_frame_equal(df, expected) # single dtype frame, partial setting expected = df_orig.copy() expected['C'] = df['A'] df = df_orig.copy() df.ix[:, 'C'] = df.ix[:, 'A'] tm.assert_frame_equal(df, expected) # mixed frame, partial setting expected = df_orig.copy() expected['C'] = df['A'] df = df_orig.copy() df.ix[:, 'C'] = df.ix[:, 'A'] tm.assert_frame_equal(df, expected) # ## panel ## p_orig = Panel(np.arange(16).reshape(2, 4, 2), items=['Item1', 'Item2'], major_axis=pd.date_range('2001/1/12', periods=4), minor_axis=['A', 'B'], dtype='float64') # panel setting via item p_orig = Panel(np.arange(16).reshape(2, 4, 2), items=['Item1', 'Item2'], major_axis=pd.date_range('2001/1/12', periods=4), minor_axis=['A', 'B'], dtype='float64') expected = p_orig.copy() expected['Item3'] = expected['Item1'] p = p_orig.copy() p.loc['Item3'] = p['Item1'] tm.assert_panel_equal(p, expected) # panel with aligned series expected = p_orig.copy() expected = expected.transpose(2, 1, 0) expected['C'] = DataFrame({'Item1': [30, 30, 30, 30], 'Item2': [32, 32, 32, 32]}, index=p_orig.major_axis) expected = expected.transpose(2, 1, 0) p = p_orig.copy() p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items) tm.assert_panel_equal(p, expected) # GH 8473 dates = date_range('1/1/2000', periods=8) df_orig = DataFrame(np.random.randn(8, 4), 
index=dates, columns=['A', 'B', 'C', 'D']) expected = pd.concat([df_orig, DataFrame( {'A': 7}, index=[dates[-1] + 1])]) df = df_orig.copy() df.loc[dates[-1] + 1, 'A'] = 7 tm.assert_frame_equal(df, expected) df = df_orig.copy() df.at[dates[-1] + 1, 'A'] = 7 tm.assert_frame_equal(df, expected) exp_other = DataFrame({0: 7}, index=[dates[-1] + 1]) expected = pd.concat([df_orig, exp_other], axis=1) df = df_orig.copy() df.loc[dates[-1] + 1, 0] = 7 tm.assert_frame_equal(df, expected) df = df_orig.copy() df.at[dates[-1] + 1, 0] = 7 tm.assert_frame_equal(df, expected) def test_partial_setting_mixed_dtype(self): # in a mixed dtype environment, try to preserve dtypes # by appending df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"]) s = df.loc[1].copy() s.name = 2 expected = df.append(s) df.loc[2] = df.loc[1] tm.assert_frame_equal(df, expected) # columns will align df = DataFrame(columns=['A', 'B']) df.loc[0] = Series(1, index=range(4)) tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0])) # columns will align df = DataFrame(columns=['A', 'B']) df.loc[0] = Series(1, index=['B']) exp = DataFrame([[np.nan, 1]], columns=['A', 'B'], index=[0], dtype='float64') tm.assert_frame_equal(df, exp) # list-like must conform df = DataFrame(columns=['A', 'B']) def f(): df.loc[0] = [1, 2, 3] self.assertRaises(ValueError, f) # these are coerced to float unavoidably (as its a list-like to begin) df = DataFrame(columns=['A', 'B']) df.loc[3] = [6, 7] exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'], dtype='float64') tm.assert_frame_equal(df, exp) def test_series_partial_set(self): # partial set with new index # Regression from GH4825 ser = Series([0.1, 0.2], index=[1, 2]) # loc expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3]) result = ser.loc[[3, 2, 3]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x']) result = ser.loc[[3, 2, 3, 'x']] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1]) result = ser.loc[[2, 2, 1]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1]) result = ser.loc[[2, 2, 'x', 1]] tm.assert_series_equal(result, expected, check_index_type=True) # raises as nothing in in the index self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]]) expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3]) result = ser.loc[[2, 2, 3]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4]) result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3]) result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 3, 3]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4]) result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 4, 4]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2]) result = Series([0.1, 0.2, 0.3, 0.4], index=[4, 5, 6, 7]).loc[[7, 2, 2]] tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5]) result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[4, 5, 5]] tm.assert_series_equal(result, expected, check_index_type=True) # iloc 
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1]) result = ser.iloc[[1, 1, 0, 0]] tm.assert_series_equal(result, expected, check_index_type=True) def test_series_partial_set_with_name(self): # GH 11497 idx = Index([1, 2], dtype='int64', name='idx') ser = Series([0.1, 0.2], index=idx, name='s') # loc exp_idx = Index([3, 2, 3], dtype='int64', name='idx') expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s') result = ser.loc[[3, 2, 3]] tm.assert_series_equal(result, expected, check_index_type=True) exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx') expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx, name='s') result = ser.loc[[3, 2, 3, 'x']] tm.assert_series_equal(result, expected, check_index_type=True) exp_idx = Index([2, 2, 1], dtype='int64', name='idx') expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s') result = ser.loc[[2, 2, 1]] tm.assert_series_equal(result, expected, check_index_type=True) exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx') expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s') result = ser.loc[[2, 2, 'x', 1]] tm.assert_series_equal(result, expected, check_index_type=True) # raises as nothing in in the index self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]]) exp_idx = Index([2, 2, 3], dtype='int64', name='idx') expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s') result = ser.loc[[2, 2, 3]] tm.assert_series_equal(result, expected, check_index_type=True) exp_idx = Index([3, 4, 4], dtype='int64', name='idx') expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s') idx = Index([1, 2, 3], dtype='int64', name='idx') result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]] tm.assert_series_equal(result, expected, check_index_type=True) exp_idx = Index([5, 3, 3], dtype='int64', name='idx') expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s') idx = Index([1, 2, 3, 4], dtype='int64', name='idx') result = Series([0.1, 0.2, 0.3, 0.4], index=idx, name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
pandas.util.testing.assert_series_equal
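A minimal, self-contained sketch of the API this row targets (pandas.util.testing.assert_series_equal); the Series values are illustrative only, and in newer pandas releases the same helper lives under pandas.testing.

import pandas as pd
import pandas.util.testing as tm  # older location; use pandas.testing in pandas >= 1.0

s1 = pd.Series([0.1, 0.2], index=[1, 2], name='s')   # illustrative values, not from the row above
s2 = pd.Series([0.1, 0.2], index=[1, 2], name='s')
tm.assert_series_equal(s1, s2, check_index_type=True)  # raises AssertionError on any mismatch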
#!/usr/bin/python3 import sys sys.path.insert(0, "/home/eric/ramukcire/estimating_cost_of_dc_services/syscost/") import pandas as pd import numpy as np import os import matplotlib.pyplot as plt import re from collections import Counter import itertools import warnings from termcolor import colored import streamlit as st from subprocess import check_output import traffic.traffic as traffic # from traffic.traffic import traffic from datetime import datetime as dt from matplotlib import rcParams rcParams.update({'figure.autolayout': True}) print(colored('Imported Modules\n', 'yellow')) print(colored('Running from '+str((os.getcwd())),'green')) #print(colored('Other directories at this level are '+ str(os.listdir()),'red')) st.title('Total Cost of Ownership Model (Hardy)') st.subheader('<NAME>, Doctor of Design') '''This script will run the Hardy model. For now it will not interact \ with the model directly, but will be able to consume the outputs \ from the Perl program, parse it, and pass it to SysCost EEIO inputs. \ ''' class costet(object): def __init__(self, input_dc, input_r, streamlit=True, model_file=None): '''Args: Runs the specified parameter TCO model. input_dc: "input_example/dc.params" (Data-Center Parameters) input_r: "input_example/r.params" (Resource Parameters) streamlit = boolean for using streamlit model_file = file name for the model output''' self.input_dc = input_dc self.input_r = input_r self.model = check_output(["perl", "./cost-et.pl", input_dc, input_r], shell = False) self.model = self.model.decode("utf-8") self.streamlit = streamlit self.model_file = model_file def view_raw_output(self, save=None): if self.streamlit is True: st.header('Model run for ' +self.input_dc+' with '+self.input_r) st.subheader('Output from Cost-ET Model run') st.text(self.model) if save is not None: f = open(self.model_file, "w+") f.write(str(self.model)) f.close() print(colored('This is the output from the Cost-ET model: ' + self.model, 'yellow')) def view_script(self, script): '''Args: script: "cost-et.pl" ''' f = open(script, "r") f = f.read() print(colored('Print this :'+ f, 'magenta')) if self.streamlit is True: st.subheader('Print out of '+script) st.code(f, language='perl') def get_dc_params(self): _df = pd.read_csv(self.model_file)[2:24].reset_index(drop=True) _df.columns = ['DC_parameters'] _df[['DC Param','Value']] = _df['DC_parameters'].str.split("=",expand=True) _df = _df[['DC Param','Value']] if self.streamlit is True: st.subheader('DC Parameters: ') st.dataframe(_df, 500, 600) return _df def get_resource_params(self): _df = pd.read_csv(self.model_file)[29:76].reset_index(drop=True) _df.columns = ['Resource_parameters'] _df[['Resource','Value']] = _df['Resource_parameters'].str.split("=",expand=True) _df = _df[['Resource','Value']] if self.streamlit is True: st.subheader('Resources Parameters: ') st.dataframe(_df, 500, 600) return _df def get_server_age(self): _df = pd.read_csv(self.model_file)[79:85].reset_index(drop=True) _df.columns = ['Age Dist'] _df[['Age (Years)','Server Count']] = _df['Age Dist'].str.split(" ",expand=True) _df = _df[['Age (Years)','Server Count']] if self.streamlit is True: st.subheader('Age: ') st.dataframe(_df, 500, 1000) return _df def get_server_replacement(self): '''Unclear what this calue means ATM.''' _df =
pd.read_csv(self.model_file)
pandas.read_csv
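A minimal usage sketch of pandas.read_csv, the API this row targets; 'model_output.csv' is a placeholder file name, not the model_file attribute used in the snippet above.

import pandas as pd

df = pd.read_csv('model_output.csv')  # placeholder path; parses the CSV into a DataFrame
print(df.head())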
import csv import numpy as np import pandas as pd import awrams.utils.datetools as dt from awrams.utils.helpers import sanitize_cell import awrams.utils.extents as extents import awrams.benchmarking.config as cfg from awrams.utils.awrams_log import get_module_logger logger = get_module_logger('utils') def infer_freq(df): if 'M' in df.index.inferred_freq: return 'm' elif 'A' in df.index.inferred_freq: return 'y' elif 'D' in df.index.inferred_freq: return 'd' else: return 'd' def read_csv(csv_file): d = dict() _csv = csv.DictReader(open(csv_file,'r')) for row in _csv: id = row.pop(_csv.fieldnames[0]) d[id] = row return d def read_id_csv(id_csv): with open(id_csv,'r') as in_csv: return [line.strip() for line in in_csv.readlines()][1:] def get_obs_sites(): return read_csv(cfg.BENCHMARK_SITES) def get_sites_by_ids(site_ids, site_id_type, site_ref_type, site_set_name=None): """ cross reference site ids with site meta contained is BENCHMARK_SITES csv :param site_ids: list of site ids :param site_id_type: id type for ids in site_ids (one of column names in BENCHMARK_SITES csv) :param site_ref_type: column name to use for id reference name :param site_set_name: name of set id belongs to, only required if id appears more than once in BENCHMARK_SITES csv :return: map of extents, map of site name """ site_idx = get_obs_sites() site_extents = {} site_name_map = {} for site_id in site_ids: ixs = [k for k in list(site_idx.keys()) if site_idx[k][site_id_type] == site_id] if site_set_name is not None: ixs = [k for k in ixs if site_idx[k]['set'] in site_set_name] if len(ixs) == 0: # site_id not found in SiteLocationsWithIndex.csv continue # use the first occurrence of pred_index ref = site_idx[ixs[0]] site_name = ref[site_ref_type] site_name_map[site_name] = site_id site_extents[site_name] = extents.from_cell_coords(*sanitize_cell((float(ref['Y']), float(ref['X'])))) return site_extents, site_name_map def get_catchments_by_ids(ids): """ get catchment extents for ids :param ids: list of ids :return: map of extents """ from awrams.utils.catchments import CatchmentDB, CatchmentNotFoundError extent_map = {} for idx in ids: try: extent_map[str(idx)] = CatchmentDB().get_by_id(idx) except CatchmentNotFoundError as e: logger.warning(e) continue return extent_map def extract_sites(df, sites, site_idx, period): data = {} for i,site in enumerate(sites): data[site] = series_for_site(df, site, site_idx[i], period) out_df =
pd.DataFrame(data)
pandas.DataFrame
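A minimal sketch of the pandas.DataFrame constructor called on a dict of Series, mirroring the extract_sites pattern above; the site names and values are made up.

import pandas as pd

data = {'site_a': pd.Series([1.0, 2.0, 3.0]),   # made-up site keys and values
        'site_b': pd.Series([4.0, 5.0, 6.0])}
out_df = pd.DataFrame(data)  # dict keys become the column labels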
"""Run unit tests. Use this to run tests and understand how tasks.py works. Setup:: mkdir -p test-data/input mkdir -p test-data/output mysql -u root -p CREATE DATABASE testdb; CREATE USER 'testusr'@'localhost' IDENTIFIED BY 'test<PASSWORD>'; GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%'; Run tests:: pytest test_combine.py -s Notes: * this will create sample csv, xls and xlsx files * test_combine_() test the main combine function """ from d6tstack.combine_csv import * from d6tstack.sniffer import CSVSniffer import d6tstack.utils import math import pandas as pd # import pyarrow as pa # import pyarrow.parquet as pq import ntpath import shutil import dask.dataframe as dd import sqlalchemy import pytest cfg_fname_base_in = 'test-data/input/test-data-' cfg_fname_base_out_dir = 'test-data/output' cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-' cnxn_string = 'sqlite:///test-data/db/{}.db' #************************************************************ # fixtures #************************************************************ class DebugLogger(object): def __init__(self, event): pass def send_log(self, msg, status): pass def send(self, data): pass logger = DebugLogger('combiner') # sample data def create_files_df_clean(): # create sample data df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20}) df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90}) df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100}) # cfg_col = [ 'date', 'sales','cost','profit'] # return df1[cfg_col], df2[cfg_col], df3[cfg_col] return df1, df2, df3 def create_files_df_clean_combine(): df1,df2,df3 = create_files_df_clean() df_all = pd.concat([df1,df2,df3]) df_all = df_all[df_all.columns].astype(str) return df_all def create_files_df_clean_combine_with_filename(fname_list): df1, df2, df3 = create_files_df_clean() df1['filename'] = os.path.basename(fname_list[0]) df2['filename'] = os.path.basename(fname_list[1]) df3['filename'] = os.path.basename(fname_list[2]) df_all = pd.concat([df1, df2, df3]) df_all = df_all[df_all.columns].astype(str) return df_all def create_files_df_colmismatch_combine(cfg_col_common,allstr=True): df1, df2, df3 = create_files_df_clean() df3['profit2']=df3['profit']*2 if cfg_col_common: df_all = pd.concat([df1, df2, df3], join='inner') else: df_all = pd.concat([df1, df2, df3]) if allstr: df_all = df_all[df_all.columns].astype(str) return df_all def check_df_colmismatch_combine(dfg,is_common=False, convert_date=True): dfg = dfg.drop(['filepath','filename'],1).sort_values('date').reset_index(drop=True) if convert_date: dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d') dfchk = create_files_df_colmismatch_combine(is_common,False).reset_index(drop=True)[dfg.columns] assert dfg.equals(dfchk) return True def create_files_df_colmismatch_combine2(cfg_col_common): df1, df2, df3 = create_files_df_clean() for i in range(15): df3['profit'+str(i)]=df3['profit']*2 if cfg_col_common: df_all = pd.concat([df1, df2, df3], join='inner') else: df_all = pd.concat([df1, df2, df3]) df_all = df_all[df_all.columns].astype(str) return df_all # csv standard @pytest.fixture(scope="module") def create_files_csv(): df1,df2,df3 = create_files_df_clean() # save files cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv' df1.to_csv(cfg_fname % 'jan',index=False) df2.to_csv(cfg_fname % 'feb',index=False) df3.to_csv(cfg_fname % 'mar',index=False) return [cfg_fname % 
'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_csv_colmismatch(): df1,df2,df3 = create_files_df_clean() df3['profit2']=df3['profit']*2 # save files cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv' df1.to_csv(cfg_fname % 'jan',index=False) df2.to_csv(cfg_fname % 'feb',index=False) df3.to_csv(cfg_fname % 'mar',index=False) return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_csv_colmismatch2(): df1,df2,df3 = create_files_df_clean() for i in range(15): df3['profit'+str(i)]=df3['profit']*2 # save files cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv' df1.to_csv(cfg_fname % 'jan',index=False) df2.to_csv(cfg_fname % 'feb',index=False) df3.to_csv(cfg_fname % 'mar',index=False) return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_csv_colreorder(): df1,df2,df3 = create_files_df_clean() cfg_col = [ 'date', 'sales','cost','profit'] cfg_col2 = [ 'date', 'sales','profit','cost'] # return df1[cfg_col], df2[cfg_col], df3[cfg_col] # save files cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv' df1[cfg_col].to_csv(cfg_fname % 'jan',index=False) df2[cfg_col].to_csv(cfg_fname % 'feb',index=False) df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False) return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_csv_noheader(): df1,df2,df3 = create_files_df_clean() # save files cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv' df1.to_csv(cfg_fname % 'jan',index=False, header=False) df2.to_csv(cfg_fname % 'feb',index=False, header=False) df3.to_csv(cfg_fname % 'mar',index=False, header=False) return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_csv_col_renamed(): df1, df2, df3 = create_files_df_clean() df3 = df3.rename(columns={'sales':'revenue'}) cfg_col = ['date', 'sales', 'profit', 'cost'] cfg_col2 = ['date', 'revenue', 'profit', 'cost'] cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv' df1[cfg_col].to_csv(cfg_fname % 'jan', index=False) df2[cfg_col].to_csv(cfg_fname % 'feb', index=False) df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False) return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar'] def create_files_csv_dirty(cfg_sep=",", cfg_header=True): df1,df2,df3 = create_files_df_clean() df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header) return cfg_fname_base_in+'debug.csv' # excel single-tab def create_files_xls_single_helper(cfg_fname): df1,df2,df3 = create_files_df_clean() df1.to_excel(cfg_fname % 'jan',index=False) df2.to_excel(cfg_fname % 'feb',index=False) df3.to_excel(cfg_fname % 'mar',index=False) return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_xls_single(): return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls') @pytest.fixture(scope="module") def create_files_xlsx_single(): return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx') def write_file_xls(dfg, fname, startrow=0,startcol=0): writer = pd.ExcelWriter(fname) dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol) dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol) writer.save() # excel multi-tab def create_files_xls_multiple_helper(cfg_fname): df1,df2,df3 = create_files_df_clean() write_file_xls(df1,cfg_fname % 'jan') 
write_file_xls(df2,cfg_fname % 'feb') write_file_xls(df3,cfg_fname % 'mar') return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar'] @pytest.fixture(scope="module") def create_files_xls_multiple(): return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls') @pytest.fixture(scope="module") def create_files_xlsx_multiple(): return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx') #************************************************************ # tests - helpers #************************************************************ def test_file_extensions_get(): fname_list = ['a.csv','b.csv'] ext_list = file_extensions_get(fname_list) assert ext_list==['.csv','.csv'] fname_list = ['a.xls','b.xls'] ext_list = file_extensions_get(fname_list) assert ext_list==['.xls','.xls'] def test_file_extensions_all_equal(): ext_list = ['.csv']*2 assert file_extensions_all_equal(ext_list) ext_list = ['.xls']*2 assert file_extensions_all_equal(ext_list) ext_list = ['.csv','.xls'] assert not file_extensions_all_equal(ext_list) def test_file_extensions_valid(): ext_list = ['.csv']*2 assert file_extensions_valid(ext_list) ext_list = ['.xls']*2 assert file_extensions_valid(ext_list) ext_list = ['.exe','.xls'] assert not file_extensions_valid(ext_list) #************************************************************ #************************************************************ # scan header #************************************************************ #************************************************************ def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder): with pytest.raises(ValueError) as e: c = CombinerCSV([]) # clean combiner = CombinerCSV(fname_list=create_files_csv) combiner.sniff_columns() assert combiner.is_all_equal() assert combiner.is_column_present().all().all() assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit'] assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all'] assert combiner.sniff_results['columns_unique'] == [] # extra column combiner = CombinerCSV(fname_list=create_files_csv_colmismatch) combiner.sniff_columns() assert not combiner.is_all_equal() assert not combiner.is_column_present().all().all() assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False] assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2'] assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit'] assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit'] assert combiner.sniff_results['columns_unique'] == ['profit2'] assert combiner.is_column_present_unique().columns.tolist() == ['profit2'] # mixed order combiner = CombinerCSV(fname_list=create_files_csv_colreorder) combiner.sniff_columns() assert not combiner.is_all_equal() assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2] def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch): # rename df = CombinerCSV(fname_list=create_files_csv).preview_rename() assert df.empty df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename() assert df.empty df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename() assert df.columns.tolist()==['cost'] assert df['cost'].unique().tolist()==['cost2'] df = CombinerCSV(fname_list=create_files_csv_colmismatch, 
columns_rename={'profit2':'profit3'}).preview_rename() assert df.columns.tolist()==['profit2'] assert df['profit2'].unique().tolist()==[np.nan, 'profit3'] # select l = CombinerCSV(fname_list=create_files_csv).preview_select() assert l == ['date', 'sales', 'cost', 'profit'] l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select() assert l2==l l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select() assert l == ['date', 'sales', 'cost'] l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select() assert l == ['date', 'sales', 'cost', 'profit', 'profit2'] l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select() assert l == ['date', 'sales', 'cost', 'profit'] # rename+select l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select() assert l==['date', 'profit3'] l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select() assert l==['date', 'profit3'] def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder): df = CombinerCSV(fname_list=create_files_csv).to_pandas() assert df.shape == (30, 6) df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas() assert df.shape == (30, 6+1) assert df['profit2'].isnull().unique().tolist() == [True, False] df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas() assert df.shape == (30, 6) assert 'profit2' not in df.columns # rename+select df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas() assert df.shape == (30, 2) assert 'profit3' in df.columns and not 'profit2' in df.columns df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas() assert df.shape == (30, 2) assert 'profit3' in df.columns and not 'profit2' in df.columns def test_combinepreview(create_files_csv_colmismatch): df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview() assert df.shape == (9, 6+1) assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')] def apply(dfg): dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d') return dfg df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview() assert df.shape == (9, 6+1) assert df.dtypes.tolist() == [np.dtype('<M8[ns]'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')] def test_tocsv(create_files_csv_colmismatch): fname = 'test-data/output/combined.csv' fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_combine(filename=fname) assert fname == fnameout df = pd.read_csv(fname) dfchk = df.copy() assert df.shape == (30, 4+1+2) assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename'] assert check_df_colmismatch_combine(df) fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_csv_combine(filename=fname) df = pd.read_csv(fname) assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'filepath', 'filename'] assert 
check_df_colmismatch_combine(df,is_common=True) def helper(fdir): fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir=fdir) for fname in fnamesout: df = pd.read_csv(fname) assert df.shape == (10, 4+1+2) assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename'] helper('test-data/output') helper('test-data/output/') df = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv') df = df.compute() assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename'] assert df.reset_index(drop=True).equals(dfchk) assert check_df_colmismatch_combine(df) # check creates directory try: shutil.rmtree('test-data/output-tmp') except: pass _ = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir='test-data/output-tmp') try: shutil.rmtree('test-data/output-tmp') except: pass def test_topq(create_files_csv_colmismatch): fname = 'test-data/output/combined.pq' fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_combine(filename=fname) assert fname == fnameout df = pd.read_parquet(fname, engine='fastparquet') assert df.shape == (30, 4+1+2) assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename'] df2 = pd.read_parquet(fname, engine='pyarrow') assert df2.equals(df) assert check_df_colmismatch_combine(df) df = dd.read_parquet(fname) df = df.compute() assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename'] df2 = pd.read_parquet(fname, engine='fastparquet') assert df2.equals(df) df3 = pd.read_parquet(fname, engine='pyarrow') assert df3.equals(df) assert check_df_colmismatch_combine(df) def helper(fdir): fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_align(output_dir=fdir) for fname in fnamesout: df =
pd.read_parquet(fname, engine='fastparquet')
pandas.read_parquet
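A minimal sketch of pandas.read_parquet with the fastparquet engine used in the test above; 'combined.pq' is a placeholder path and the fastparquet package is assumed to be installed.

import pandas as pd

df = pd.read_parquet('combined.pq', engine='fastparquet')  # placeholder path; requires fastparquet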
import requests import pandas as pd import itertools import operator import time import datetime import json def O3_IMECA2CONC(IMECA): conc = IMECA*0.11/100 # en ppm return conc*1000 #en ppb def SO2_IMECA2CONC(IMECA): conc = IMECA*0.13/100 # ppm return conc*1000 #en ppb def NO2_IMECA2CONC(IMECA): conc = IMECA*0.21/100 # en ppm return conc*1000 #en ppb def CO_IMECA2CONC(IMECA): conc = IMECA*11/100 # en ppm return conc #en ppm def PM10_IMECA2CONC(IMECA): form1 = IMECA/.833 form2 = (IMECA -40.0)/0.5 form3 = IMECA/0.625 value_by_region = {0:form1,1:form2,2:form3} region = [dectect_region(form1),dectect_region(form2),dectect_region(form2)] most_common_region = most_common(region) return value_by_region[most_common_region] #en ug/m3 def IMECA2CONC(Chemical,Value): Value = int(Value) if Chemical == "O3": return O3_IMECA2CONC(Value) if Chemical == "SO2": return SO2_IMECA2CONC(Value) if Chemical == "NO2": return NO2_IMECA2CONC(Value) if Chemical == "CO": return CO_IMECA2CONC(Value) if Chemical == "PM10": return PM10_IMECA2CONC(Value) return "n/a" def dectect_region(conc): if conc <= 121.0: return 0 if conc >121.0 and conc <=320: return 1 if conc > 320: return 2 def most_common(L): # get an iterable of (item, iterable) pairs SL = sorted((x, i) for i, x in enumerate(L)) # print 'SL:', SL groups = itertools.groupby(SL, key=operator.itemgetter(0)) # auxiliary function to get "quality" for an item def _auxfun(g): item, iterable = g count = 0 min_index = len(L) for _, where in iterable: count += 1 min_index = min(min_index, where) # print 'item %r, count %r, minind %r' % (item, count, min_index) return count, -min_index # pick the highest-count/earliest item return max(groups, key=_auxfun)[0] def gen_feed_info(): now = datetime.datetime.now() feed_dict = {} feed_dict["feed_id"] = "MXMEX-"+ str(now.year) + str(now.month) + str(now.day) + str(now.hour) + str(now.minute) feed_dict["feed_publisher-name"] = "Secretaría de Medio Ambiente de la Ciudad de México" feed_dict["feed_publisher-url"] = "http://www.aire.df.gob.mx/default.php" feed_dict["feed_start-date"] = datetime.datetime.now().isoformat() feed_dict["feed_finish-date"] = datetime.datetime.now().isoformat() feed_df = pd.DataFrame.from_dict([feed_dict]) feed_df.to_csv("output/feed_info.csv", index=False) return feed_dict["feed_id"] r = requests.get("http://148.243.232.113/calidadaire/xml/simat.json") rtextencoded = json.loads(r.text) stations = rtextencoded["pollutionMeasurements"]["stations"] units = {"NO2":"ppb","O3":"ppb","SO2":"ppb","PM10":"ug/m3","CO":"ppm"} methods = {"O3":"MXMEX-O3-1993","NO2":"MXMEX-NOx-1993","SO2":"MXMEX-SO2-1993","PM10":"MXMEX-PM10-1993", "Temp": "MXMEX-TEMP", "Hum": "MXMEX-HUM", "CO":"MXMEX-CO-1993"} country = [{ "country_id": "MX", "country_lat": "19.24", "country_long": "-99.09", "country_name": "México", "country_timezone": "UTC +6:00" }] dataframe_country = pd.DataFrame(country) dataframe_country.to_csv("output/countries.csv", index=False) city = [{ "country_id": "MX", "city_id": "MXMEX", "city_lat": "19.38", "city_long": "-99.08", "city_name": "Ciudad de México y zona metropolitana", "city_timezone": "UTC +6:00" }] dataframe_city = pd.DataFrame(city) dataframe_city.to_csv("output/cities.csv", index=False) estaciones = {} estaciones_as_list = [] for station in stations: local_dict = { "station_id": "MXMEX-" + station["shortName"], "country_id": "MX", "city_id": "MXMEX", "station_local" : station["name"], "station_name" : station["name"], "level": "station", "station_long" : station["location"].split(",")[1], 
"station_lat" : station["location"].split(",")[0], "station_timezone": "UTC +6:00" } estaciones[station["name"]] = local_dict estaciones_as_list.append(local_dict) dataframe_estaciones = pd.DataFrame(estaciones_as_list) dataframe_estaciones.to_csv("output/stations.csv", index=False) pollutants = {} pollutants_as_list = [] feed_id = gen_feed_info() now = datetime.datetime.now() nowminusminuteandsecond = datetime.datetime.now().replace(minute=0, second=0, microsecond=0) current_time = datetime.datetime.now().isoformat() truncated_time = nowminusminuteandsecond.isoformat() for station in stations: if station["pollutant"] == "n.d": next else: local_dict = { "station_id": "MXMEX-" + station["shortName"], "pollutant_id": station["pollutant"], "pollutant_unit": units[station["pollutant"]], "pollutant_update-time": truncated_time, "pollutant_value": IMECA2CONC(station["pollutant"],station["imecaPoints"]), "pollutant_averaging": 1, "method_id" : methods[station["pollutant"]], "feed_id" : feed_id } pollutants_as_list.append(local_dict) if station["temperature"] is not '': temp_dict = { "station_id": "MXMEX-" + station["shortName"], "pollutant_id": "Temp", "pollutant_unit": "C", "pollutant_update-time": truncated_time, "pollutant_value": station["temperature"], "pollutant_averaging": 1, "method_id" : methods[station["pollutant"]], "feed_id" : feed_id } pollutants_as_list.append(temp_dict) else: temp_dict = {} if station["humidity"] is not '': hum_dict = { "station_id": "MXMEX-" + station["shortName"], "pollutant_id": "Hum", "pollutant_unit": "%", "pollutant_update-time": truncated_time, "pollutant_value": station["humidity"], "pollutant_averaging": 1, "method_id" : methods[station["pollutant"]], "feed_id" : feed_id } pollutants_as_list.append(hum_dict) else: hum_dict = {} pollutants_df =
pd.DataFrame.from_dict(pollutants_as_list)
pandas.DataFrame.from_dict
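A minimal sketch of pandas.DataFrame.from_dict using the documented dict-of-lists form; the pollutant script above passes a list of record dicts instead, which the DataFrame constructor also accepts. The column names and values here are made up.

import pandas as pd

data = {'pollutant_id': ['O3', 'NO2'],        # made-up column names and values
        'pollutant_value': [45.1, 30.2]}
pollutants_df = pd.DataFrame.from_dict(data)  # dict keys become columns (default orient='columns')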
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Point
from shapely.ops import nearest_points
import os
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))

from app.app.routing.node import Node


class Graph:
    instance = None

    def __init__(self, nodes, edges):
        self.nodes = self.__prepare_nodes(nodes)
        edges = self.__prepare_edges(edges)
        joined = self.__join(edges, self.nodes)
        self.graph = self.__adjacent(joined)

    @classmethod
    def load_default(cls):
        if cls.instance is None:
            nodes =
pd.read_pickle("./resources/nodes.p")
pandas.read_pickle
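A minimal sketch of pandas.read_pickle, the API this row targets; 'nodes.p' is a placeholder for a DataFrame previously written with to_pickle.

import pandas as pd

nodes = pd.read_pickle('nodes.p')  # placeholder path; loads whatever object to_pickle stored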
import ccxt
import pandas as pd
from pprint import pprint
from decimal import Decimal
from datetime import datetime


class FetchHistoricalDataException(Exception):
    pass


class CcxtExchange():

    def __init__(self, exchange_id, params):
        self.exchange_id = exchange_id  # 'binance'
        self.exchange_class = getattr(ccxt, exchange_id)
        self.exchange = self.exchange_class(params)
        # params = {
        #     'apiKey': 'YOUR_API_KEY',
        #     'secret': 'YOUR_SECRET',
        #     'timeout': 30000,
        #     'enableRateLimit': True,
        # }

    def getOHLCV(self, symbol, interval, limit=1000, start_time=None, cast_to=float):
        """Converts ccxt OHLCV data from a list of lists to a DataFrame."""
        ohlcv = self.exchange.fetchOHLCV(
            symbol, interval, since=start_time, limit=limit)
        if len(ohlcv) == 0:
            return pd.DataFrame()
        df = pd.DataFrame(ohlcv)
        df.columns = ['time', 'open', 'high', 'low', 'close', 'volume']
        # transform values from strings to floats
        for col in df.columns:
            df[col] = df[col].astype(cast_to)
        df['date'] =
pd.to_datetime(df['time'] * 1000000, infer_datetime_format=True)
pandas.to_datetime
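A minimal sketch of pandas.to_datetime on millisecond epoch timestamps, mirroring the handling of the OHLCV 'time' column above; the timestamp values are made up.

import pandas as pd

ms = pd.Series([1609459200000, 1609459260000])  # made-up milliseconds since the epoch
dates = pd.to_datetime(ms * 1000000)            # scaled to nanoseconds, as in the snippet above
# equivalent and more explicit: pd.to_datetime(ms, unit='ms')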
import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, MultiIndex, ) import pandas._testing as tm def test_to_numpy(idx): result = idx.to_numpy() exp = idx.values tm.assert_numpy_array_equal(result, exp) def test_to_frame(): tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] index = MultiIndex.from_tuples(tuples) result = index.to_frame(index=False) expected = DataFrame(tuples) tm.assert_frame_equal(result, expected) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] index = MultiIndex.from_tuples(tuples, names=["first", "second"]) result = index.to_frame(index=False) expected = DataFrame(tuples) expected.columns = ["first", "second"] tm.assert_frame_equal(result, expected) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) # See GH-22580 index = MultiIndex.from_tuples(tuples) result = index.to_frame(index=False, name=["first", "second"]) expected = DataFrame(tuples) expected.columns = ["first", "second"] tm.assert_frame_equal(result, expected) result = index.to_frame(name=["first", "second"]) expected.index = index expected.columns = ["first", "second"] tm.assert_frame_equal(result, expected) msg = "'name' must be a list / sequence of column names." with pytest.raises(TypeError, match=msg): index.to_frame(name="first") msg = "'name' should have same length as number of levels on index." with pytest.raises(ValueError, match=msg): index.to_frame(name=["first"]) # Tests for datetime index index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)]) result = index.to_frame(index=False) expected = DataFrame( { 0: np.repeat(np.arange(5, dtype="int64"), 3), 1: np.tile(pd.date_range("20130101", periods=3), 5), } ) tm.assert_frame_equal(result, expected) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) # See GH-22580 result = index.to_frame(index=False, name=["first", "second"]) expected = DataFrame( { "first": np.repeat(np.arange(5, dtype="int64"), 3), "second": np.tile(pd.date_range("20130101", periods=3), 5), } ) tm.assert_frame_equal(result, expected) result = index.to_frame(name=["first", "second"]) expected.index = index tm.assert_frame_equal(result, expected) def test_to_frame_dtype_fidelity(): # GH 22420 mi = MultiIndex.from_arrays( [ pd.date_range("19910905", periods=6, tz="US/Eastern"), [1, 1, 1, 2, 2, 2], pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), ["x", "x", "y", "z", "x", "y"], ], names=["dates", "a", "b", "c"], ) original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} expected_df = DataFrame( { "dates":
pd.date_range("19910905", periods=6, tz="US/Eastern")
pandas.date_range
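Note on this row: the surrounding test (`test_to_frame_dtype_fidelity`, GH 22420) checks that `MultiIndex.to_frame` preserves each level's dtype, including tz-aware datetimes and ordered categoricals. A minimal sketch of the same idea with illustrative values:

import pandas as pd

# MultiIndex levels keep their dtypes when converted back to columns with to_frame().
mi = pd.MultiIndex.from_arrays(
    [
        pd.date_range("19910905", periods=3, tz="US/Eastern"),
        pd.Categorical(["a", "b", "b"], ordered=True),
    ],
    names=["dates", "cat"],
)
df = mi.to_frame(index=False)
print(df.dtypes)
# dates    datetime64[ns, US/Eastern]
# cat                        category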
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Oct 13 14:18:46 2018 @author: rick Module with functions for importing and integrating biostratigraphic age-depth data from DSDP, ODP, and IODP into a single, standardized csv file. Age-depth data are not available for Chikyu expeditions. IODP: Must first manually download each "List of Assets" from IODP LIMS DESC Reports web portal with "workbook" and "fossil" selected Functions: """ import glob import os import pandas as pd import numpy as np from ocean_drilling_db import data_filepaths as dfp def load_dsdp_age_depth(): # Read in data and rename columns dsdp_data = pd.read_csv(dfp.dsdp_age_depth, sep="\t", header=0, skiprows=None, encoding='windows-1252') dsdp_data = dsdp_data.reindex(['leg', 'site', 'hole', 'top of section depth(m)', 'bottom of section depth(m)', 'age top of section(million years)', 'age bottom of section(million years)', 'data source'], axis=1) dsdp_data.columns = ('leg', 'site', 'hole', 'top_depth', 'bottom_depth', 'top_age', 'bottom_age', 'source') dsdp_data[['top_age', 'bottom_age']] = np.multiply(dsdp_data[['top_age', 'bottom_age']], 1000000) dsdp_data = dsdp_data.applymap(str) # Assign site keys site_keys = pd.read_csv('hole_metadata.csv', sep='\t', index_col=0) site_keys = site_keys[['site_key','site']] full_data = pd.merge(site_keys, dsdp_data, how = 'inner', on = 'site') full_data = full_data.reindex(['site_key', 'leg', 'site', 'hole', 'top_depth', 'bottom_depth', 'top_age', 'bottom_age', 'type', 'source'], axis=1) # Use both top and bottom picks top_values = full_data.reindex(['site_key', 'leg', 'site', 'hole', 'top_depth','top_age', 'type', 'source'], axis=1) top_values = top_values.rename(columns={'top_depth': 'depth', 'top_age': 'age'}) bottom_values = full_data.reindex(['site_key', 'leg', 'site', 'hole', 'bottom_depth', 'bottom_age', 'type', 'source'], axis=1) bottom_values = bottom_values.rename(columns={'bottom_depth': 'depth', 'bottom_age': 'age'}) final_data = pd.concat([top_values, bottom_values]) final_data[['age', 'depth']] = final_data.loc[:,['age', 'depth']].applymap(float) # Sort and clean final_data = final_data.sort_values(['site_key', 'depth']) final_data = final_data.replace(to_replace='nan', value=np.nan) return final_data ### Difference between age-depth and age-profiles files?? 
def load_odp_age_depth(): odp_data = pd.read_csv(dfp.odp_age_depth, sep="\t", header=0, skiprows=None, encoding='windows-1252') # Rename and reorder columns, change units to years odp_data.columns = ('leg', 'site', 'hole', 'source', 'depth', 'age', 'type') odp_data = odp_data.reindex(['leg', 'site', 'hole', 'depth', 'age', 'type', 'source'], axis=1) odp_data['age'] = np.multiply(odp_data['age'], 1000000) odp_data = odp_data.applymap(str) # Assign site keys site_keys = pd.read_csv('hole_metadata.csv', sep='\t', index_col=0) site_keys = site_keys[['site_key','site']] full_data = pd.merge(site_keys, odp_data, how = 'inner', on = 'site') full_data = full_data.reindex(['site_key', 'leg', 'site', 'hole', 'depth', 'age', 'type', 'source'], axis=1) full_data[['age', 'depth']] = full_data.loc[:,['age', 'depth']].applymap(float) return full_data def load_odp_age_profiles(): data = pd.read_csv(dfp.odp_age_profile, sep="\t", header=0, skiprows=None, encoding='windows-1252') # Filter out those with depth difference greater than 1 core length (10m) (11m to account for 10% error/expansion) diff = data['Ageprofile Depth Base']-data['Ageprofile Depth Top'] data = data.iloc[diff[diff < 11].index.tolist(),:] data['Ageprofile Age Old'] = data['Ageprofile Age Old'].str.strip().replace('',np.nan).astype(float) # Average depths and ages data['depth'] = (data['Ageprofile Depth Top'] + data['Ageprofile Depth Base'])/2 data['age'] = (data['Ageprofile Depth Top'] + data['Ageprofile Depth Base'])/2 data.columns = data.columns.str.strip() data = data.reindex(['Leg', 'Site', 'Hole', 'depth', 'age', 'Ageprofile Datum Description'], axis=1) data = data.rename(columns={'Leg':'leg', 'Site':'site', 'Hole':'hole', 'Ageprofile Datum Description': 'type'}) data.hole = data.hole.str.strip() data.type = data.type.str.strip() data.site = data['site'].astype(str) # Get site keys and add to DataFrame site_keys = pd.read_csv('hole_metadata.csv', sep='\t', index_col=0) site_keys = site_keys[['site_key','site']] full_data = pd.merge(site_keys, data, how='inner', on='site') full_data = full_data[['site_key', 'leg', 'site', 'hole', 'depth', 'age', 'type']] full_data['age'] = full_data['age'] * 1000000 return full_data def load_iodp_age_depth(): files = glob.glob(os.path.join(dfp.iodp_age_depth,'*.xls*')) fossil_data =
pd.DataFrame()
pandas.DataFrame
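Note on this row: the IODP loader starts from an empty `fossil_data = pd.DataFrame()` and then, presumably, fills it from the Excel workbooks matched by `glob`. A sketch of that accumulate-then-concat pattern under an assumed file layout (the `load_excel_dir` helper and directory handling are illustrative, not part of the original module); collecting frames in a list and calling `pd.concat` once is cheaper than growing a DataFrame inside the loop:

import glob
import os
import pandas as pd

def load_excel_dir(directory: str) -> pd.DataFrame:
    """Read every Excel workbook in a directory and stack the rows (illustrative)."""
    frames = []
    for path in glob.glob(os.path.join(directory, "*.xls*")):
        frames.append(pd.read_excel(path))
    if not frames:
        # Mirror the source's convention of returning an empty frame when nothing matched.
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)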
import numpy as np import pandas as pd from numba import njit import pytest from vectorbt import defaults from vectorbt.utils import checks, config, decorators, math, array from tests.utils import hash # ############# config.py ############# # class TestConfig: def test_config(self): conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False) conf['b']['d'] = 2 conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True) conf['a'] = 2 with pytest.raises(Exception) as e_info: conf['d'] = 2 # go deeper conf['b']['c'] = 2 with pytest.raises(Exception) as e_info: conf['b']['d'] = 2 def test_merge_kwargs(self): assert config.merge_kwargs({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2} assert config.merge_kwargs({'a': 1}, {'a': 2}) == {'a': 2} assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}} assert config.merge_kwargs({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}} # ############# decorators.py ############# # class TestDecorators: def test_class_or_instancemethod(self): class G: @decorators.class_or_instancemethod def g(self_or_cls): if isinstance(self_or_cls, type): return True # class return False # instance assert G.g() assert not G().g() def test_custom_property(self): class G: @decorators.custom_property(some='key') def cache_me(self): return np.random.uniform() assert 'some' in G.cache_me.kwargs assert G.cache_me.kwargs['some'] == 'key' def test_custom_method(self): class G: @decorators.custom_method(some='key') def cache_me(self): return np.random.uniform() assert 'some' in G.cache_me.kwargs assert G.cache_me.kwargs['some'] == 'key' def test_cached_property(self): class G: @decorators.cached_property(some='key') def cache_me(self): return np.random.uniform() assert 'some' in G.cache_me.kwargs assert G.cache_me.kwargs['some'] == 'key' class G: @decorators.cached_property def cache_me(self): return np.random.uniform() g = G() # general caching cached_number = g.cache_me assert g.cache_me == cached_number # clear_cache method G.cache_me.clear_cache(g) cached_number2 = g.cache_me assert cached_number2 != cached_number assert g.cache_me == cached_number2 # disabled locally G.cache_me.disabled = True cached_number3 = g.cache_me assert cached_number3 != cached_number2 assert g.cache_me != cached_number3 G.cache_me.disabled = False # disabled globally defaults.caching = False cached_number4 = g.cache_me assert cached_number4 != cached_number3 assert g.cache_me != cached_number4 defaults.caching = True def test_cached_method(self): class G: @decorators.cached_method(some='key') def cache_me(self): return np.random.uniform() assert 'some' in G.cache_me.kwargs assert G.cache_me.kwargs['some'] == 'key' class G: @decorators.cached_method def cache_me(self, b=10): return np.random.uniform() * 10 g = G() # general caching cached_number = g.cache_me() assert g.cache_me() == cached_number # clear_cache method G.cache_me.clear_cache(g) cached_number2 = g.cache_me() assert cached_number2 != cached_number assert g.cache_me() == cached_number2 # disabled locally G.cache_me.disabled = True cached_number3 = g.cache_me() assert cached_number3 != cached_number2 assert g.cache_me() != cached_number3 G.cache_me.disabled = False # disabled globally defaults.caching = False cached_number4 = g.cache_me() assert cached_number4 != cached_number3 assert g.cache_me() != cached_number4 defaults.caching = True # disabled by non-hashable args cached_number5 = g.cache_me(b=np.zeros(1)) assert cached_number5 != cached_number4 assert g.cache_me(b=np.zeros(1)) != cached_number5 def 
test_traverse_attr_kwargs(self): class A: @decorators.custom_property(some_key=0) def a(self): pass class B: @decorators.cached_property(some_key=0, child_cls=A) def a(self): pass @decorators.custom_method(some_key=1) def b(self): pass class C: @decorators.cached_method(some_key=0, child_cls=B) def b(self): pass @decorators.custom_property(some_key=1) def c(self): pass assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580 assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580 assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378 assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580 # ############# checks.py ############# # class TestChecks: def test_is_pandas(self): assert not checks.is_pandas(0) assert not checks.is_pandas(np.array([0])) assert checks.is_pandas(pd.Series([1, 2, 3])) assert checks.is_pandas(pd.DataFrame([1, 2, 3])) def test_is_series(self): assert not checks.is_series(0) assert not checks.is_series(np.array([0])) assert checks.is_series(pd.Series([1, 2, 3])) assert not checks.is_series(pd.DataFrame([1, 2, 3])) def test_is_frame(self): assert not checks.is_frame(0) assert not checks.is_frame(np.array([0])) assert not checks.is_frame(pd.Series([1, 2, 3])) assert checks.is_frame(pd.DataFrame([1, 2, 3])) def test_is_array(self): assert not checks.is_array(0) assert checks.is_array(np.array([0])) assert checks.is_array(pd.Series([1, 2, 3])) assert checks.is_array(pd.DataFrame([1, 2, 3])) def test_is_numba_func(self): def test_func(x): return x @njit def test_func_nb(x): return x assert not checks.is_numba_func(test_func) assert checks.is_numba_func(test_func_nb) def test_is_hashable(self): assert checks.is_hashable(2) assert not checks.is_hashable(np.asarray(2)) def test_is_index_equal(self): assert checks.is_index_equal( pd.Index([0]), pd.Index([0]) ) assert not checks.is_index_equal( pd.Index([0]), pd.Index([1]) ) assert not checks.is_index_equal( pd.Index([0], name='name'), pd.Index([0]) ) assert not checks.is_index_equal( pd.MultiIndex.from_arrays([[0], [1]]), pd.Index([0]) ) assert checks.is_index_equal( pd.MultiIndex.from_arrays([[0], [1]]), pd.MultiIndex.from_arrays([[0], [1]]) ) assert checks.is_index_equal( pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']), pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']) ) assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
pandas.MultiIndex.from_arrays
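Note on this row: the truncated assertion compares two `MultiIndex.from_arrays` results and expects equality only when both values and names match. A standalone sketch of such a check built on `Index.equals` plus a name comparison; vectorbt's actual `checks.is_index_equal` may be implemented differently:

import pandas as pd

def is_index_equal(a: pd.Index, b: pd.Index) -> bool:
    # Index.equals ignores names, so compare them separately.
    return a.equals(b) and list(a.names) == list(b.names)

mi1 = pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
mi2 = pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
mi3 = pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])

assert is_index_equal(mi1, mi2)        # same values, same names
assert not is_index_equal(mi1, mi3)    # same values, different names
assert not is_index_equal(mi1, pd.Index([0]))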
import functools import json import os from multiprocessing.pool import Pool from typing import List, Tuple, Type, Any import pprint import abc import luigi import numpy as np import pandas as pd import torch import torchbearer from torchbearer import Trial from tqdm import tqdm import gc from mars_gym.data.dataset import ( preprocess_interactions_data_frame, InteractionsDataset, ) from mars_gym.evaluation.propensity_score import FillPropensityScoreMixin from mars_gym.evaluation.metrics.fairness import calculate_fairness_metrics from mars_gym.utils import files from mars_gym.utils.files import get_test_set_predictions_path, get_params_path from mars_gym.evaluation.metrics.offpolicy import ( eval_IPS, eval_CIPS, eval_SNIPS, eval_doubly_robust, ) from mars_gym.evaluation.metrics.rank import ( mean_reciprocal_rank, average_precision, precision_at_k, ndcg_at_k, personalization_at_k, prediction_coverage_at_k, ) from mars_gym.simulation.training import ( TorchModelTraining, load_torch_model_training_from_task_id, ) from mars_gym.evaluation.policy_estimator import PolicyEstimatorTraining from mars_gym.torch.data import FasterBatchSampler, NoAutoCollationDataLoader from mars_gym.utils.reflection import load_attr, get_attribute_names from mars_gym.utils.utils import parallel_literal_eval, JsonEncoder from mars_gym.utils.index_mapping import ( create_index_mapping, create_index_mapping_from_arrays, transform_with_indexing, map_array, ) class BaseEvaluationTask(luigi.Task, metaclass=abc.ABCMeta): model_task_class: str = luigi.Parameter( default="mars_gym.simulation.interaction.InteractionTraining" ) model_task_id: str = luigi.Parameter() offpolicy_eval: bool = luigi.BoolParameter(default=False) task_hash: str = luigi.Parameter(default="none") @property def cache_attr(self): return [""] @property def task_name(self): return self.model_task_id + "_" + self.task_id.split("_")[-1] @property def model_training(self) -> TorchModelTraining: if not hasattr(self, "_model_training"): class_ = load_attr(self.model_task_class, Type[TorchModelTraining]) self._model_training = load_torch_model_training_from_task_id( class_, self.model_task_id ) return self._model_training @property def n_items(self): return self.model_training.n_items def output(self): return luigi.LocalTarget( os.path.join( files.OUTPUT_PATH, "evaluation", self.__class__.__name__, "results", self.task_name, ) ) def cache_cleanup(self): for a in self.cache_attrs: if hasattr(self, a): delattr(self, a) def _save_params(self): with open(get_params_path(self.output().path), "w") as params_file: json.dump( self.param_kwargs, params_file, default=lambda o: dict(o), indent=4 ) class EvaluateTestSetPredictions(FillPropensityScoreMixin, BaseEvaluationTask): # TODO transform this params in a dict params direct_estimator_class: str = luigi.Parameter(default="mars_gym.simulation.training.SupervisedModelTraining") direct_estimator_negative_proportion: int = luigi.FloatParameter(0) direct_estimator_batch_size: int = luigi.IntParameter(default=500) direct_estimator_epochs: int = luigi.IntParameter(default=50) direct_estimator_extra_params: dict = luigi.DictParameter(default={}) eval_cips_cap: int = luigi.IntParameter(default=15) policy_estimator_extra_params: dict = luigi.DictParameter(default={}) num_processes: int = luigi.IntParameter(default=os.cpu_count()) fairness_columns: List[str] = luigi.ListParameter(default=[]) rank_metrics: List[str] = luigi.ListParameter(default=[]) only_new_interactions: bool = luigi.BoolParameter(default=False) only_exist_items: 
bool = luigi.BoolParameter(default=False) only_exist_users: bool = luigi.BoolParameter(default=False) def get_direct_estimator(self, extra_params: dict) -> TorchModelTraining: assert self.direct_estimator_class is not None estimator_class = load_attr( self.direct_estimator_class, Type[TorchModelTraining] ) attribute_names = get_attribute_names(estimator_class) params = { key: value for key, value in self.model_training.param_kwargs.items() if key in attribute_names } return estimator_class(**{**params, **extra_params}) #TODO We need change it @property def direct_estimator(self): if not hasattr(self, "_direct_estimator"): self._direct_estimator = self.get_direct_estimator( {**{ "project": self.model_training.project, "learning_rate": 0.0001, "test_size": 0.0, "epochs": self.direct_estimator_epochs, "batch_size": self.direct_estimator_batch_size, "loss_function": "bce", "loss_function_params": {}, "observation": "All Data", "negative_proportion": self.direct_estimator_negative_proportion, "policy_estimator_extra_params": {}, "metrics": ["loss"], "seed": 51, }, **self.direct_estimator_extra_params} ) return self._direct_estimator @property def policy_estimator(self) -> PolicyEstimatorTraining: if not hasattr(self, "_policy_estimator"): self._policy_estimator = PolicyEstimatorTraining( project=self.model_training.project, data_frames_preparation_extra_params=self.model_training.data_frames_preparation_extra_params, **self.policy_estimator_extra_params, ) return self._policy_estimator def requires(self): if self.offpolicy_eval: return [self.direct_estimator, self.policy_estimator] return [] @property def item_column(self) -> str: return self.model_training.project_config.item_column.name @property def available_arms_column(self) -> str: return self.model_training.project_config.available_arms_column_name @property def propensity_score_column(self) -> str: return self.model_training.project_config.propensity_score_column_name def get_item_index(self)-> List[str]: indexed_list = list(self.model_training.index_mapping[self.model_training.project_config.item_column.name].keys()) indexed_list = [x for x in indexed_list if x is not None and str(x) != 'nan'] return indexed_list def get_catalog(self, df: pd.DataFrame) -> List[str]: indexed_list = self.get_item_index() test_list = list(df["sorted_actions"]) test_list.append(indexed_list) all_items = sum(test_list, []) unique_items = list(np.unique(all_items)) return unique_items def run(self): os.makedirs(self.output().path) # df: pd.DataFrame = preprocess_interactions_data_frame( # pd.read_csv( # get_test_set_predictions_path(self.model_training.output().path) # ), # self.model_training.project_config, # ) # .sample(10000) df: pd.DataFrame = pd.read_csv( get_test_set_predictions_path(self.model_training.output().path), dtype = {self.model_training.project_config.item_column.name : "str"} ) # .sample(10000) df["sorted_actions"] = parallel_literal_eval(df["sorted_actions"]) df["prob_actions"] = parallel_literal_eval(df["prob_actions"]) df["action_scores"] = parallel_literal_eval(df["action_scores"]) df["action"] = df["sorted_actions"].apply( lambda sorted_actions: str(sorted_actions[0]) ) with Pool(self.num_processes) as p: print("Creating the relevance lists...") # from IPython import embed; embed() df["relevance_list"] = list( tqdm( p.starmap( _create_relevance_list, zip( df["sorted_actions"], df[self.model_training.project_config.item_column.name], df[self.model_training.project_config.output_column.name], ), ), total=len(df), ) ) if 
self.model_training.metadata_data_frame is not None: df = pd.merge( df, pd.read_csv(self.model_training.metadata_data_frame_path, dtype = {self.model_training.project_config.item_column.name : "str"}), left_on="action", right_on=self.model_training.project_config.item_column.name, suffixes=("", "_action"), ) ground_truth_df = df[ ~(df[self.model_training.project_config.output_column.name] == 0) ] print("Rank Metrics...") df_rank, dict_rank = self.rank_metrics(ground_truth_df) gc.collect() print("Fairness Metrics") df_fairness, df_fairness_metrics = self.fairness_metrics(ground_truth_df) gc.collect() print("Offpolice Metrics") df_offpolicy, dict_offpolice = self.offpolice_metrics(df) gc.collect() # dict_offpolice = {} # Save Logs metrics = {**dict_rank, **dict_offpolice} pprint.pprint(metrics) with open( os.path.join(self.output().path, "metrics.json"), "w" ) as metrics_file: json.dump(metrics, metrics_file, cls=JsonEncoder, indent=4) df_offpolicy.to_csv( os.path.join(self.output().path, "df_offpolicy.csv"), index=False ) df_rank.to_csv( os.path.join(self.output().path, "rank_metrics.csv"), index=False ) df_fairness_metrics.to_csv( os.path.join(self.output().path, "fairness_metrics.csv"), index=False ) df_fairness.to_csv( os.path.join(self.output().path, "fairness_df.csv"), index=False ) def rank_metrics(self, df: pd.DataFrame): df = df.copy() # Filter only disponível interaction df = df[df.relevance_list.apply(max) > 0] # Filter only new interactions, them not appear in trained dataset if self.only_new_interactions: df = df[df['trained'] == 0] # Filter only item indexed if self.only_exist_items: items = self.get_item_index() df = df[df[self.model_training.project_config.item_column.name].isin(items)] with Pool(self.num_processes) as p: print("Calculating average precision...") df["average_precision"] = list( tqdm(p.map(average_precision, df["relevance_list"]), total=len(df)) ) print("Calculating precision at 1...") df["precision_at_1"] = list( tqdm( p.map(functools.partial(precision_at_k, k=1), df["relevance_list"]), total=len(df), ) ) print("Calculating MRR at 5 ...") df["mrr_at_5"] = list( tqdm( p.map(functools.partial(mean_reciprocal_rank, k=5), df["relevance_list"]), total=len(df), ) ) print("Calculating MRR at 10 ...") df["mrr_at_10"] = list( tqdm( p.map(functools.partial(mean_reciprocal_rank, k=10), df["relevance_list"]), total=len(df), ) ) print("Calculating nDCG at 5...") df["ndcg_at_5"] = list( tqdm( p.map(functools.partial(ndcg_at_k, k=5), df["relevance_list"]), total=len(df), ) ) print("Calculating nDCG at 10...") df["ndcg_at_10"] = list( tqdm( p.map(functools.partial(ndcg_at_k, k=10), df["relevance_list"]), total=len(df), ) ) print("Calculating nDCG at 15...") df["ndcg_at_15"] = list( tqdm( p.map(functools.partial(ndcg_at_k, k=15), df["relevance_list"]), total=len(df), ) ) print("Calculating nDCG at 20...") df["ndcg_at_20"] = list( tqdm( p.map(functools.partial(ndcg_at_k, k=20), df["relevance_list"]), total=len(df), ) ) print("Calculating nDCG at 50...") df["ndcg_at_50"] = list( tqdm( p.map(functools.partial(ndcg_at_k, k=50), df["relevance_list"]), total=len(df), ) ) # catalog = self.get_catalog(df) metrics = { "model_task": self.model_task_id, "count": len(df), "mean_average_precision": df["average_precision"].mean(), #"MRR": df["MRR"].mean(), "precision_at_1": df["precision_at_1"].mean(), "mrr_at_5": df["mrr_at_5"].mean(), "mrr_at_10": df["mrr_at_10"].mean(), "ndcg_at_5": df["ndcg_at_5"].mean(), #"ndcg_at_10": df["ndcg_at_10"].mean(), #"ndcg_at_15": df["ndcg_at_15"].mean(), 
"ndcg_at_20": df["ndcg_at_20"].mean(), #"ndcg_at_50": df["ndcg_at_50"].mean(), "coverage_at_5": prediction_coverage_at_k(df["sorted_actions"], catalog, 5), #"coverage_at_10": prediction_coverage_at_k( # df["sorted_actions"], catalog, 10 #), #"coverage_at_15": prediction_coverage_at_k( # df["sorted_actions"], catalog, 15 #), "coverage_at_20": prediction_coverage_at_k( df["sorted_actions"], catalog, 20 ), #"coverage_at_50": prediction_coverage_at_k( # df["sorted_actions"], catalog, 50 #), #"personalization_at_5": personalization_at_k(df["sorted_actions"], 5), #"personalization_at_10": personalization_at_k(df["sorted_actions"], 10), #"personalization_at_15": personalization_at_k(df["sorted_actions"], 15), #"personalization_at_20": personalization_at_k(df["sorted_actions"], 20), #"personalization_at_50": personalization_at_k(df["sorted_actions"], 50), } return df, metrics def offpolice_metrics(self, df: pd.DataFrame): metrics = {} if not self.offpolicy_eval: return
pd.DataFrame()
pandas.DataFrame
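Note on this row: `offpolice_metrics` returns an empty `pd.DataFrame()` when off-policy evaluation is disabled, so the caller can unpack and save the result unconditionally. A minimal sketch of that guard pattern with illustrative names and a placeholder metric value:

import pandas as pd

def offpolicy_metrics(df, enabled):
    """Return (metrics frame, metrics dict); both empty when evaluation is disabled."""
    if not enabled:
        return pd.DataFrame(), {}
    metrics = {"ips": 0.0}  # placeholder value for illustration only
    return pd.DataFrame([metrics]), metrics

df_off, metrics = offpolicy_metrics(pd.DataFrame(), enabled=False)
assert df_off.empty and metrics == {}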
import datetime from datetime import timedelta from distutils.version import LooseVersion from io import BytesIO import os import re from warnings import catch_warnings, simplefilter import numpy as np import pytest from pandas.compat import is_platform_little_endian, is_platform_windows import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_categorical_dtype import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, RangeIndex, Series, Timestamp, bdate_range, concat, date_range, isna, timedelta_range, ) from pandas.tests.io.pytables.common import ( _maybe_remove, create_tempfile, ensure_clean_path, ensure_clean_store, safe_close, safe_remove, tables, ) import pandas.util.testing as tm from pandas.io.pytables import ( ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf, ) from pandas.io import pytables as pytables # noqa: E402 isort:skip from pandas.io.pytables import TableIterator # noqa: E402 isort:skip _default_compressor = "blosc" ignore_natural_naming_warning = pytest.mark.filterwarnings( "ignore:object name:tables.exceptions.NaturalNameWarning" ) @pytest.mark.single class TestHDFStore: def test_format_kwarg_in_constructor(self, setup_path): # GH 13291 with ensure_clean_path(setup_path) as path: with pytest.raises(ValueError): HDFStore(path, format="table") def test_context(self, setup_path): path = create_tempfile(setup_path) try: with HDFStore(path) as tbl: raise ValueError("blah") except ValueError: pass finally: safe_remove(path) try: with HDFStore(path) as tbl: tbl["a"] = tm.makeDataFrame() with HDFStore(path) as tbl: assert len(tbl) == 1 assert type(tbl["a"]) == DataFrame finally: safe_remove(path) def test_conv_read_write(self, setup_path): path = create_tempfile(setup_path) try: def roundtrip(key, obj, **kwargs): obj.to_hdf(path, key, **kwargs) return read_hdf(path, key) o = tm.makeTimeSeries() tm.assert_series_equal(o, roundtrip("series", o)) o = tm.makeStringSeries() tm.assert_series_equal(o, roundtrip("string_series", o)) o = tm.makeDataFrame() tm.assert_frame_equal(o, roundtrip("frame", o)) # table df = DataFrame(dict(A=range(5), B=range(5))) df.to_hdf(path, "table", append=True) result = read_hdf(path, "table", where=["index>2"]) tm.assert_frame_equal(df[df.index > 2], result) finally: safe_remove(path) def test_long_strings(self, setup_path): # GH6166 df = DataFrame( {"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10) ) with ensure_clean_store(setup_path) as store: store.append("df", df, data_columns=["a"]) result = store.select("df") tm.assert_frame_equal(df, result) def test_api(self, setup_path): # GH4584 # API issue when to_hdf doesn't accept append AND format args with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True) df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True) 
tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.to_hdf(path, "df", append=False, format="fixed") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False, format="f") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False) tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_store(setup_path) as store: path = store._path df = tm.makeDataFrame() _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=True, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # append to False _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # formats _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format=None) tm.assert_frame_equal(store.select("df"), df) with ensure_clean_path(setup_path) as path: # Invalid. df = tm.makeDataFrame() with pytest.raises(ValueError): df.to_hdf(path, "df", append=True, format="f") with pytest.raises(ValueError): df.to_hdf(path, "df", append=True, format="fixed") with pytest.raises(TypeError): df.to_hdf(path, "df", append=True, format="foo") with pytest.raises(TypeError): df.to_hdf(path, "df", append=False, format="bar") # File path doesn't exist path = "" with pytest.raises(FileNotFoundError): read_hdf(path, "df") def test_api_default_format(self, setup_path): # default_format option with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() pd.set_option("io.hdf.default_format", "fixed") _maybe_remove(store, "df") store.put("df", df) assert not store.get_storer("df").is_table with pytest.raises(ValueError): store.append("df2", df) pd.set_option("io.hdf.default_format", "table") _maybe_remove(store, "df") store.put("df", df) assert store.get_storer("df").is_table _maybe_remove(store, "df2") store.append("df2", df) assert store.get_storer("df").is_table pd.set_option("io.hdf.default_format", None) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() pd.set_option("io.hdf.default_format", "fixed") df.to_hdf(path, "df") with HDFStore(path) as store: assert not store.get_storer("df").is_table with pytest.raises(ValueError): df.to_hdf(path, "df2", append=True) pd.set_option("io.hdf.default_format", "table") df.to_hdf(path, "df3") with HDFStore(path) as store: assert store.get_storer("df3").is_table df.to_hdf(path, "df4", append=True) with HDFStore(path) as store: assert store.get_storer("df4").is_table pd.set_option("io.hdf.default_format", None) def test_keys(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeStringSeries() store["c"] = tm.makeDataFrame() assert len(store) == 3 expected = {"/a", "/b", "/c"} assert set(store.keys()) == expected assert set(store) == expected def test_keys_ignore_hdf_softlink(self, setup_path): # GH 20523 # Puts a softlink into HDF file and rereads with ensure_clean_store(setup_path) as store: df = DataFrame(dict(A=range(5), B=range(5))) 
store.put("df", df) assert store.keys() == ["/df"] store._handle.create_soft_link(store._handle.root, "symlink", "df") # Should ignore the softlink assert store.keys() == ["/df"] def test_iter_empty(self, setup_path): with ensure_clean_store(setup_path) as store: # GH 12221 assert list(store) == [] def test_repr(self, setup_path): with ensure_clean_store(setup_path) as store: repr(store) store.info() store["a"] = tm.makeTimeSeries() store["b"] = tm.makeStringSeries() store["c"] = tm.makeDataFrame() df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["bool3"] = True df["int1"] = 1 df["int2"] = 2 df["timestamp1"] = Timestamp("20010102") df["timestamp2"] = Timestamp("20010103") df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0) df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0) df.loc[3:6, ["obj1"]] = np.nan df = df._consolidate()._convert(datetime=True) with catch_warnings(record=True): simplefilter("ignore", pd.errors.PerformanceWarning) store["df"] = df # make a random group in hdf space store._handle.create_group(store._handle.root, "bah") assert store.filename in repr(store) assert store.filename in str(store) store.info() # storers with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() store.append("df", df) s = store.get_storer("df") repr(s) str(s) @ignore_natural_naming_warning def test_contains(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeDataFrame() store["foo/bar"] = tm.makeDataFrame() assert "a" in store assert "b" in store assert "c" not in store assert "foo/bar" in store assert "/foo/bar" in store assert "/foo/b" not in store assert "bar" not in store # gh-2694: tables.NaturalNameWarning with catch_warnings(record=True): store["node())"] = tm.makeDataFrame() assert "node())" in store def test_versioning(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeDataFrame() df = tm.makeTimeDataFrame() _maybe_remove(store, "df1") store.append("df1", df[:10]) store.append("df1", df[10:]) assert store.root.a._v_attrs.pandas_version == "0.15.2" assert store.root.b._v_attrs.pandas_version == "0.15.2" assert store.root.df1._v_attrs.pandas_version == "0.15.2" # write a file and wipe its versioning _maybe_remove(store, "df2") store.append("df2", df) # this is an error because its table_type is appendable, but no # version info store.get_node("df2")._v_attrs.pandas_version = None with pytest.raises(Exception): store.select("df2") def test_mode(self, setup_path): df = tm.makeTimeDataFrame() def check(mode): with ensure_clean_path(setup_path) as path: # constructor if mode in ["r", "r+"]: with pytest.raises(IOError): HDFStore(path, mode=mode) else: store = HDFStore(path, mode=mode) assert store._handle.mode == mode store.close() with ensure_clean_path(setup_path) as path: # context if mode in ["r", "r+"]: with pytest.raises(IOError): with HDFStore(path, mode=mode) as store: # noqa pass else: with HDFStore(path, mode=mode) as store: assert store._handle.mode == mode with ensure_clean_path(setup_path) as path: # conv write if mode in ["r", "r+"]: with pytest.raises(IOError): df.to_hdf(path, "df", mode=mode) df.to_hdf(path, "df", mode="w") else: df.to_hdf(path, "df", mode=mode) # conv read if mode in ["w"]: with pytest.raises(ValueError): read_hdf(path, "df", mode=mode) else: result = read_hdf(path, "df", mode=mode) tm.assert_frame_equal(result, df) def check_default_mode(): # 
read_hdf uses default mode with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", mode="w") result = read_hdf(path, "df") tm.assert_frame_equal(result, df) check("r") check("r+") check("a") check("w") check_default_mode() def test_reopen_handle(self, setup_path): with ensure_clean_path(setup_path) as path: store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() # invalid mode change with pytest.raises(PossibleDataLossError): store.open("w") store.close() assert not store.is_open # truncation ok here store.open("w") assert store.is_open assert len(store) == 0 store.close() assert not store.is_open store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() # reopen as read store.open("r") assert store.is_open assert len(store) == 1 assert store._mode == "r" store.close() assert not store.is_open # reopen as append store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open # reopen as append (again) store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open def test_open_args(self, setup_path): with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() # create an in memory store store = HDFStore( path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0 ) store["df"] = df store.append("df2", df) tm.assert_frame_equal(store["df"], df) tm.assert_frame_equal(store["df2"], df) store.close() # the file should not have actually been written assert not os.path.exists(path) def test_flush(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store.flush() store.flush(fsync=True) def test_get(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() left = store.get("a") right = store["a"] tm.assert_series_equal(left, right) left = store.get("/a") right = store["/a"] tm.assert_series_equal(left, right) with pytest.raises(KeyError, match="'No object named b in the file'"): store.get("b") @pytest.mark.parametrize( "where, expected", [ ( "/", { "": ({"first_group", "second_group"}, set()), "/first_group": (set(), {"df1", "df2"}), "/second_group": ({"third_group"}, {"df3", "s1"}), "/second_group/third_group": (set(), {"df4"}), }, ), ( "/second_group", { "/second_group": ({"third_group"}, {"df3", "s1"}), "/second_group/third_group": (set(), {"df4"}), }, ), ], ) def test_walk(self, where, expected, setup_path): # GH10143 objs = { "df1": pd.DataFrame([1, 2, 3]), "df2": pd.DataFrame([4, 5, 6]), "df3": pd.DataFrame([6, 7, 8]), "df4": pd.DataFrame([9, 10, 11]), "s1": pd.Series([10, 9, 8]), # Next 3 items aren't pandas objects and should be ignored "a1": np.array([[1, 2, 3], [4, 5, 6]]), "tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"), "tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"), } with ensure_clean_store("walk_groups.hdf", mode="w") as store: store.put("/first_group/df1", objs["df1"]) store.put("/first_group/df2", objs["df2"]) store.put("/second_group/df3", objs["df3"]) store.put("/second_group/s1", objs["s1"]) store.put("/second_group/third_group/df4", objs["df4"]) # Create non-pandas objects store._handle.create_array("/first_group", "a1", objs["a1"]) store._handle.create_table("/first_group", "tb1", obj=objs["tb1"]) store._handle.create_table("/second_group", "tb2", obj=objs["tb2"]) assert len(list(store.walk(where=where))) == len(expected) for path, groups, leaves in store.walk(where=where): assert path in expected expected_groups, 
expected_frames = expected[path] assert expected_groups == set(groups) assert expected_frames == set(leaves) for leaf in leaves: frame_path = "/".join([path, leaf]) obj = store.get(frame_path) if "df" in leaf: tm.assert_frame_equal(obj, objs[leaf]) else: tm.assert_series_equal(obj, objs[leaf]) def test_getattr(self, setup_path): with ensure_clean_store(setup_path) as store: s = tm.makeTimeSeries() store["a"] = s # test attribute access result = store.a tm.assert_series_equal(result, s) result = getattr(store, "a") tm.assert_series_equal(result, s) df = tm.makeTimeDataFrame() store["df"] = df result = store.df tm.assert_frame_equal(result, df) # errors for x in ["d", "mode", "path", "handle", "complib"]: with pytest.raises(AttributeError): getattr(store, x) # not stores for x in ["mode", "path", "handle", "complib"]: getattr(store, "_{x}".format(x=x)) def test_put(self, setup_path): with ensure_clean_store(setup_path) as store: ts = tm.makeTimeSeries() df = tm.makeTimeDataFrame() store["a"] = ts store["b"] = df[:10] store["foo/bar/bah"] = df[:10] store["foo"] = df[:10] store["/foo"] = df[:10] store.put("c", df[:10], format="table") # not OK, not a table with pytest.raises(ValueError): store.put("b", df[10:], append=True) # node does not currently exist, test _is_table_type returns False # in this case _maybe_remove(store, "f") with pytest.raises(ValueError): store.put("f", df[10:], append=True) # can't put to a table (use append instead) with pytest.raises(ValueError): store.put("c", df[10:], append=True) # overwrite table store.put("c", df[:10], format="table", append=False) tm.assert_frame_equal(df[:10], store["c"]) def test_put_string_index(self, setup_path): with ensure_clean_store(setup_path) as store: index = Index( ["I am a very long string index: {i}".format(i=i) for i in range(20)] ) s = Series(np.arange(20), index=index) df = DataFrame({"A": s, "B": s}) store["a"] = s tm.assert_series_equal(store["a"], s) store["b"] = df tm.assert_frame_equal(store["b"], df) # mixed length index = Index( ["abcdefghijklmnopqrstuvwxyz1234567890"] + ["I am a very long string index: {i}".format(i=i) for i in range(20)] ) s = Series(np.arange(21), index=index) df = DataFrame({"A": s, "B": s}) store["a"] = s tm.assert_series_equal(store["a"], s) store["b"] = df tm.assert_frame_equal(store["b"], df) def test_put_compression(self, setup_path): with ensure_clean_store(setup_path) as store: df = tm.makeTimeDataFrame() store.put("c", df, format="table", complib="zlib") tm.assert_frame_equal(store["c"], df) # can't compress if format='fixed' with pytest.raises(ValueError): store.put("b", df, format="fixed", complib="zlib") @td.skip_if_windows_python_3 def test_put_compression_blosc(self, setup_path): df = tm.makeTimeDataFrame() with ensure_clean_store(setup_path) as store: # can't compress if format='fixed' with pytest.raises(ValueError): store.put("b", df, format="fixed", complib="blosc") store.put("c", df, format="table", complib="blosc") tm.assert_frame_equal(store["c"], df) def test_complibs_default_settings(self, setup_path): # GH15943 df = tm.makeDataFrame() # Set complevel and check if complib is automatically set to # default value with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complevel=9) result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 9 assert node.filters.complib == "zlib" # Set complib and check to see if 
compression is disabled with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complib="zlib") result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 0 assert node.filters.complib is None # Check if not setting complib or complevel results in no compression with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df") result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 0 assert node.filters.complib is None # Check if file-defaults can be overridden on a per table basis with ensure_clean_path(setup_path) as tmpfile: store = pd.HDFStore(tmpfile) store.append("dfc", df, complevel=9, complib="blosc") store.append("df", df) store.close() with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 0 assert node.filters.complib is None for node in h5file.walk_nodes(where="/dfc", classname="Leaf"): assert node.filters.complevel == 9 assert node.filters.complib == "blosc" def test_complibs(self, setup_path): # GH14478 df = tm.makeDataFrame() # Building list of all complibs and complevels tuples all_complibs = tables.filters.all_complibs # Remove lzo if its not available on this platform if not tables.which_lib_version("lzo"): all_complibs.remove("lzo") # Remove bzip2 if its not available on this platform if not tables.which_lib_version("bzip2"): all_complibs.remove("bzip2") all_levels = range(0, 10) all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels] for (lib, lvl) in all_tests: with ensure_clean_path(setup_path) as tmpfile: gname = "foo" # Write and read file to see if data is consistent df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl) result = pd.read_hdf(tmpfile, gname) tm.assert_frame_equal(result, df) # Open file and check metadata # for correct amount of compression h5table = tables.open_file(tmpfile, mode="r") for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"): assert node.filters.complevel == lvl if lvl == 0: assert node.filters.complib is None else: assert node.filters.complib == lib h5table.close() def test_put_integer(self, setup_path): # non-date, non-string index df = DataFrame(np.random.randn(50, 100)) self._check_roundtrip(df, tm.assert_frame_equal, setup_path) @td.xfail_non_writeable def test_put_mixed_type(self, setup_path): df = tm.makeTimeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["bool3"] = True df["int1"] = 1 df["int2"] = 2 df["timestamp1"] = Timestamp("20010102") df["timestamp2"] = Timestamp("20010103") df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0) df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0) df.loc[3:6, ["obj1"]] = np.nan df = df._consolidate()._convert(datetime=True) with ensure_clean_store(setup_path) as store: _maybe_remove(store, "df") # PerformanceWarning with catch_warnings(record=True): simplefilter("ignore", pd.errors.PerformanceWarning) store.put("df", df) expected = store.get("df") tm.assert_frame_equal(expected, df) @pytest.mark.filterwarnings( "ignore:object name:tables.exceptions.NaturalNameWarning" ) def test_append(self, setup_path): with ensure_clean_store(setup_path) as store: # this is allowed by almost 
always don't want to do it # tables.NaturalNameWarning): with catch_warnings(record=True): df = tm.makeTimeDataFrame() _maybe_remove(store, "df1") store.append("df1", df[:10]) store.append("df1", df[10:]) tm.assert_frame_equal(store["df1"], df) _maybe_remove(store, "df2") store.put("df2", df[:10], format="table") store.append("df2", df[10:]) tm.assert_frame_equal(store["df2"], df) _maybe_remove(store, "df3") store.append("/df3", df[:10]) store.append("/df3", df[10:]) tm.assert_frame_equal(store["df3"], df) # this is allowed by almost always don't want to do it # tables.NaturalNameWarning _maybe_remove(store, "/df3 foo") store.append("/df3 foo", df[:10]) store.append("/df3 foo", df[10:]) tm.assert_frame_equal(store["df3 foo"], df) # dtype issues - mizxed type in a single object column df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]]) df["mixed_column"] = "testing" df.loc[2, "mixed_column"] = np.nan _maybe_remove(store, "df") store.append("df", df) tm.assert_frame_equal(store["df"], df) # uints - test storage of uints uint_data = DataFrame( { "u08": Series( np.random.randint(0, high=255, size=5), dtype=np.uint8 ), "u16": Series( np.random.randint(0, high=65535, size=5), dtype=np.uint16 ), "u32": Series( np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32 ), "u64": Series( [2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62], dtype=np.uint64, ), }, index=np.arange(5), ) _maybe_remove(store, "uints") store.append("uints", uint_data) tm.assert_frame_equal(store["uints"], uint_data) # uints - test storage of uints in indexable columns _maybe_remove(store, "uints") # 64-bit indices not yet supported store.append("uints", uint_data, data_columns=["u08", "u16", "u32"]) tm.assert_frame_equal(store["uints"], uint_data) def test_append_series(self, setup_path): with ensure_clean_store(setup_path) as store: # basic ss = tm.makeStringSeries() ts = tm.makeTimeSeries() ns = Series(np.arange(100)) store.append("ss", ss) result = store["ss"] tm.assert_series_equal(result, ss) assert result.name is None store.append("ts", ts) result = store["ts"] tm.assert_series_equal(result, ts) assert result.name is None ns.name = "foo" store.append("ns", ns) result = store["ns"] tm.assert_series_equal(result, ns) assert result.name == ns.name # select on the values expected = ns[ns > 60] result = store.select("ns", "foo>60") tm.assert_series_equal(result, expected) # select on the index and values expected = ns[(ns > 70) & (ns.index < 90)] result = store.select("ns", "foo>70 and index<90") tm.assert_series_equal(result, expected) # multi-index mi = DataFrame(np.random.randn(5, 1), columns=["A"]) mi["B"] = np.arange(len(mi)) mi["C"] = "foo" mi.loc[3:5, "C"] = "bar" mi.set_index(["C", "B"], inplace=True) s = mi.stack() s.index = s.index.droplevel(2) store.append("mi", s) tm.assert_series_equal(store["mi"], s) def test_store_index_types(self, setup_path): # GH5386 # test storing various index types with ensure_clean_store(setup_path) as store: def check(format, index): df = DataFrame(np.random.randn(10, 2), columns=list("AB")) df.index = index(len(df)) _maybe_remove(store, "df") store.put("df", df, format=format) tm.assert_frame_equal(df, store["df"]) for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex, tm.makeDateIndex, ]: check("table", index) check("fixed", index) # period index currently broken for table # seee GH7796 FIXME check("fixed", tm.makePeriodIndex) # check('table',tm.makePeriodIndex) # unicode index = tm.makeUnicodeIndex check("table", index) check("fixed", index) @pytest.mark.skipif( 
not is_platform_little_endian(), reason="reason platform is not little endian" ) def test_encoding(self, setup_path): with ensure_clean_store(setup_path) as store: df = DataFrame(dict(A="foo", B="bar"), index=range(5)) df.loc[2, "A"] = np.nan df.loc[3, "B"] = np.nan _maybe_remove(store, "df") store.append("df", df, encoding="ascii") tm.assert_frame_equal(store["df"], df) expected = df.reindex(columns=["A"]) result = store.select("df", Term("columns=A", encoding="ascii")) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "val", [ [b"E\xc9, 17", b"", b"a", b"b", b"c"], [b"E\xc9, 17", b"a", b"b", b"c"], [b"EE, 17", b"", b"a", b"b", b"c"], [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], [b"", b"a", b"b", b"c"], [b"\xf8\xfc", b"a", b"b", b"c"], [b"A\xf8\xfc", b"", b"a", b"b", b"c"], [np.nan, b"", b"b", b"c"], [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], ], ) @pytest.mark.parametrize("dtype", ["category", object]) def test_latin_encoding(self, setup_path, dtype, val): enc = "latin-1" nan_rep = "" key = "data" val = [x.decode(enc) if isinstance(x, bytes) else x for x in val] ser = pd.Series(val, dtype=dtype) with ensure_clean_path(setup_path) as store: ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep) retr = read_hdf(store, key) s_nan = ser.replace(nan_rep, np.nan) if is_categorical_dtype(s_nan): assert is_categorical_dtype(retr) tm.assert_series_equal( s_nan, retr, check_dtype=False, check_categorical=False ) else: tm.assert_series_equal(s_nan, retr) # FIXME: don't leave commented-out # fails: # for x in examples: # roundtrip(s, nan_rep=b'\xf8\xfc') def test_append_some_nans(self, setup_path): with ensure_clean_store(setup_path) as store: df = DataFrame( { "A": Series(np.random.randn(20)).astype("int32"), "A1": np.random.randn(20), "A2": np.random.randn(20), "B": "foo", "C": "bar", "D": Timestamp("20010101"), "E": datetime.datetime(2001, 1, 2, 0, 0), }, index=np.arange(20), ) # some nans _maybe_remove(store, "df1") df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan store.append("df1", df[:10]) store.append("df1", df[10:]) tm.assert_frame_equal(store["df1"], df) # first column df1 = df.copy() df1.loc[:, "A1"] = np.nan _maybe_remove(store, "df1") store.append("df1", df1[:10]) store.append("df1", df1[10:]) tm.assert_frame_equal(store["df1"], df1) # 2nd column df2 = df.copy() df2.loc[:, "A2"] = np.nan _maybe_remove(store, "df2") store.append("df2", df2[:10]) store.append("df2", df2[10:]) tm.assert_frame_equal(store["df2"], df2) # datetimes df3 = df.copy() df3.loc[:, "E"] = np.nan _maybe_remove(store, "df3") store.append("df3", df3[:10]) store.append("df3", df3[10:]) tm.assert_frame_equal(store["df3"], df3) def test_append_all_nans(self, setup_path): with ensure_clean_store(setup_path) as store: df = DataFrame( {"A1": np.random.randn(20), "A2": np.random.randn(20)}, index=np.arange(20), ) df.loc[0:15, :] = np.nan # nan some entire rows (dropna=True) _maybe_remove(store, "df") store.append("df", df[:10], dropna=True) store.append("df", df[10:], dropna=True) tm.assert_frame_equal(store["df"], df[-4:]) # nan some entire rows (dropna=False) _maybe_remove(store, "df2") store.append("df2", df[:10], dropna=False) store.append("df2", df[10:], dropna=False) tm.assert_frame_equal(store["df2"], df) # tests the option io.hdf.dropna_table pd.set_option("io.hdf.dropna_table", False) _maybe_remove(store, "df3") store.append("df3", df[:10]) store.append("df3", df[10:]) tm.assert_frame_equal(store["df3"], df) pd.set_option("io.hdf.dropna_table", True) _maybe_remove(store, "df4") 
store.append("df4", df[:10]) store.append("df4", df[10:]) tm.assert_frame_equal(store["df4"], df[-4:]) # nan some entire rows (string are still written!) df = DataFrame( { "A1": np.random.randn(20), "A2": np.random.randn(20), "B": "foo", "C": "bar", }, index=np.arange(20), ) df.loc[0:15, :] = np.nan _maybe_remove(store, "df") store.append("df", df[:10], dropna=True) store.append("df", df[10:], dropna=True) tm.assert_frame_equal(store["df"], df) _maybe_remove(store, "df2") store.append("df2", df[:10], dropna=False) store.append("df2", df[10:], dropna=False) tm.assert_frame_equal(store["df2"], df) # nan some entire rows (but since we have dates they are still # written!) df = DataFrame( { "A1": np.random.randn(20), "A2": np.random.randn(20), "B": "foo", "C": "bar", "D": Timestamp("20010101"), "E": datetime.datetime(2001, 1, 2, 0, 0), }, index=np.arange(20), ) df.loc[0:15, :] = np.nan _maybe_remove(store, "df") store.append("df", df[:10], dropna=True) store.append("df", df[10:], dropna=True) tm.assert_frame_equal(store["df"], df) _maybe_remove(store, "df2") store.append("df2", df[:10], dropna=False) store.append("df2", df[10:], dropna=False) tm.assert_frame_equal(store["df2"], df) # Test to make sure defaults are to not drop. # Corresponding to Issue 9382 df_with_missing = DataFrame( {"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]} ) with ensure_clean_path(setup_path) as path: df_with_missing.to_hdf(path, "df_with_missing", format="table") reloaded = read_hdf(path, "df_with_missing") tm.assert_frame_equal(df_with_missing, reloaded) def test_read_missing_key_close_store(self, setup_path): # GH 25766 with ensure_clean_path(setup_path) as path: df = pd.DataFrame({"a": range(2), "b": range(2)}) df.to_hdf(path, "k1") with pytest.raises(KeyError, match="'No object named k2 in the file'"): pd.read_hdf(path, "k2") # smoke test to test that file is properly closed after # read with KeyError before another write df.to_hdf(path, "k2") def test_read_missing_key_opened_store(self, setup_path): # GH 28699 with ensure_clean_path(setup_path) as path: df = pd.DataFrame({"a": range(2), "b": range(2)}) df.to_hdf(path, "k1") store = pd.HDFStore(path, "r") with pytest.raises(KeyError, match="'No object named k2 in the file'"): pd.read_hdf(store, "k2") # Test that the file is still open after a KeyError and that we can # still read from it. 
pd.read_hdf(store, "k1") def test_append_frame_column_oriented(self, setup_path): with ensure_clean_store(setup_path) as store: # column oriented df = tm.makeTimeDataFrame() _maybe_remove(store, "df1") store.append("df1", df.iloc[:, :2], axes=["columns"]) store.append("df1", df.iloc[:, 2:]) tm.assert_frame_equal(store["df1"], df) result = store.select("df1", "columns=A") expected = df.reindex(columns=["A"]) tm.assert_frame_equal(expected, result) # selection on the non-indexable result = store.select("df1", ("columns=A", "index=df.index[0:4]")) expected = df.reindex(columns=["A"], index=df.index[0:4]) tm.assert_frame_equal(expected, result) # this isn't supported with pytest.raises(TypeError): store.select("df1", "columns=A and index>df.index[4]") def test_append_with_different_block_ordering(self, setup_path): # GH 4096; using same frames, but different block orderings with ensure_clean_store(setup_path) as store: for i in range(10): df = DataFrame(np.random.randn(10, 2), columns=list("AB")) df["index"] = range(10) df["index"] += i * 10 df["int64"] = Series([1] * len(df), dtype="int64") df["int16"] = Series([1] * len(df), dtype="int16") if i % 2 == 0: del df["int64"] df["int64"] = Series([1] * len(df), dtype="int64") if i % 3 == 0: a = df.pop("A") df["A"] = a df.set_index("index", inplace=True) store.append("df", df) # test a different ordering but with more fields (like invalid # combinate) with ensure_clean_store(setup_path) as store: df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64") df["int64"] = Series([1] * len(df), dtype="int64") df["int16"] = Series([1] * len(df), dtype="int16") store.append("df", df) # store additional fields in different blocks df["int16_2"] = Series([1] * len(df), dtype="int16") with pytest.raises(ValueError): store.append("df", df) # store multile additional fields in different blocks df["float_3"] = Series([1.0] * len(df), dtype="float64") with pytest.raises(ValueError): store.append("df", df) def test_append_with_strings(self, setup_path): with ensure_clean_store(setup_path) as store: with catch_warnings(record=True): def check_col(key, name, size): assert ( getattr(store.get_storer(key).table.description, name).itemsize == size ) # avoid truncation on elements df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]]) store.append("df_big", df) tm.assert_frame_equal(store.select("df_big"), df) check_col("df_big", "values_block_1", 15) # appending smaller string ok df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]]) store.append("df_big", df2) expected = concat([df, df2]) tm.assert_frame_equal(store.select("df_big"), expected) check_col("df_big", "values_block_1", 15) # avoid truncation on elements df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]]) store.append("df_big2", df, min_itemsize={"values": 50}) tm.assert_frame_equal(store.select("df_big2"), df) check_col("df_big2", "values_block_1", 50) # bigger string on next append store.append("df_new", df) df_new = DataFrame( [[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]] ) with pytest.raises(ValueError): store.append("df_new", df_new) # min_itemsize on Series index (GH 11412) df = tm.makeMixedDataFrame().set_index("C") store.append("ss", df["B"], min_itemsize={"index": 4}) tm.assert_series_equal(store.select("ss"), df["B"]) # same as above, with data_columns=True store.append( "ss2", df["B"], data_columns=True, min_itemsize={"index": 4} ) tm.assert_series_equal(store.select("ss2"), df["B"]) # min_itemsize in index without appending (GH 10381) 
store.put("ss3", df, format="table", min_itemsize={"index": 6}) # just make sure there is a longer string: df2 = df.copy().reset_index().assign(C="longer").set_index("C") store.append("ss3", df2) tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2])) # same as above, with a Series store.put("ss4", df["B"], format="table", min_itemsize={"index": 6}) store.append("ss4", df2["B"]) tm.assert_series_equal( store.select("ss4"), pd.concat([df["B"], df2["B"]]) ) # with nans _maybe_remove(store, "df") df = tm.makeTimeDataFrame() df["string"] = "foo" df.loc[1:4, "string"] = np.nan df["string2"] = "bar" df.loc[4:8, "string2"] = np.nan df["string3"] = "bah" df.loc[1:, "string3"] = np.nan store.append("df", df) result = store.select("df") tm.assert_frame_equal(result, df) with ensure_clean_store(setup_path) as store: def check_col(key, name, size): assert getattr( store.get_storer(key).table.description, name ).itemsize, size df = DataFrame(dict(A="foo", B="bar"), index=range(10)) # a min_itemsize that creates a data_column _maybe_remove(store, "df") store.append("df", df, min_itemsize={"A": 200}) check_col("df", "A", 200) assert store.get_storer("df").data_columns == ["A"] # a min_itemsize that creates a data_column2 _maybe_remove(store, "df") store.append("df", df, data_columns=["B"], min_itemsize={"A": 200}) check_col("df", "A", 200) assert store.get_storer("df").data_columns == ["B", "A"] # a min_itemsize that creates a data_column2 _maybe_remove(store, "df") store.append("df", df, data_columns=["B"], min_itemsize={"values": 200}) check_col("df", "B", 200) check_col("df", "values_block_0", 200) assert store.get_storer("df").data_columns == ["B"] # infer the .typ on subsequent appends _maybe_remove(store, "df") store.append("df", df[:5], min_itemsize=200) store.append("df", df[5:], min_itemsize=200) tm.assert_frame_equal(store["df"], df) # invalid min_itemsize keys df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"]) _maybe_remove(store, "df") with pytest.raises(ValueError): store.append("df", df, min_itemsize={"foo": 20, "foobar": 20}) def test_append_with_empty_string(self, setup_path): with ensure_clean_store(setup_path) as store: # with all empty strings (GH 12242) df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]}) store.append("df", df[:-1], min_itemsize={"x": 1}) store.append("df", df[-1:], min_itemsize={"x": 1}) tm.assert_frame_equal(store.select("df"), df) def test_to_hdf_with_min_itemsize(self, setup_path): with ensure_clean_path(setup_path) as path: # min_itemsize in index with to_hdf (GH 10381) df = tm.makeMixedDataFrame().set_index("C") df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6}) # just make sure there is a longer string: df2 = df.copy().reset_index().assign(C="longer").set_index("C") df2.to_hdf(path, "ss3", append=True, format="table") tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2])) # same as above, with a Series df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6}) df2["B"].to_hdf(path, "ss4", append=True, format="table") tm.assert_series_equal( pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]]) ) @pytest.mark.parametrize( "format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"] ) def test_to_hdf_errors(self, format, setup_path): data = ["\ud800foo"] ser = pd.Series(data, index=pd.Index(data)) with ensure_clean_path(setup_path) as path: # GH 20835 ser.to_hdf(path, "table", format=format, errors="surrogatepass") result = pd.read_hdf(path, "table", 
errors="surrogatepass") tm.assert_series_equal(result, ser) def test_append_with_data_columns(self, setup_path): with ensure_clean_store(setup_path) as store: df = tm.makeTimeDataFrame() df.iloc[0, df.columns.get_loc("B")] = 1.0 _maybe_remove(store, "df") store.append("df", df[:2], data_columns=["B"]) store.append("df", df[2:]) tm.assert_frame_equal(store["df"], df) # check that we have indices created assert store._handle.root.df.table.cols.index.is_indexed is True assert store._handle.root.df.table.cols.B.is_indexed is True # data column searching result = store.select("df", "B>0") expected = df[df.B > 0] tm.assert_frame_equal(result, expected) # data column searching (with an indexable and a data_columns) result = store.select("df", "B>0 and index>df.index[3]") df_new = df.reindex(index=df.index[4:]) expected = df_new[df_new.B > 0] tm.assert_frame_equal(result, expected) # data column selection with a string data_column df_new = df.copy() df_new["string"] = "foo" df_new.loc[1:4, "string"] = np.nan df_new.loc[5:6, "string"] = "bar" _maybe_remove(store, "df") store.append("df", df_new, data_columns=["string"]) result = store.select("df", "string='foo'") expected = df_new[df_new.string == "foo"] tm.assert_frame_equal(result, expected) # using min_itemsize and a data column def check_col(key, name, size): assert ( getattr(store.get_storer(key).table.description, name).itemsize == size ) with ensure_clean_store(setup_path) as store: _maybe_remove(store, "df") store.append( "df", df_new, data_columns=["string"], min_itemsize={"string": 30} ) check_col("df", "string", 30) _maybe_remove(store, "df") store.append("df", df_new, data_columns=["string"], min_itemsize=30) check_col("df", "string", 30) _maybe_remove(store, "df") store.append( "df", df_new, data_columns=["string"], min_itemsize={"values": 30} ) check_col("df", "string", 30) with ensure_clean_store(setup_path) as store: df_new["string2"] = "foobarbah" df_new["string_block1"] = "foobarbah1" df_new["string_block2"] = "foobarbah2" _maybe_remove(store, "df") store.append( "df", df_new, data_columns=["string", "string2"], min_itemsize={"string": 30, "string2": 40, "values": 50}, ) check_col("df", "string", 30) check_col("df", "string2", 40) check_col("df", "values_block_1", 50) with ensure_clean_store(setup_path) as store: # multiple data columns df_new = df.copy() df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0 df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0 df_new["string"] = "foo" sl = df_new.columns.get_loc("string") df_new.iloc[1:4, sl] = np.nan df_new.iloc[5:6, sl] = "bar" df_new["string2"] = "foo" sl = df_new.columns.get_loc("string2") df_new.iloc[2:5, sl] = np.nan df_new.iloc[7:8, sl] = "bar" _maybe_remove(store, "df") store.append("df", df_new, data_columns=["A", "B", "string", "string2"]) result = store.select( "df", "string='foo' and string2='foo' and A>0 and B<0" ) expected = df_new[ (df_new.string == "foo") & (df_new.string2 == "foo") & (df_new.A > 0) & (df_new.B < 0) ] tm.assert_frame_equal(result, expected, check_index_type=False) # yield an empty frame result = store.select("df", "string='foo' and string2='cool'") expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")] tm.assert_frame_equal(result, expected, check_index_type=False) with ensure_clean_store(setup_path) as store: # doc example df_dc = df.copy() df_dc["string"] = "foo" df_dc.loc[4:6, "string"] = np.nan df_dc.loc[7:9, "string"] = "bar" df_dc["string2"] = "cool" df_dc["datetime"] = Timestamp("20010102") df_dc = df_dc._convert(datetime=True) 
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan _maybe_remove(store, "df_dc") store.append( "df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"] ) result = store.select("df_dc", "B>0") expected = df_dc[df_dc.B > 0] tm.assert_frame_equal(result, expected, check_index_type=False) result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"]) expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")] tm.assert_frame_equal(result, expected, check_index_type=False) with ensure_clean_store(setup_path) as store: # doc example part 2 np.random.seed(1234) index = date_range("1/1/2000", periods=8) df_dc = DataFrame( np.random.randn(8, 3), index=index, columns=["A", "B", "C"] ) df_dc["string"] = "foo" df_dc.loc[4:6, "string"] = np.nan df_dc.loc[7:9, "string"] = "bar" df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs() df_dc["string2"] = "cool" # on-disk operations store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"]) result = store.select("df_dc", "B>0") expected = df_dc[df_dc.B > 0] tm.assert_frame_equal(result, expected) result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"']) expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")] tm.assert_frame_equal(result, expected) def test_create_table_index(self, setup_path): with ensure_clean_store(setup_path) as store: with catch_warnings(record=True): def col(t, column): return getattr(store.get_storer(t).table.cols, column) # data columns df = tm.makeTimeDataFrame() df["string"] = "foo" df["string2"] = "bar" store.append("f", df, data_columns=["string", "string2"]) assert col("f", "index").is_indexed is True assert col("f", "string").is_indexed is True assert col("f", "string2").is_indexed is True # specify index=columns store.append( "f2", df, index=["string"], data_columns=["string", "string2"] ) assert col("f2", "index").is_indexed is False assert col("f2", "string").is_indexed is True assert col("f2", "string2").is_indexed is False # try to index a non-table _maybe_remove(store, "f2") store.put("f2", df) with pytest.raises(TypeError): store.create_table_index("f2") def test_append_hierarchical(self, setup_path): index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["foo", "bar"], ) df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"]) with ensure_clean_store(setup_path) as store: store.append("mi", df) result = store.select("mi") tm.assert_frame_equal(result, df) # GH 3748 result = store.select("mi", columns=["A", "B"]) expected = df.reindex(columns=["A", "B"]) tm.assert_frame_equal(result, expected) with ensure_clean_path("test.hdf") as path: df.to_hdf(path, "df", format="table") result = read_hdf(path, "df", columns=["A", "B"]) expected = df.reindex(columns=["A", "B"]) tm.assert_frame_equal(result, expected) def test_column_multiindex(self, setup_path): # GH 4710 # recreate multi-indexes properly index = MultiIndex.from_tuples( [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"] ) df = DataFrame(np.arange(12).reshape(3, 4), columns=index) expected = df.copy() if isinstance(expected.index, RangeIndex): expected.index = Int64Index(expected.index) with ensure_clean_store(setup_path) as store: store.put("df", df) tm.assert_frame_equal( store["df"], expected, check_index_type=True, check_column_type=True ) store.put("df1", df, format="table") tm.assert_frame_equal( store["df1"], expected, 
check_index_type=True, check_column_type=True ) with pytest.raises(ValueError): store.put("df2", df, format="table", data_columns=["A"]) with pytest.raises(ValueError): store.put("df3", df, format="table", data_columns=True) # appending multi-column on existing table (see GH 6167) with ensure_clean_store(setup_path) as store: store.append("df2", df) store.append("df2", df) tm.assert_frame_equal(store["df2"], concat((df, df))) # non_index_axes name df = DataFrame( np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo") ) expected = df.copy() if isinstance(expected.index, RangeIndex): expected.index = Int64Index(expected.index) with ensure_clean_store(setup_path) as store: store.put("df1", df, format="table") tm.assert_frame_equal( store["df1"], expected, check_index_type=True, check_column_type=True ) def test_store_multiindex(self, setup_path): # validate multi-index names # GH 5527 with ensure_clean_store(setup_path) as store: def make_index(names=None): return MultiIndex.from_tuples( [ (datetime.datetime(2013, 12, d), s, t) for d in range(1, 3) for s in range(2) for t in range(3) ], names=names, ) # no names _maybe_remove(store, "df") df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index()) store.append("df", df) tm.assert_frame_equal(store.select("df"), df) # partial names _maybe_remove(store, "df") df = DataFrame( np.zeros((12, 2)), columns=["a", "b"], index=make_index(["date", None, None]), ) store.append("df", df) tm.assert_frame_equal(store.select("df"), df) # series _maybe_remove(store, "s") s = Series(np.zeros(12), index=make_index(["date", None, None])) store.append("s", s) xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"])) tm.assert_series_equal(store.select("s"), xp) # dup with column _maybe_remove(store, "df") df = DataFrame( np.zeros((12, 2)), columns=["a", "b"], index=make_index(["date", "a", "t"]), ) with pytest.raises(ValueError): store.append("df", df) # dup within level _maybe_remove(store, "df") df = DataFrame( np.zeros((12, 2)), columns=["a", "b"], index=make_index(["date", "date", "date"]), ) with pytest.raises(ValueError): store.append("df", df) # fully names _maybe_remove(store, "df") df = DataFrame( np.zeros((12, 2)), columns=["a", "b"], index=make_index(["date", "s", "t"]), ) store.append("df", df) tm.assert_frame_equal(store.select("df"), df) def test_select_columns_in_where(self, setup_path): # GH 6169 # recreate multi-indexes when columns is passed # in the `where` argument index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["foo_name", "bar_name"], ) # With a DataFrame df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"]) with ensure_clean_store(setup_path) as store: store.put("df", df, format="table") expected = df[["A"]] tm.assert_frame_equal(store.select("df", columns=["A"]), expected) tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected) # With a Series s = Series(np.random.randn(10), index=index, name="A") with ensure_clean_store(setup_path) as store: store.put("s", s, format="table") tm.assert_series_equal(store.select("s", where="columns=['A']"), s) def test_mi_data_columns(self, setup_path): # GH 14435 idx = pd.MultiIndex.from_arrays( [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"] ) df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx) with ensure_clean_store(setup_path) as store: store.append("df", df, 
data_columns=True) actual = store.select("df", where="id == 1") expected = df.iloc[[1], :] tm.assert_frame_equal(actual, expected) def test_pass_spec_to_storer(self, setup_path): df = tm.makeDataFrame() with ensure_clean_store(setup_path) as store: store.put("df", df) with pytest.raises(TypeError): store.select("df", columns=["A"]) with pytest.raises(TypeError): store.select("df", where=[("columns=A")]) @td.xfail_non_writeable def test_append_misc(self, setup_path): with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() store.append("df", df, chunksize=1) result = store.select("df") tm.assert_frame_equal(result, df) store.append("df1", df, expectedrows=10) result = store.select("df1") tm.assert_frame_equal(result, df) # more chunksize in append tests def check(obj, comparator): for c in [10, 200, 1000]: with ensure_clean_store(setup_path, mode="w") as store: store.append("obj", obj, chunksize=c) result = store.select("obj") comparator(result, obj) df = tm.makeDataFrame() df["string"] = "foo" df["float322"] = 1.0 df["float322"] = df["float322"].astype("float32") df["bool"] = df["float322"] > 0 df["time1"] = Timestamp("20130101") df["time2"] = Timestamp("20130102") check(df, tm.assert_frame_equal) # empty frame, GH4273 with ensure_clean_store(setup_path) as store: # 0 len df_empty = DataFrame(columns=list("ABC")) store.append("df", df_empty) with pytest.raises(KeyError, match="'No object named df in the file'"): store.select("df") # repeated append of 0/non-zero frames df = DataFrame(np.random.rand(10, 3), columns=list("ABC")) store.append("df", df) tm.assert_frame_equal(store.select("df"), df) store.append("df", df_empty) tm.assert_frame_equal(store.select("df"), df) # store df = DataFrame(columns=list("ABC")) store.put("df2", df) tm.assert_frame_equal(store.select("df2"), df) def test_append_raise(self, setup_path): with ensure_clean_store(setup_path) as store: # test append with invalid input to get good error messages # list in column df = tm.makeDataFrame() df["invalid"] = [["a"]] * len(df) assert df.dtypes["invalid"] == np.object_ with pytest.raises(TypeError): store.append("df", df) # multiple invalid columns df["invalid2"] = [["a"]] * len(df) df["invalid3"] = [["a"]] * len(df) with pytest.raises(TypeError): store.append("df", df) # datetime with embedded nans as object df = tm.makeDataFrame() s = Series(datetime.datetime(2001, 1, 2), index=df.index) s = s.astype(object) s[0:5] = np.nan df["invalid"] = s assert df.dtypes["invalid"] == np.object_ with pytest.raises(TypeError): store.append("df", df) # directly ndarray with pytest.raises(TypeError): store.append("df", np.arange(10)) # series directly with pytest.raises(TypeError): store.append("df", Series(np.arange(10))) # appending an incompatible table df = tm.makeDataFrame() store.append("df", df) df["foo"] = "foo" with pytest.raises(ValueError): store.append("df", df) def test_table_index_incompatible_dtypes(self, setup_path): df1 = DataFrame({"a": [1, 2, 3]}) df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3)) with ensure_clean_store(setup_path) as store: store.put("frame", df1, format="table") with pytest.raises(TypeError): store.put("frame", df2, format="table", append=True) def test_table_values_dtypes_roundtrip(self, setup_path): with ensure_clean_store(setup_path) as store: df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8") store.append("df_f8", df1) tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes) df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8") store.append("df_i8", df2) 
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes) # incompatible dtype with pytest.raises(ValueError): store.append("df_i8", df1) # check creation/storage/retrieval of float32 (a bit hacky to # actually create them thought) df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"]) store.append("df_f4", df1) tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes) assert df1.dtypes[0] == "float32" # check with mixed dtypes df1 = DataFrame( { c: Series(np.random.randint(5), dtype=c) for c in ["float32", "float64", "int32", "int64", "int16", "int8"] } ) df1["string"] = "foo" df1["float322"] = 1.0 df1["float322"] = df1["float322"].astype("float32") df1["bool"] = df1["float32"] > 0 df1["time1"] = Timestamp("20130101") df1["time2"] = Timestamp("20130102") store.append("df_mixed_dtypes1", df1) result = store.select("df_mixed_dtypes1").dtypes.value_counts() result.index = [str(i) for i in result.index] expected = Series( { "float32": 2, "float64": 1, "int32": 1, "bool": 1, "int16": 1, "int8": 1, "int64": 1, "object": 1, "datetime64[ns]": 2, } ) result = result.sort_index() expected = expected.sort_index() tm.assert_series_equal(result, expected) def test_table_mixed_dtypes(self, setup_path): # frame df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["bool3"] = True df["int1"] = 1 df["int2"] = 2 df["timestamp1"] = Timestamp("20010102") df["timestamp2"] = Timestamp("20010103") df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0) df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0) df.loc[3:6, ["obj1"]] = np.nan df = df._consolidate()._convert(datetime=True) with ensure_clean_store(setup_path) as store: store.append("df1_mixed", df) tm.assert_frame_equal(store.select("df1_mixed"), df) def test_unimplemented_dtypes_table_columns(self, setup_path): with ensure_clean_store(setup_path) as store: dtypes = [("date", datetime.date(2001, 1, 2))] # currently not supported dtypes #### for n, f in dtypes: df = tm.makeDataFrame() df[n] = f with pytest.raises(TypeError): store.append("df1_{n}".format(n=n), df) # frame df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["datetime1"] = datetime.date(2001, 1, 2) df = df._consolidate()._convert(datetime=True) with ensure_clean_store(setup_path) as store: # this fails because we have a date in the object block...... 
with pytest.raises(TypeError): store.append("df_unimplemented", df) @td.xfail_non_writeable @pytest.mark.skipif( LooseVersion(np.__version__) == LooseVersion("1.15.0"), reason=( "Skipping pytables test when numpy version is " "exactly equal to 1.15.0: gh-22098" ), ) def test_calendar_roundtrip_issue(self, setup_path): # 8591 # doc example from tseries holiday section weekmask_egypt = "Sun Mon Tue Wed Thu" holidays = [ "2012-05-01", datetime.datetime(2013, 5, 1), np.datetime64("2014-05-01"), ] bday_egypt = pd.offsets.CustomBusinessDay( holidays=holidays, weekmask=weekmask_egypt ) dt = datetime.datetime(2013, 4, 30) dts = date_range(dt, periods=5, freq=bday_egypt) s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split())) with ensure_clean_store(setup_path) as store: store.put("fixed", s) result = store.select("fixed") tm.assert_series_equal(result, s) store.append("table", s) result = store.select("table") tm.assert_series_equal(result, s) def test_roundtrip_tz_aware_index(self, setup_path): # GH 17618 time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern") df = pd.DataFrame(data=[0], index=[time]) with ensure_clean_store(setup_path) as store: store.put("frame", df, format="fixed") recons = store["frame"] tm.assert_frame_equal(recons, df) assert recons.index[0].value == 946706400000000000 def test_append_with_timedelta(self, setup_path): # GH 3577 # append timedelta df = DataFrame( dict( A=Timestamp("20130101"), B=[ Timestamp("20130101") + timedelta(days=i, seconds=10) for i in range(10) ], ) ) df["C"] = df["A"] - df["B"] df.loc[3:5, "C"] = np.nan with ensure_clean_store(setup_path) as store: # table _maybe_remove(store, "df") store.append("df", df, data_columns=True) result = store.select("df") tm.assert_frame_equal(result, df) result = store.select("df", where="C<100000") tm.assert_frame_equal(result, df) result = store.select("df", where="C<pd.Timedelta('-3D')") tm.assert_frame_equal(result, df.iloc[3:]) result = store.select("df", "C<'-3D'") tm.assert_frame_equal(result, df.iloc[3:]) # a bit hacky here as we don't really deal with the NaT properly result = store.select("df", "C<'-500000s'") result = result.dropna(subset=["C"]) tm.assert_frame_equal(result, df.iloc[6:]) result = store.select("df", "C<'-3.5D'") result = result.iloc[1:] tm.assert_frame_equal(result, df.iloc[4:]) # fixed _maybe_remove(store, "df2") store.put("df2", df) result = store.select("df2") tm.assert_frame_equal(result, df) def test_remove(self, setup_path): with ensure_clean_store(setup_path) as store: ts = tm.makeTimeSeries() df = tm.makeDataFrame() store["a"] = ts store["b"] = df _maybe_remove(store, "a") assert len(store) == 1 tm.assert_frame_equal(df, store["b"]) _maybe_remove(store, "b") assert len(store) == 0 # nonexistence with pytest.raises( KeyError, match="'No object named a_nonexistent_store in the file'" ): store.remove("a_nonexistent_store") # pathing store["a"] = ts store["b/foo"] = df _maybe_remove(store, "foo") _maybe_remove(store, "b/foo") assert len(store) == 1 store["a"] = ts store["b/foo"] = df _maybe_remove(store, "b") assert len(store) == 1 # __delitem__ store["a"] = ts store["b"] = df del store["a"] del store["b"] assert len(store) == 0 def test_invalid_terms(self, setup_path): with ensure_clean_store(setup_path) as store: with catch_warnings(record=True): df = tm.makeTimeDataFrame() df["string"] = "foo" df.loc[0:4, "string"] = "bar" store.put("df", df, format="table") # some invalid terms with pytest.raises(TypeError): Term() # more invalid with 
pytest.raises(ValueError): store.select("df", "df.index[3]") with pytest.raises(SyntaxError): store.select("df", "index>") # from the docs with ensure_clean_path(setup_path) as path: dfq = DataFrame( np.random.randn(10, 4), columns=list("ABCD"), index=date_range("20130101", periods=10), ) dfq.to_hdf(path, "dfq", format="table", data_columns=True) # check ok read_hdf( path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']" ) read_hdf(path, "dfq", where="A>0 or C>0") # catch the invalid reference with ensure_clean_path(setup_path) as path: dfq = DataFrame( np.random.randn(10, 4), columns=list("ABCD"), index=date_range("20130101", periods=10), ) dfq.to_hdf(path, "dfq", format="table") with pytest.raises(ValueError): read_hdf(path, "dfq", where="A>0 or C>0") def test_same_name_scoping(self, setup_path): with ensure_clean_store(setup_path) as store: import pandas as pd df = DataFrame( np.random.randn(20, 2), index=pd.date_range("20130101", periods=20) ) store.put("df", df, format="table") expected = df[df.index > pd.Timestamp("20130105")] import datetime # noqa result = store.select("df", "index>datetime.datetime(2013,1,5)") tm.assert_frame_equal(result, expected) from datetime import datetime # noqa # technically an error, but allow it result = store.select("df", "index>datetime.datetime(2013,1,5)") tm.assert_frame_equal(result, expected) result = store.select("df", "index>datetime(2013,1,5)") tm.assert_frame_equal(result, expected) def test_series(self, setup_path): s = tm.makeStringSeries() self._check_roundtrip(s, tm.assert_series_equal, path=setup_path) ts = tm.makeTimeSeries() self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path) ts2 = Series(ts.index, Index(ts.index, dtype=object)) self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path) ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) self._check_roundtrip( ts3, tm.assert_series_equal, path=setup_path, check_index_type=False ) def test_float_index(self, setup_path): # GH #454 index = np.random.randn(10) s = Series(np.random.randn(10), index=index) self._check_roundtrip(s, tm.assert_series_equal, path=setup_path) @td.xfail_non_writeable def test_tuple_index(self, setup_path): # GH #492 col = np.arange(10) idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)] data = np.random.randn(30).reshape((3, 10)) DF = DataFrame(data, index=idx, columns=col) with catch_warnings(record=True): simplefilter("ignore", pd.errors.PerformanceWarning) self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path) @td.xfail_non_writeable @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") def test_index_types(self, setup_path): with catch_warnings(record=True): values = np.random.randn(2) func = lambda l, r: tm.assert_series_equal( l, r, check_dtype=True, check_index_type=True, check_series_type=True ) with catch_warnings(record=True): ser = Series(values, [0, "y"]) self._check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [datetime.datetime.today(), 0]) self._check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, ["y", 0]) self._check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [datetime.date.today(), "a"]) self._check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [0, "y"]) self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, [datetime.datetime.today(), 0]) 
self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, ["y", 0]) self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, [datetime.date.today(), "a"]) self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1.23, "b"]) self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1, 1.53]) self._check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1, 5]) self._check_roundtrip(ser, func, path=setup_path) ser = Series( values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)] ) self._check_roundtrip(ser, func, path=setup_path) def test_timeseries_preepoch(self, setup_path): dr = bdate_range("1/1/1940", "1/1/1960") ts = Series(np.random.randn(len(dr)), index=dr) try: self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path) except OverflowError: pytest.skip("known failer on some windows platforms") @td.xfail_non_writeable @pytest.mark.parametrize( "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)] ) def test_frame(self, compression, setup_path): df = tm.makeDataFrame() # put in some random NAs df.values[0, 0] = np.nan df.values[5, 3] = np.nan self._check_roundtrip_table( df, tm.assert_frame_equal, path=setup_path, compression=compression ) self._check_roundtrip( df, tm.assert_frame_equal, path=setup_path, compression=compression ) tdf = tm.makeTimeDataFrame() self._check_roundtrip( tdf, tm.assert_frame_equal, path=setup_path, compression=compression ) with ensure_clean_store(setup_path) as store: # not consolidated df["foo"] = np.random.randn(len(df)) store["df"] = df recons = store["df"] assert recons._data.is_consolidated() # empty self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path) @td.xfail_non_writeable def test_empty_series_frame(self, setup_path): s0 = Series(dtype=object) s1 = Series(name="myseries", dtype=object) df0 = DataFrame() df1 = DataFrame(index=["a", "b", "c"]) df2 = DataFrame(columns=["d", "e", "f"]) self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path) self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path) self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path) self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) @td.xfail_non_writeable @pytest.mark.parametrize( "dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"] ) def test_empty_series(self, dtype, setup_path): s = Series(dtype=dtype) self._check_roundtrip(s, tm.assert_series_equal, path=setup_path) def test_can_serialize_dates(self, setup_path): rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")] frame = DataFrame(np.random.randn(len(rng), 4), index=rng) self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) def test_store_hierarchical(self, setup_path): index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["foo", "bar"], ) frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"]) self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path) self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path) # check that the names are stored with ensure_clean_store(setup_path) as store: store["frame"] = frame recons = store["frame"] tm.assert_frame_equal(recons, frame) def test_store_index_name(self, setup_path): df = 
tm.makeDataFrame() df.index.name = "foo" with ensure_clean_store(setup_path) as store: store["frame"] = df recons = store["frame"] tm.assert_frame_equal(recons, df) def test_store_index_name_with_tz(self, setup_path): # GH 13884 df = pd.DataFrame({"A": [1, 2]}) df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788]) df.index = df.index.tz_localize("UTC") df.index.name = "foo" with ensure_clean_store(setup_path) as store: store.put("frame", df, format="table") recons = store["frame"] tm.assert_frame_equal(recons, df) @pytest.mark.parametrize("table_format", ["table", "fixed"]) def test_store_index_name_numpy_str(self, table_format, setup_path): # GH #13492 idx = pd.Index( pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]), name="cols\u05d2", ) idx1 = pd.Index( pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]), name="rows\u05d0", ) df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1) # This used to fail, returning numpy strings instead of python strings. with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", format=table_format) df2 = read_hdf(path, "df") tm.assert_frame_equal(df, df2, check_names=True) assert type(df2.index.name) == str assert type(df2.columns.name) == str def test_store_series_name(self, setup_path): df = tm.makeDataFrame() series = df["A"] with ensure_clean_store(setup_path) as store: store["series"] = series recons = store["series"] tm.assert_series_equal(recons, series) @td.xfail_non_writeable @pytest.mark.parametrize( "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)] ) def test_store_mixed(self, compression, setup_path): def _make_one(): df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["int1"] = 1 df["int2"] = 2 return df._consolidate() df1 = _make_one() df2 = _make_one() self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) with ensure_clean_store(setup_path) as store: store["obj"] = df1 tm.assert_frame_equal(store["obj"], df1) store["obj"] = df2 tm.assert_frame_equal(store["obj"], df2) # check that can store Series of all of these types self._check_roundtrip( df1["obj1"], tm.assert_series_equal, path=setup_path, compression=compression, ) self._check_roundtrip( df1["bool1"], tm.assert_series_equal, path=setup_path, compression=compression, ) self._check_roundtrip( df1["int1"], tm.assert_series_equal, path=setup_path, compression=compression, ) @pytest.mark.filterwarnings( "ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning" ) def test_select_with_dups(self, setup_path): # single dtypes df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]) df.index = date_range("20130101 9:30", periods=10, freq="T") with ensure_clean_store(setup_path) as store: store.append("df", df) result = store.select("df") expected = df tm.assert_frame_equal(result, expected, by_blocks=True) result = store.select("df", columns=df.columns) expected = df tm.assert_frame_equal(result, expected, by_blocks=True) result = store.select("df", columns=["A"]) expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
pandas.util.testing.assert_frame_equal
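
The completion above exercises pandas.util.testing.assert_frame_equal. A minimal, standalone sketch (toy frames, not the HDFStore fixtures used in these tests; the module lives at pandas.testing in newer releases): the helper raises AssertionError on any mismatch in values, dtypes, index, or columns, and keywords such as check_dtype relax individual checks.

import pandas as pd
import pandas.util.testing as tm   # pandas.testing in newer versions

# toy frames for illustration only
left = pd.DataFrame({"A": [1.0, 2.0], "B": ["x", "y"]})
right = left.copy()

tm.assert_frame_equal(left, right)   # identical frames: passes silently
# dtype-only differences can be tolerated explicitly
tm.assert_frame_equal(left, right.astype({"A": "float32"}), check_dtype=False)
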
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sklearn.mixture


def fit_mixture(X=None, n_clusters=2):
    """Gaussian Mixture Model

    Fits a Gaussian mixture model and returns, for each value, the probability
    of belonging to each cluster.

    Parameters
    ----------
    X : list, array or Series
        The values to classify.
    n_clusters : int
        Number of components to look for.

    Returns
    -------
    pd.DataFrame
        DataFrame containing the probability of belonging to each cluster.

    See Also
    --------
    signal_detrend, fit_error

    Examples
    --------
    >>> import pandas as pd
    >>> import neurokit2 as nk
    >>>
    >>> x = nk.signal_simulate()
    >>> probs = nk.fit_mixture(x, n_clusters=2)
    >>> nk.signal_plot([x, probs["Cluster_0"], probs["Cluster_1"]], standardize=True)

    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)

    # fit a Gaussian Mixture Model with two components
    clf = sklearn.mixture.GaussianMixture(n_components=n_clusters, random_state=333)
    clf = clf.fit(X)

    # Get predicted probabilities
    predicted = clf.predict_proba(X)

    probabilities =
pd.DataFrame(predicted)
pandas.DataFrame
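
A minimal sketch of the pattern fit_mixture relies on, using made-up data: GaussianMixture.predict_proba returns an (n_samples, n_clusters) array, and pandas.DataFrame turns it into a labelled table of per-cluster probabilities; the Cluster_* naming mirrors the docstring example and is only illustrative.

import numpy as np
import pandas as pd
import sklearn.mixture

# synthetic two-cluster data, for illustration only
np.random.seed(3)
X = np.concatenate([np.random.normal(0, 1, 200), np.random.normal(5, 1, 200)]).reshape(-1, 1)

gmm = sklearn.mixture.GaussianMixture(n_components=2, random_state=333).fit(X)
probs = pd.DataFrame(gmm.predict_proba(X))                      # shape (400, 2)
probs.columns = ["Cluster_" + str(col) for col in probs.columns]   # Cluster_0, Cluster_1
print(probs.head())
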
''' File: stepping_exp.py Author: <NAME> (<EMAIL>) Date: July 30th, 2015 * Copyright (c) 2015, <NAME> and NICTA * All rights reserved. * * Developed by: <NAME> * NICTA * http://www.nickmattei.net * http://www.preflib.org * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NICTA nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICTA ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NICTA BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. About -------------------- This runs a simple stepping experiment and saves the results to a file. Note that it DOES NOT TRACK the score vector etc. This is meant as a way to run a series of steps -- not a comprehensive experimental framework. ''' import pickle import numpy as np import random import itertools import pandas as pd from collections import Counter _DEBUG = False from peerselect import impartial from peerselect import profile_generator # Set the Seed... # random.seed(15) class Impartial: VANILLA = "Vanilla" PARTITION = "Partition" DOLLAR = "DollarPartition" DPR = "DollarPartitionRaffle" CREDIABLE = "CredibleSubset" RAFFLE = "DollarRaffle" ALL = (VANILLA, DOLLAR, PARTITION, RAFFLE, CREDIABLE, DPR) # Exponential. #scores = [pow(n, 4), pow(n, 3), pow(n, 2), n, 1] #dist = [0.1, 0.2, 0.2, 0.2, 0.3] #Borda #scores = [3, 2, 1, 0] #dist = [0.25, 0.25, 0.25, 0.25] # Psuedo 10 point Normal... \sigma~=1 scores = [8, 7, 6, 5, 4, 3, 2, 1, 0] # Normal... dist = [0.03, 0.05, 0.12, 0.15, 0.30, 0.15, 0.12, 0.05, 0.03] s = 1000 test_n = [130] test_k = [25] test_m = [10, 20, 40] test_l = [5] test_p = [0.1] # Output File name ## Save the current runs out_name = "../notebooks/pickled_runs/pub_NSF1000s_130n_25k_10-40m_5l.pickle" # Map for all results... We'll build a high level index out of this later... results = {} for n,k,m,l,p in itertools.product(test_n, test_k, test_m, test_l, test_p): # Compute some artifacts from the scoring distributions. agents = np.arange(0, n) #Bit Hacky but faster.... Generate a unanimous score matrix and compute some properties.. t_profile = profile_generator.generate_mallows_mixture_profile(agents, agents, [1.0], [agents], [0.0]) t_matrix = profile_generator.profile_classes_to_score_matrix(t_profile, scores, dist) #Determine how many of each bin we have. # Compute the binning from The SCORE MATRIX ---> Tally guy 0. 
t = Counter(list(t_matrix[:,0])) size = [t[k] for k in sorted(t.keys(), reverse=True)] # Determine how many of each bin we should have... n_from_bin = [0]*len(size) left = k for i,j in enumerate(n_from_bin): if left > 0: n_from_bin[i] = min(size[i], left) left -= size[i] cum_n_from_bin = list(np.cumsum(n_from_bin)) # Determine what bin they should go in according to the ground truth. # Just take the first guys's vector and iterate over it. # Guy i got score v and should be in the corresponding bin as indexed by the score vector. in_bin = {i:scores.index(v) for i,v in enumerate(list(t_matrix[:, 0]))} # Containers for Results count_results = {x:[0]*k for x in Impartial.ALL} bin_results = {x:[0]*len(size) for x in Impartial.ALL} for c_sample in range(s): #Generate a full profile and a clustering. profile = profile_generator.generate_mallows_mixture_profile(agents, agents, [1.0], [agents], [p]) clustering = impartial.even_partition_order(sorted(agents, key=lambda j: random.random()), l) #Generate an approx-m-regular assignment. m_assignment = profile_generator.generate_approx_m_regular_assignment(agents, m, clustering, randomize=True) score_matrix = profile_generator.profile_classes_to_score_matrix(profile, scores, dist) score_matrix = profile_generator.restrict_score_matrix(score_matrix, m_assignment) #Compute Target Set. target_set = impartial.vanilla(score_matrix, k) ws = {} ws[Impartial.DOLLAR] = impartial.dollar_partition_explicit(score_matrix, k, clustering, normalize=True) size_ws = len(ws[Impartial.DOLLAR]) # Let everyone else have the same size set so they are all compareable. ws[Impartial.VANILLA] = [i for i,j in impartial.vanilla(score_matrix, size_ws)] # Let CRED, PART, and RAFFLE have bigger sets... ws[Impartial.PARTITION] = impartial.partition_explicit(score_matrix, size_ws, clustering, normalize=False) ws[Impartial.CREDIABLE] = impartial.credible_subset(score_matrix, size_ws, m, normalize=False) ws[Impartial.DPR] = impartial.dollar_raffle_explicit(score_matrix, size_ws, clustering, normalize=True) #Call Raffle and have everyone in a cluster by themselves = Dollar. ws[Impartial.RAFFLE] = impartial.dollar_raffle(score_matrix, size_ws, n, randomize=True, normalize=True) # Update the Per position information. for i,tup in enumerate(target_set): a = tup[0] for x in Impartial.ALL: if a in ws[x]: count_results[x][i] += 1 # Update the per bin picking for each type. for x in Impartial.ALL: for e in ws[x]: bin_results[x][in_bin[e]] += 1 # Make cumulative versions for easy graphing... cum_count_results = {x:[0]*k for x in Impartial.ALL} cum_bin_results = {x:[0]*len(size) for x in Impartial.ALL} for x in Impartial.ALL: cum_count_results[x] = [v/float((i+1.) * s) for i,v in enumerate(list(np.cumsum(count_results[x])))] cum_bin_results[x] = [v/float(cum_n_from_bin[i] * s) for i,v in enumerate(np.cumsum(bin_results[x]))] # Normalize the counts and bins by n Samples to get a rate. count_results[x] = [float(i) / float(s) for i in count_results[x]] bin_results[x] = [float(i) / float(s) for i in bin_results[x]] # This should likely be some kind of multiindex but I can't figure it out. t = (n, k, m, l, p, "count") results[t] = pd.DataFrame(count_results) results[t].index += 1 t = (n, k, m, l, p, "cum_count") results[t] = pd.DataFrame(cum_count_results) results[t].index += 1 t = (n, k, m, l, p, "bin") results[t] =
pd.DataFrame(bin_results)
pandas.DataFrame
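
The script above remarks that its results dict "should likely be some kind of multiindex". One possible route, sketched here with toy frames and a hypothetical name ("stat") for the last key field, is to let pandas.concat build the MultiIndex directly from the tuple keys.

import pandas as pd

# toy stand-ins for the per-(n, k, m, l, p, stat) result frames
results = {
    (130, 25, 10, 5, 0.1, "count"): pd.DataFrame({"Vanilla": [0.90, 0.80], "DollarPartition": [0.70, 0.60]}),
    (130, 25, 20, 5, 0.1, "count"): pd.DataFrame({"Vanilla": [0.95, 0.85], "DollarPartition": [0.75, 0.65]}),
}
combined = pd.concat(results, names=["n", "k", "m", "l", "p", "stat"])
# the six key fields become index levels, followed by the original row position
print(combined.loc[(130, 25, 10)])
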
import tabula
import glob, os
import pandas as pd


def clean_saro(df):
    unique_id_col = 'SARO NUMBER'

    # remove unnecessary blank columns
    df_clean = df.dropna(how='all', axis=1)

    # fill forward unique_id_col
    df_clean.loc[:, unique_id_col] = df_clean[unique_id_col].ffill(axis=0)

    # replace NaN with empty string for all other columns
    df_clean = df_clean.fillna("")

    # remove all repeating header columns
    df_clean = df_clean[df_clean[unique_id_col] != unique_id_col].reset_index(drop=True)

    id_list_original_order = df_clean[unique_id_col].unique()

    # set id col as index
    df_clean = df_clean.set_index(unique_id_col)

    # extract column names
    cols = df_clean.columns

    # merge all text data by id column
    all_clean_series = []
    for col in cols:
        df_temp = df_clean.groupby(unique_id_col)[col].apply(lambda x: ' '.join(list(x)).strip())
        all_clean_series.append(df_temp)

    # combine all merged columns, joined on id column
    df_final =
pd.concat(all_clean_series, axis=1)
pandas.concat
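
A toy sketch (invented SARO rows) of the merge step clean_saro finishes with: every groupby-joined Series shares the 'SARO NUMBER' index, so pandas.concat with axis=1 stitches them back into one row per SARO.

import pandas as pd

# invented rows standing in for a tabula-extracted SARO table
df = pd.DataFrame({
    "SARO NUMBER": ["A-1", "A-1", "B-2"],
    "PURPOSE": ["Payment of", "salaries", "Road repair"],
    "AMOUNT": ["1,000", "", "5,000"],
}).set_index("SARO NUMBER")

pieces = [df.groupby("SARO NUMBER")[col].apply(lambda x: ' '.join(list(x)).strip())
          for col in df.columns]
merged = pd.concat(pieces, axis=1)
print(merged)   # one row per SARO NUMBER, text fragments merged per column
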
from keras import layers, regularizers from sklearn.preprocessing import StandardScaler, MinMaxScaler from Shared.data_loader import DataLoader from Shared.data import Data import numpy as np import sys import pandas as pd from pathlib import Path import keras from VAE.sampling import Sampling from VAE.vae_model import VAE import anndata as ad import matplotlib.pyplot as plt import pickle from sklearn.metrics import r2_score import umap import tensorflow as tf class VAutoEncoder: data: Data # The defined encoder encoder: any # The defined decoder decoder: any # The ae vae: any # the training history of the AE history: any input_dim: int encoding_dim: int input_umap: any latent_umap: any r2_scores = pd.DataFrame(columns=["Marker", "Score"]) encoded_data = pd.DataFrame() reconstructed_data = pd.DataFrame() args = None results_folder = Path("results", "vae") def __init__(self, args, folder: str = None): self.encoding_dim = 5 self.args = args if folder is not None: self.results_folder = Path(self.results_folder, folder) def normalize(self, data): # Input data contains some zeros which results in NaN (or Inf) # values when their log10 is computed. NaN (or Inf) are problematic # values for downstream analysis. Therefore, zeros are replaced by # a small value; see the following thread for related discussion. # https://www.researchgate.net/post/Log_transformation_of_values_that_include_0_zero_for_statistical_analyses2 data[data == 0] = 1e-32 data = np.log10(data) standard_scaler = StandardScaler() data = standard_scaler.fit_transform(data) data = data.clip(min=-5, max=5) min_max_scaler = MinMaxScaler(feature_range=(0, 1)) data = min_max_scaler.fit_transform(data) return data def load_data(self): print("Loading data...") if self.args.file: inputs, markers = DataLoader.get_data( self.args.file, self.args.morph) elif self.args.dir: inputs, markers = DataLoader.load_folder_data( self.args.dir, self.args.morph) else: print("Please specify a directory or a file") sys.exit() self.data = Data(np.array(inputs), markers, self.normalize) def build_auto_encoder(self): # Build the encoder inputs_dim = self.data.inputs.shape[1] activity_regularizer = regularizers.l1_l2(10e-5) activation = tf.keras.layers.LeakyReLU() encoder_inputs = keras.Input(shape=(inputs_dim,)) h1 = layers.Dense(inputs_dim, activation=activation, activity_regularizer=activity_regularizer)(encoder_inputs) h2 = layers.BatchNormalization()(h1) h3 = layers.Dropout(0.5)(h2) h4 = layers.Dense(inputs_dim / 2, activation=activation, activity_regularizer=activity_regularizer)(h3) h5 = layers.BatchNormalization()(h4) h6 = layers.Dropout(0.5)(h5) h7 = layers.Dense(inputs_dim / 3, activation=activation, activity_regularizer=activity_regularizer)(h6) h8 = layers.Dropout(0.5)(h7) h9 = layers.BatchNormalization()(h8) # The following variables are for the convenience of building the decoder. # last layer before flatten lbf = h9 # shape before flatten. 
sbf = keras.backend.int_shape(lbf)[1:] # neurons count before latent dim nbl = np.prod(sbf) z_mean = layers.Dense(self.encoding_dim, name="z_mean")(lbf) z_log_var = layers.Dense(self.encoding_dim, name="z_log_var")(lbf) z = Sampling()([z_mean, z_log_var]) self.encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder") self.encoder.summary() # Build the decoder decoder_inputs = keras.Input(shape=(self.encoding_dim,)) h1 = layers.Dense(nbl, activation=activation)(decoder_inputs) h2 = layers.Dense(inputs_dim / 2, activation=activation)(h1) decoder_outputs = layers.Dense(inputs_dim)(h2) self.decoder = keras.Model(decoder_inputs, decoder_outputs, name="decoder") self.decoder.summary() # Visualize the model. # tf.keras.utils.plot_model(model, to_file="model.png") # Train the VAE # Create the VAR, compile, and run. callback = tf.keras.callbacks.EarlyStopping(monitor="reconstruction_loss", mode="min", patience=5, restore_best_weights=True) self.vae = VAE(self.encoder, self.decoder) self.vae.compile(optimizer="adam") self.history = self.vae.fit(self.data.X_train, validation_data=(self.data.X_val, self.data.X_val), epochs=500, callbacks=callback, batch_size=32, shuffle=True, verbose=1) def predict(self): # Make some predictions cell = self.data.X_val[0] cell = cell.reshape(1, cell.shape[0]) mean, log_var, z = self.encoder.predict(cell) encoded_cell = z decoded_cell = self.decoder.predict(encoded_cell) var_cell = self.vae.predict(cell) print(f"Input shape:\t{cell.shape}") print(f"Encoded shape:\t{encoded_cell.shape}") print(f"Decoded shape:\t{decoded_cell.shape}") print(f"\nInput:\n{cell[0]}") print(f"\nEncoded:\n{encoded_cell[0]}") print(f"\nDecoded:\n{decoded_cell[0]}") def calculate_r2_score(self): recon_test = self.vae.predict(self.data.X_test) recon_test = pd.DataFrame(data=recon_test, columns=self.data.markers) input_data = pd.DataFrame(data=self.data.X_test, columns=self.data.markers) # self.plot_clusters(input_data, range(len(self.data.markers))) for marker in self.data.markers: input_marker = input_data[f"{marker}"] var_marker = recon_test[f"{marker}"] self.r2_scores = self.r2_scores.append( { "Marker": marker, "Score": r2_score(input_marker, var_marker) }, ignore_index=True ) # self.plot_label_clusters(self.data.X_test, self.data.X_test) def plot_label_clusters(self, data, labels): # display a 2D plot of the digit classes in the latent space z_mean, _, _ = self.vae.encoder.predict(data) plt.figure(figsize=(12, 10)) plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels) plt.colorbar() plt.xlabel("z[0]") plt.ylabel("z[1]") plt.show() def create_h5ad_object(self): # Input fit = umap.UMAP() self.input_umap = input_umap = fit.fit_transform(self.data.X_test) # latent space fit = umap.UMAP() mean, log_var, z = self.encoder.predict(self.data.X_test) self.latent_umap = fit.fit_transform(z) self.__create_h5ad("latent_markers", self.latent_umap, self.data.markers,
pd.DataFrame(columns=self.data.markers, data=self.data.X_test)
pandas.DataFrame
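
A minimal sketch, with random stand-ins for the marker panel and the reconstructions, of the wrapping pattern used throughout the VAE class above: raw numpy matrices become pandas.DataFrame objects with marker names as columns, which makes per-marker R2 scoring a one-liner.

import numpy as np
import pandas as pd
from sklearn.metrics import r2_score

markers = ["CD45", "CK19", "Vimentin"]              # hypothetical marker panel
X_test = np.random.rand(50, len(markers))           # fake test cells
recon = X_test + np.random.normal(0, 0.05, X_test.shape)   # fake reconstructions

input_df = pd.DataFrame(data=X_test, columns=markers)
recon_df = pd.DataFrame(data=recon, columns=markers)
scores = pd.DataFrame(
    [{"Marker": m, "Score": r2_score(input_df[m], recon_df[m])} for m in markers]
)
print(scores)
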
# -*- coding: utf-8 -*- import os, sys, time import pandas as pd import geopandas as gpd import h2o from h2o.automl import H2OAutoML from h2o.frame import H2OFrame ### Credit: Original code was developed by <NAME> and later revised by <NAME> ### start_time = time.time() ### Initial file setting -------------------------------------------------------------------- pth = os.getcwd() building_file = '/Niamey_data/buildings_altered.shp' sample_file = '/Niamey_data/Niamey_sample_data.shp' # Read a processed Building Footprint layer building_df = gpd.read_file(pth + building_file) building_df = building_df.to_crs({'init':'epsg:4326'}) # Read a Sample Area layer sample_area = gpd.read_file(pth + sample_file) sample_area = sample_area.to_crs({'init':'epsg:4326'}) # Urban classes to be used in the sample layer and for classification # Assign unique integer for each class by yourself here. class_map = {'middle income':1,'informal':2,'formal':3, 'commercial':4} ### Variable prep here ---------------------------------------------------------------------- # Here, adjust your prediction and response variables. Modify the code below to satisfy your needs. # Current setting is very basic: Apply all variables in the building_df. col = building_df.columns predictors = list(col[1:21]) response = 'type' ### Generate a training data by intersecting 'building_df' and 'sample_area'----------------- # Set urban class default as 'unknown' source_df = building_df.copy() source_df['type'] = 'unknown' # Create an empty DF for append training_data = pd.DataFrame() # 'training_data' is now our official 'training data' for the ML model. for index, row in sample_area.iterrows(): x = row.geometry y = row.type df_temp = source_df[source_df.intersects(x)].copy() df_temp['type'] = y training_data = training_data.append(df_temp) training_data['type'] = training_data['type'].map(class_map) ### Model training here --------------------------------------------------------------------- h2o.init() # Convert the training data to an h2o frame. # NOTE that this process will be inefficien if the original data has many NaNs. hf = H2OFrame(training_data) # This block of code is fairly h2o standard. It trains 20 models on this data, # limiting the runtime to 1 hour. At the end of an hour or training 20 models, # whichever is first, it returns a DataFrame of predictions as preds, ordered by the quality of their predictions. # Split 'hf' into a taraining frame and validation frame. train, valid = hf.split_frame(ratios = [.8], seed = 10) # Identify predictors and response x = predictors y = response ## For binary classification, response should be a factor train[y] = train[y].asfactor() valid[y] = valid[y].asfactor() # Run AutoML for 20 base models (limited to 1 hour max runtime by default) aml = H2OAutoML(max_models = 20, seed =1) aml.train(x = x, y = y, training_frame = train) # View the AutoML Leaderboard lb = aml.leaderboard # Print all rows instead of default (10 rows) lb.head(rows=lb.nrows) print("** Model validation with 'valid' hf **") preds = aml.leader.predict(valid) # Here, we print out the performance of our top performing model. res = aml.leader.model_performance(valid) print(res) # We save the model down to its own save location. model_path = h2o.save_model(model = aml.leader, path = pth, force = True) ### Model fitting here ---------------------------------------------------------------------- # h2o struggled to generate predictions for more than 100,000 rows at a time. 
# Thus, we split the original DataFrame into 100,000 row chunks, run the predictions on
# the h2o version of the frame, then send these to file.
max_row_size = 100000
chunk_num = int(len(building_df) / max_row_size)
chunk_mod = len(building_df) % max_row_size

building_df['type'] = 'unknown'

def MLpred(df):
    df_input = df[predictors]   # Extract predictor cols only (specified by the 'predictors' LIST)

    hf_temp = H2OFrame(df_input)
    preds_temp = aml.leader.predict(hf_temp)
    pred_df_temp = preds_temp.as_data_frame()

    # add 'PID' to 'pred_df_temp' so that it will be merged to the original 'df.'
    df.reset_index(inplace = True)
    pred_df_temp['PID'] = df.PID

    ans = pd.merge(df, pred_df_temp, on = "PID")
    return(ans)

# Create an empty DF for append
prediction_df =
pd.DataFrame()
pandas.DataFrame
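
A rough sketch of the chunking idea above with a dummy predictor in place of h2o (PID is taken from the script, everything else is invented); accumulating chunk results in a list and calling pandas.concat once is a common alternative to growing an empty DataFrame.

import numpy as np
import pandas as pd

max_row_size = 100000
# toy stand-in for the real building-footprint frame
building_df = pd.DataFrame({"PID": range(250000), "area": np.random.rand(250000)})

def dummy_pred(chunk):                               # stand-in for MLpred above
    out = chunk[["PID"]].copy()
    out["predict"] = (chunk["area"] > 0.5).astype(int)
    return out

pieces = [dummy_pred(building_df.iloc[i:i + max_row_size])
          for i in range(0, len(building_df), max_row_size)]
prediction_df = pd.concat(pieces, ignore_index=True)
print(len(prediction_df))                            # 250000 predictions, one per building
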
import datetime from datetime import timedelta from distutils.version import LooseVersion from io import BytesIO import os import re from warnings import catch_warnings, simplefilter import numpy as np import pytest from pandas.compat import is_platform_little_endian, is_platform_windows import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_categorical_dtype import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, RangeIndex, Series, Timestamp, bdate_range, concat, date_range, isna, timedelta_range, ) from pandas.tests.io.pytables.common import ( _maybe_remove, create_tempfile, ensure_clean_path, ensure_clean_store, safe_close, safe_remove, tables, ) import pandas.util.testing as tm from pandas.io.pytables import ( ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf, ) from pandas.io import pytables as pytables # noqa: E402 isort:skip from pandas.io.pytables import TableIterator # noqa: E402 isort:skip _default_compressor = "blosc" ignore_natural_naming_warning = pytest.mark.filterwarnings( "ignore:object name:tables.exceptions.NaturalNameWarning" ) @pytest.mark.single class TestHDFStore: def test_format_kwarg_in_constructor(self, setup_path): # GH 13291 with ensure_clean_path(setup_path) as path: with pytest.raises(ValueError): HDFStore(path, format="table") def test_context(self, setup_path): path = create_tempfile(setup_path) try: with HDFStore(path) as tbl: raise ValueError("blah") except ValueError: pass finally: safe_remove(path) try: with HDFStore(path) as tbl: tbl["a"] = tm.makeDataFrame() with HDFStore(path) as tbl: assert len(tbl) == 1 assert type(tbl["a"]) == DataFrame finally: safe_remove(path) def test_conv_read_write(self, setup_path): path = create_tempfile(setup_path) try: def roundtrip(key, obj, **kwargs): obj.to_hdf(path, key, **kwargs) return read_hdf(path, key) o = tm.makeTimeSeries() tm.assert_series_equal(o, roundtrip("series", o)) o = tm.makeStringSeries() tm.assert_series_equal(o, roundtrip("string_series", o)) o = tm.makeDataFrame() tm.assert_frame_equal(o, roundtrip("frame", o)) # table df = DataFrame(dict(A=range(5), B=range(5))) df.to_hdf(path, "table", append=True) result = read_hdf(path, "table", where=["index>2"]) tm.assert_frame_equal(df[df.index > 2], result) finally: safe_remove(path) def test_long_strings(self, setup_path): # GH6166 df = DataFrame( {"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10) ) with ensure_clean_store(setup_path) as store: store.append("df", df, data_columns=["a"]) result = store.select("df") tm.assert_frame_equal(df, result) def test_api(self, setup_path): # GH4584 # API issue when to_hdf doesn't accept append AND format args with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True) df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True) 
tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.to_hdf(path, "df", append=False, format="fixed") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False, format="f") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False) tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_store(setup_path) as store: path = store._path df = tm.makeDataFrame() _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=True, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # append to False _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # formats _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format=None) tm.assert_frame_equal(store.select("df"), df) with ensure_clean_path(setup_path) as path: # Invalid. df = tm.makeDataFrame() with pytest.raises(ValueError): df.to_hdf(path, "df", append=True, format="f") with pytest.raises(ValueError): df.to_hdf(path, "df", append=True, format="fixed") with pytest.raises(TypeError): df.to_hdf(path, "df", append=True, format="foo") with pytest.raises(TypeError): df.to_hdf(path, "df", append=False, format="bar") # File path doesn't exist path = "" with pytest.raises(FileNotFoundError): read_hdf(path, "df") def test_api_default_format(self, setup_path): # default_format option with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() pd.set_option("io.hdf.default_format", "fixed") _maybe_remove(store, "df") store.put("df", df) assert not store.get_storer("df").is_table with pytest.raises(ValueError): store.append("df2", df) pd.set_option("io.hdf.default_format", "table") _maybe_remove(store, "df") store.put("df", df) assert store.get_storer("df").is_table _maybe_remove(store, "df2") store.append("df2", df) assert store.get_storer("df").is_table pd.set_option("io.hdf.default_format", None) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() pd.set_option("io.hdf.default_format", "fixed") df.to_hdf(path, "df") with HDFStore(path) as store: assert not store.get_storer("df").is_table with pytest.raises(ValueError): df.to_hdf(path, "df2", append=True) pd.set_option("io.hdf.default_format", "table") df.to_hdf(path, "df3") with HDFStore(path) as store: assert store.get_storer("df3").is_table df.to_hdf(path, "df4", append=True) with HDFStore(path) as store: assert store.get_storer("df4").is_table pd.set_option("io.hdf.default_format", None) def test_keys(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeStringSeries() store["c"] = tm.makeDataFrame() assert len(store) == 3 expected = {"/a", "/b", "/c"} assert set(store.keys()) == expected assert set(store) == expected def test_keys_ignore_hdf_softlink(self, setup_path): # GH 20523 # Puts a softlink into HDF file and rereads with ensure_clean_store(setup_path) as store: df = DataFrame(dict(A=range(5), B=range(5))) 
store.put("df", df) assert store.keys() == ["/df"] store._handle.create_soft_link(store._handle.root, "symlink", "df") # Should ignore the softlink assert store.keys() == ["/df"] def test_iter_empty(self, setup_path): with ensure_clean_store(setup_path) as store: # GH 12221 assert list(store) == [] def test_repr(self, setup_path): with ensure_clean_store(setup_path) as store: repr(store) store.info() store["a"] = tm.makeTimeSeries() store["b"] = tm.makeStringSeries() store["c"] = tm.makeDataFrame() df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["bool3"] = True df["int1"] = 1 df["int2"] = 2 df["timestamp1"] = Timestamp("20010102") df["timestamp2"] = Timestamp("20010103") df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0) df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0) df.loc[3:6, ["obj1"]] = np.nan df = df._consolidate()._convert(datetime=True) with catch_warnings(record=True): simplefilter("ignore", pd.errors.PerformanceWarning) store["df"] = df # make a random group in hdf space store._handle.create_group(store._handle.root, "bah") assert store.filename in repr(store) assert store.filename in str(store) store.info() # storers with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() store.append("df", df) s = store.get_storer("df") repr(s) str(s) @ignore_natural_naming_warning def test_contains(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeDataFrame() store["foo/bar"] = tm.makeDataFrame() assert "a" in store assert "b" in store assert "c" not in store assert "foo/bar" in store assert "/foo/bar" in store assert "/foo/b" not in store assert "bar" not in store # gh-2694: tables.NaturalNameWarning with catch_warnings(record=True): store["node())"] = tm.makeDataFrame() assert "node())" in store def test_versioning(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store["b"] = tm.makeDataFrame() df = tm.makeTimeDataFrame() _maybe_remove(store, "df1") store.append("df1", df[:10]) store.append("df1", df[10:]) assert store.root.a._v_attrs.pandas_version == "0.15.2" assert store.root.b._v_attrs.pandas_version == "0.15.2" assert store.root.df1._v_attrs.pandas_version == "0.15.2" # write a file and wipe its versioning _maybe_remove(store, "df2") store.append("df2", df) # this is an error because its table_type is appendable, but no # version info store.get_node("df2")._v_attrs.pandas_version = None with pytest.raises(Exception): store.select("df2") def test_mode(self, setup_path): df = tm.makeTimeDataFrame() def check(mode): with ensure_clean_path(setup_path) as path: # constructor if mode in ["r", "r+"]: with pytest.raises(IOError): HDFStore(path, mode=mode) else: store = HDFStore(path, mode=mode) assert store._handle.mode == mode store.close() with ensure_clean_path(setup_path) as path: # context if mode in ["r", "r+"]: with pytest.raises(IOError): with HDFStore(path, mode=mode) as store: # noqa pass else: with HDFStore(path, mode=mode) as store: assert store._handle.mode == mode with ensure_clean_path(setup_path) as path: # conv write if mode in ["r", "r+"]: with pytest.raises(IOError): df.to_hdf(path, "df", mode=mode) df.to_hdf(path, "df", mode="w") else: df.to_hdf(path, "df", mode=mode) # conv read if mode in ["w"]: with pytest.raises(ValueError): read_hdf(path, "df", mode=mode) else: result = read_hdf(path, "df", mode=mode) tm.assert_frame_equal(result, df) def check_default_mode(): # 
read_hdf uses default mode with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", mode="w") result = read_hdf(path, "df") tm.assert_frame_equal(result, df) check("r") check("r+") check("a") check("w") check_default_mode() def test_reopen_handle(self, setup_path): with ensure_clean_path(setup_path) as path: store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() # invalid mode change with pytest.raises(PossibleDataLossError): store.open("w") store.close() assert not store.is_open # truncation ok here store.open("w") assert store.is_open assert len(store) == 0 store.close() assert not store.is_open store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() # reopen as read store.open("r") assert store.is_open assert len(store) == 1 assert store._mode == "r" store.close() assert not store.is_open # reopen as append store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open # reopen as append (again) store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open def test_open_args(self, setup_path): with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() # create an in memory store store = HDFStore( path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0 ) store["df"] = df store.append("df2", df) tm.assert_frame_equal(store["df"], df) tm.assert_frame_equal(store["df2"], df) store.close() # the file should not have actually been written assert not os.path.exists(path) def test_flush(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store.flush() store.flush(fsync=True) def test_get(self, setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() left = store.get("a") right = store["a"] tm.assert_series_equal(left, right) left = store.get("/a") right = store["/a"] tm.assert_series_equal(left, right) with pytest.raises(KeyError, match="'No object named b in the file'"): store.get("b") @pytest.mark.parametrize( "where, expected", [ ( "/", { "": ({"first_group", "second_group"}, set()), "/first_group": (set(), {"df1", "df2"}), "/second_group": ({"third_group"}, {"df3", "s1"}), "/second_group/third_group": (set(), {"df4"}), }, ), ( "/second_group", { "/second_group": ({"third_group"}, {"df3", "s1"}), "/second_group/third_group": (set(), {"df4"}), }, ), ], ) def test_walk(self, where, expected, setup_path): # GH10143 objs = { "df1": pd.DataFrame([1, 2, 3]), "df2": pd.DataFrame([4, 5, 6]), "df3": pd.DataFrame([6, 7, 8]), "df4": pd.DataFrame([9, 10, 11]), "s1": pd.Series([10, 9, 8]), # Next 3 items aren't pandas objects and should be ignored "a1": np.array([[1, 2, 3], [4, 5, 6]]), "tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"), "tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"), } with ensure_clean_store("walk_groups.hdf", mode="w") as store: store.put("/first_group/df1", objs["df1"]) store.put("/first_group/df2", objs["df2"]) store.put("/second_group/df3", objs["df3"]) store.put("/second_group/s1", objs["s1"]) store.put("/second_group/third_group/df4", objs["df4"]) # Create non-pandas objects store._handle.create_array("/first_group", "a1", objs["a1"]) store._handle.create_table("/first_group", "tb1", obj=objs["tb1"]) store._handle.create_table("/second_group", "tb2", obj=objs["tb2"]) assert len(list(store.walk(where=where))) == len(expected) for path, groups, leaves in store.walk(where=where): assert path in expected expected_groups, 
expected_frames = expected[path] assert expected_groups == set(groups) assert expected_frames == set(leaves) for leaf in leaves: frame_path = "/".join([path, leaf]) obj = store.get(frame_path) if "df" in leaf: tm.assert_frame_equal(obj, objs[leaf]) else: tm.assert_series_equal(obj, objs[leaf]) def test_getattr(self, setup_path): with ensure_clean_store(setup_path) as store: s = tm.makeTimeSeries() store["a"] = s # test attribute access result = store.a tm.assert_series_equal(result, s) result = getattr(store, "a") tm.assert_series_equal(result, s) df = tm.makeTimeDataFrame() store["df"] = df result = store.df tm.assert_frame_equal(result, df) # errors for x in ["d", "mode", "path", "handle", "complib"]: with pytest.raises(AttributeError): getattr(store, x) # not stores for x in ["mode", "path", "handle", "complib"]: getattr(store, "_{x}".format(x=x)) def test_put(self, setup_path): with ensure_clean_store(setup_path) as store: ts = tm.makeTimeSeries() df = tm.makeTimeDataFrame() store["a"] = ts store["b"] = df[:10] store["foo/bar/bah"] = df[:10] store["foo"] = df[:10] store["/foo"] = df[:10] store.put("c", df[:10], format="table") # not OK, not a table with pytest.raises(ValueError): store.put("b", df[10:], append=True) # node does not currently exist, test _is_table_type returns False # in this case _maybe_remove(store, "f") with pytest.raises(ValueError): store.put("f", df[10:], append=True) # can't put to a table (use append instead) with pytest.raises(ValueError): store.put("c", df[10:], append=True) # overwrite table store.put("c", df[:10], format="table", append=False) tm.assert_frame_equal(df[:10], store["c"]) def test_put_string_index(self, setup_path): with ensure_clean_store(setup_path) as store: index = Index( ["I am a very long string index: {i}".format(i=i) for i in range(20)] ) s = Series(np.arange(20), index=index) df = DataFrame({"A": s, "B": s}) store["a"] = s tm.assert_series_equal(store["a"], s) store["b"] = df tm.assert_frame_equal(store["b"], df) # mixed length index = Index( ["abcdefghijklmnopqrstuvwxyz1234567890"] + ["I am a very long string index: {i}".format(i=i) for i in range(20)] ) s = Series(np.arange(21), index=index) df = DataFrame({"A": s, "B": s}) store["a"] = s tm.assert_series_equal(store["a"], s) store["b"] = df tm.assert_frame_equal(store["b"], df) def test_put_compression(self, setup_path): with ensure_clean_store(setup_path) as store: df = tm.makeTimeDataFrame() store.put("c", df, format="table", complib="zlib") tm.assert_frame_equal(store["c"], df) # can't compress if format='fixed' with pytest.raises(ValueError): store.put("b", df, format="fixed", complib="zlib") @td.skip_if_windows_python_3 def test_put_compression_blosc(self, setup_path): df = tm.makeTimeDataFrame() with ensure_clean_store(setup_path) as store: # can't compress if format='fixed' with pytest.raises(ValueError): store.put("b", df, format="fixed", complib="blosc") store.put("c", df, format="table", complib="blosc") tm.assert_frame_equal(store["c"], df) def test_complibs_default_settings(self, setup_path): # GH15943 df = tm.makeDataFrame() # Set complevel and check if complib is automatically set to # default value with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complevel=9) result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 9 assert node.filters.complib == "zlib" # Set complib and check to see if 
compression is disabled with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complib="zlib") result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 0 assert node.filters.complib is None # Check if not setting complib or complevel results in no compression with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df") result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with
tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 0 assert node.filters.complib is None
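# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of the test suite above) of the
# fixed-vs-table round-trip behaviour those HDFStore tests exercise. It is a
# hedged illustration only: it assumes pandas with the optional PyTables
# dependency installed, and the file name "example.h5" is an arbitrary choice.
import os

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(20, 4), columns=list("ABCD"))
path = "example.h5"

# "fixed" is fast but cannot be appended to or queried; "table" supports append.
df.to_hdf(path, "df_fixed", format="fixed")
df.to_hdf(path, "df_table", format="table", append=False)
df.to_hdf(path, "df_table", format="table", append=True)  # table now holds 40 rows

pd.testing.assert_frame_equal(pd.read_hdf(path, "df_fixed"), df)
assert len(pd.read_hdf(path, "df_table")) == 2 * len(df)

# io.hdf.default_format decides which format is used when none is passed.
with pd.option_context("io.hdf.default_format", "table"):
    df.to_hdf(path, "df_default")
with pd.HDFStore(path) as store:
    assert store.get_storer("df_default").is_table

os.remove(path)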
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/07-pv-forecast.ipynb (unless otherwise specified). __all__ = ['construct_df_charge_features', 'prepare_training_input_data', 'plot_random_day', 'generate_kfold_preds_weeks', 'generate_kfold_charge_preds', 'predict_charge', 'get_train_test_arr', 'get_train_test_Xy', 'predict_charge', 'fit_and_save_pv_model', 'prepare_test_feature_data', 'optimise_test_charge_profile'] # Cell import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import seaborn as sns import joblib from moepy.lowess import quantile_model from sklearn.pipeline import Pipeline from sklearn.linear_model import LinearRegression, Lasso, Ridge from sklearn.metrics import make_scorer, r2_score, mean_absolute_error, mean_squared_error from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import GroupKFold from mlxtend.feature_selection import SequentialFeatureSelector as SFS from skopt.plots import plot_objective from skopt.space import Real, Categorical, Integer from batopt import clean, discharge, utils, charge import FEAutils as hlp from ipypb import track # Cell def construct_df_charge_features(df, dt_rng=None): if dt_rng is None: dt_rng = pd.date_range(df.index.min(), df.index.max(), freq='30T') df_features = pd.DataFrame(index=dt_rng) # Adding temperature data temp_loc_cols = df.columns[df.columns.str.contains('temp_location')] df_features.loc[df.index, temp_loc_cols] = df[temp_loc_cols].copy() df_features = df_features.ffill(limit=1) # Adding solar irradiance data solar_loc_cols = df.columns[df.columns.str.contains('solar_location')] df_features.loc[df.index, solar_loc_cols] = df[solar_loc_cols].copy() df_features = df_features.ffill(limit=1) # Adding avg solar from previous week df_features['pv_7d_lag'] = df['pv_power_mw'].rolling(48*7).mean().shift(48*7) # Adding datetime features dts = df_features.index df_features['hour'] = dts.hour + dts.minute/60 df_features['doy'] = dts.dayofyear # Removing some extraneous features - found not be particularly useful cols = [c for c in df_features.columns if 'solar_location4' not in c and 'solar_location1' not in c] df_features = df_features.filter(cols) # Removing NaN values df_features = df_features.dropna() return df_features def prepare_training_input_data(intermediate_data_dir, start_hour=5): # Loading input data df = clean.combine_training_datasets(intermediate_data_dir).interpolate(limit=1) df_features = construct_df_charge_features(df) # Filtering for overlapping feature and target data dt_idx = pd.date_range(df_features.index.min(), df['pv_power_mw'].dropna().index.max()-pd.Timedelta(minutes=30), freq='30T') s_pv = df.loc[dt_idx, 'pv_power_mw'] df_features = df_features.loc[dt_idx] # Filtering for evening datetimes charging_datetimes = charge.extract_charging_datetimes(df_features, start_hour=start_hour) X = df_features.loc[charging_datetimes] y = s_pv.loc[charging_datetimes] return X, y # Cell def plot_random_day(df_pred, ax=None): """ View predicted and observed PV profiles """ if ax is None: ax = plt.gca() random_day_idx = pd.to_datetime(np.random.choice(df_pred.index.date)) df_random_day = df_pred[df_pred.index.date==random_day_idx] df_random_day['true'].plot(ax=ax) df_random_day['pred'].plot(ax=ax) return ax # Cell def generate_kfold_preds_weeks(X, y, model, groups, kfold_kwargs, index=None): """ Generate kfold preds, grouping by week """ group_kfold = GroupKFold(**kfold_kwargs) df_pred = pd.DataFrame(columns=['pred', 'true'], 
index=np.arange(X.shape[0])) for train_index, test_index in group_kfold.split(X, y, groups): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] model.fit(X_train, y_train) df_pred.loc[test_index, 'true'] = y_test df_pred.loc[test_index, 'pred'] = model.predict(X_test) df_pred.sort_index() if index is not None: assert len(index) == df_pred.shape[0], 'The passed index must be the same length as X and y' df_pred.index = index return df_pred def generate_kfold_charge_preds(X, y, model, groups, kfold_kwargs={'n_splits': 5}): """ Fit the PV forecasting model and calculate the optimal charge profile for predictions. """ df_pred = generate_kfold_preds_weeks(X.values, y.values, model, groups, kfold_kwargs=kfold_kwargs, index=X.index) charge_pred = charge.construct_charge_s(df_pred.pred) charge_pred = charge.post_pred_charge_proc_func(charge_pred) return pd.DataFrame({'charge_pred': charge_pred, 'pv_actual': df_pred.true, 'pv_pred': df_pred.pred}) def predict_charge(X, model): """ Given a fitted PV forecast model and feature array X, get the optimal charge profile. """ pv_pred = pd.Series(model.predict(X), index=X.index) charge_pred = charge.construct_charge_s(pv_pred) charge_pred = charge.post_pred_charge_proc_func(charge_pred) return pd.Series(charge_pred, index=X.index) # Cell def get_train_test_arr(arr, start_of_test_period): train_arr = arr[:pd.to_datetime(start_of_test_period, utc=True)] test_arr = arr[pd.to_datetime(start_of_test_period, utc=True):] return train_arr, test_arr def get_train_test_Xy(X, y, start_of_test_period): x_train, x_test = get_train_test_arr(X, start_of_test_period) y_train, y_test = get_train_test_arr(y, start_of_test_period) return x_train, x_test, y_train, y_test # Cell def predict_charge(X, model): """ Given a fitted PV forecast model and feature array X, get the optimal charge profile. """ pv_pred = pd.Series(model.predict(X), index=X.index) charge_pred = charge.construct_charge_s(pv_pred) charge_pred = charge.post_pred_charge_proc_func(charge_pred) return
pd.Series(charge_pred, index=X.index)
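# ---------------------------------------------------------------------------
# Minimal self-contained sketch of the grouped out-of-fold prediction pattern
# used by generate_kfold_preds_weeks above. The synthetic feature matrix,
# coefficients and week-sized group labels are assumptions made purely for
# illustration; only the GroupKFold mechanics mirror the code above.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GroupKFold

rng = np.random.default_rng(0)
n_obs = 48 * 7 * 8                                  # eight weeks of half-hourly points
X = rng.normal(size=(n_obs, 3))
y = X @ np.array([0.5, -1.0, 2.0]) + rng.normal(scale=0.1, size=n_obs)
groups = np.arange(n_obs) // (48 * 7)               # one group id per week

df_pred = pd.DataFrame(index=np.arange(n_obs), columns=["pred", "true"], dtype=float)
model = LinearRegression()

for train_idx, test_idx in GroupKFold(n_splits=4).split(X, y, groups):
    model.fit(X[train_idx], y[train_idx])
    df_pred.loc[test_idx, "true"] = y[test_idx]
    df_pred.loc[test_idx, "pred"] = model.predict(X[test_idx])

# grouping by week guarantees every row gets exactly one out-of-fold prediction
assert df_pred.notna().all().all()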
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode


def cast_to_float(v):
    try:
        return float(v)
    except ValueError:
        return v


def convert_to_iso8601(text):
    date = parser.parse(text)
    return date.isoformat()


def load_member_summaries(
    source_dir="data_for_graph/members",
    filename="company_check",
    # concat_uk_sector=False
):
    '''
    LOAD FLAT FILES OF MEMBER DATA
    '''
    dfs = []
    for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
        summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
        print("reading summary from", summary_filename)
        dfs.append(pd.read_csv(summary_filename, index_col=0))
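# ---------------------------------------------------------------------------
# The rest of load_member_summaries is not shown above, so the helper below is
# only a hedged sketch of one plausible way the per-level frames could be
# combined: tag each frame with its level and concatenate. The function name
# combine_member_summaries and the column name "membership_level" are
# hypothetical, not taken from the original code.
import pandas as pd


def combine_member_summaries(frames, levels):
    tagged = [df.assign(membership_level=level) for df, level in zip(frames, levels)]
    return pd.concat(tagged, ignore_index=True)


# usage sketch with toy frames standing in for the per-level CSV contents
frames = [pd.DataFrame({"company": ["A"]}), pd.DataFrame({"company": ["B"]})]
print(combine_member_summaries(frames, ["Patron", "Gold"]))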
# -*- coding: utf-8 -*- """ v9s model * Input: v5_im Author: Kohei <<EMAIL>> """ from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler from pathlib import Path import subprocess import argparse import math import glob import sys import json import re import warnings import scipy import tqdm import click import tables as tb import pandas as pd import numpy as np from keras.models import Model from keras.engine.topology import merge as merge_l from keras.layers import ( Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization) from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, EarlyStopping, History from keras import backend as K import skimage.transform import skimage.morphology import rasterio.features import shapely.wkt import shapely.ops import shapely.geometry MODEL_NAME = 'v9s' ORIGINAL_SIZE = 650 INPUT_SIZE = 256 LOGFORMAT = '%(asctime)s %(levelname)s %(message)s' BASE_DIR = "/data/train" WORKING_DIR = "/data/working" IMAGE_DIR = "/data/working/images/{}".format('v5') MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME) FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME) # Parameters MIN_POLYGON_AREA = 30 # Input files FMT_TRAIN_SUMMARY_PATH = str( Path(BASE_DIR) / Path("{prefix:s}_Train/") / Path("summaryData/{prefix:s}_Train_Building_Solutions.csv")) FMT_TRAIN_RGB_IMAGE_PATH = str( Path(BASE_DIR) / Path("{prefix:s}_Train/") / Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif")) FMT_TEST_RGB_IMAGE_PATH = str( Path(BASE_DIR) / Path("{prefix:s}_Test_public/") / Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif")) FMT_TRAIN_MSPEC_IMAGE_PATH = str( Path(BASE_DIR) / Path("{prefix:s}_Train/") / Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif")) FMT_TEST_MSPEC_IMAGE_PATH = str( Path(BASE_DIR) / Path("{prefix:s}_Test_public/") / Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif")) # Preprocessing result FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv" FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv" # Image list, Image container and mask container FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv" FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv" FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5" FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5" FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5" FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5" FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5" FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5" FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv" FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv" FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5" FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5" FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5" FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5" FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5" FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5" FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5" # Model files FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5" FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5" FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv" FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv" FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv" # Prediction & polygon result FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5" FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5" FMT_VALTESTPOLY_PATH = 
MODEL_DIR + "/{}_eval_poly.csv" FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv" FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv" FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv" FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv" # Model related files (others) FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5" FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5" # Logger warnings.simplefilter("ignore", UserWarning) handler = StreamHandler() handler.setLevel(INFO) handler.setFormatter(Formatter(LOGFORMAT)) fh_handler = FileHandler(".{}.log".format(MODEL_NAME)) fh_handler.setFormatter(Formatter(LOGFORMAT)) logger = getLogger('spacenet2') logger.setLevel(INFO) if __name__ == '__main__': logger.addHandler(handler) logger.addHandler(fh_handler) # Fix seed for reproducibility np.random.seed(1145141919) def directory_name_to_area_id(datapath): """ Directory name to AOI number Usage: >>> directory_name_to_area_id("/data/test/AOI_2_Vegas") 2 """ dir_name = Path(datapath).name if dir_name.startswith('AOI_2_Vegas'): return 2 elif dir_name.startswith('AOI_3_Paris'): return 3 elif dir_name.startswith('AOI_4_Shanghai'): return 4 elif dir_name.startswith('AOI_5_Khartoum'): return 5 else: raise RuntimeError("Unsupported city id is given.") def _remove_interiors(line): if "), (" in line: line_prefix = line.split('), (')[0] line_terminate = line.split('))",')[-1] line = ( line_prefix + '))",' + line_terminate ) return line def __load_band_cut_th(band_fn, bandsz=3): df = pd.read_csv(band_fn, index_col='area_id') all_band_cut_th = {area_id: {} for area_id in range(2, 6)} for area_id, row in df.iterrows(): for chan_i in range(bandsz): all_band_cut_th[area_id][chan_i] = dict( min=row['chan{}_min'.format(chan_i)], max=row['chan{}_max'.format(chan_i)], ) return all_band_cut_th def _calc_fscore_per_aoi(area_id): prefix = area_id_to_prefix(area_id) truth_file = FMT_VALTESTTRUTH_PATH.format(prefix) poly_file = FMT_VALTESTPOLY_PATH.format(prefix) cmd = [ 'java', '-jar', '/root/visualizer-2.0/visualizer.jar', '-truth', truth_file, '-solution', poly_file, '-no-gui', '-band-triplets', '/root/visualizer-2.0/data/band-triplets.txt', '-image-dir', 'pass', ] proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout_data, stderr_data = proc.communicate() lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]] """ Overall F-score : 0.85029 AOI_2_Vegas: TP : 27827 FP : 4999 FN : 4800 Precision: 0.847712 Recall : 0.852883 F-score : 0.85029 """ if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"): overall_fscore = 0 tp = 0 fp = 0 fn = 0 precision = 0 recall = 0 fscore = 0 elif len(lines) > 0 and lines[0].startswith("Overall F-score : "): assert lines[0].startswith("Overall F-score : ") assert lines[2].startswith("AOI_") assert lines[3].strip().startswith("TP") assert lines[4].strip().startswith("FP") assert lines[5].strip().startswith("FN") assert lines[6].strip().startswith("Precision") assert lines[7].strip().startswith("Recall") assert lines[8].strip().startswith("F-score") overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0]) tp = int(re.findall("(\d+)", lines[3])[0]) fp = int(re.findall("(\d+)", lines[4])[0]) fn = int(re.findall("(\d+)", lines[5])[0]) precision = float(re.findall("([\d\.]+)", lines[6])[0]) recall = float(re.findall("([\d\.]+)", lines[7])[0]) fscore = float(re.findall("([\d\.]+)", lines[8])[0]) else: logger.warn("Unexpected data >>> " + stdout_data.decode('utf8')) raise 
RuntimeError("Unsupported format") return { 'overall_fscore': overall_fscore, 'tp': tp, 'fp': fp, 'fn': fn, 'precision': precision, 'recall': recall, 'fscore': fscore, } def prefix_to_area_id(prefix): area_dict = { 'AOI_2_Vegas': 2, 'AOI_3_Paris': 3, 'AOI_4_Shanghai': 4, 'AOI_5_Khartoum': 5, } return area_dict[area_id] def area_id_to_prefix(area_id): area_dict = { 2: 'AOI_2_Vegas', 3: 'AOI_3_Paris', 4: 'AOI_4_Shanghai', 5: 'AOI_5_Khartoum', } return area_dict[area_id] # --------------------------------------------------------- # main def _get_model_parameter(area_id): prefix = area_id_to_prefix(area_id) fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix) best_row = pd.read_csv(fn_hist).sort_values( by='fscore', ascending=False, ).iloc[0] param = dict( fn_epoch=int(best_row['zero_base_epoch']), min_poly_area=int(best_row['min_area_th']), ) return param def get_resized_raster_3chan_image(image_id, band_cut_th=None): fn = train_image_id_to_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) for chan_i in range(3): min_val = band_cut_th[chan_i]['min'] max_val = band_cut_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) values = np.swapaxes(values, 0, 2) values = np.swapaxes(values, 0, 1) values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE)) return values def get_resized_raster_3chan_image_test(image_id, band_cut_th=None): fn = test_image_id_to_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) for chan_i in range(3): min_val = band_cut_th[chan_i]['min'] max_val = band_cut_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) values = np.swapaxes(values, 0, 2) values = np.swapaxes(values, 0, 1) values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE)) return values def image_mask_resized_from_summary(df, image_id): im_mask = np.zeros((650, 650)) for idx, row in df[df.ImageId == image_id].iterrows(): shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix) if shape_obj.exterior is not None: coords = list(shape_obj.exterior.coords) x = [round(float(pp[0])) for pp in coords] y = [round(float(pp[1])) for pp in coords] yy, xx = skimage.draw.polygon(y, x, (650, 650)) im_mask[yy, xx] = 1 interiors = shape_obj.interiors for interior in interiors: coords = list(interior.coords) x = [round(float(pp[0])) for pp in coords] y = [round(float(pp[1])) for pp in coords] yy, xx = skimage.draw.polygon(y, x, (650, 650)) im_mask[yy, xx] = 0 im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE)) im_mask = (im_mask > 0.5).astype(np.uint8) return im_mask def train_test_image_prep(area_id): prefix = area_id_to_prefix(area_id) df_train = pd.read_csv( FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') df_test = pd.read_csv( FMT_TEST_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') band_cut_th = __load_band_cut_th( FMT_BANDCUT_TH_PATH.format(prefix))[area_id] df_summary = _load_train_summary_data(area_id) fn = FMT_TRAIN_IM_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im = get_resized_raster_3chan_image(image_id, band_cut_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn 
= FMT_TEST_IM_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_test.index, total=len(df_test)): im = get_resized_raster_3chan_image_test(image_id, band_cut_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn = FMT_TRAIN_MASK_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im_mask = image_mask_resized_from_summary(df_summary, image_id) atom = tb.Atom.from_dtype(im_mask.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im_mask.shape, filters=filters) ds[:] = im_mask def valtrain_test_image_prep(area_id): prefix = area_id_to_prefix(area_id) logger.info("valtrain_test_image_prep for {}".format(prefix)) df_train = pd.read_csv( FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') df_test = pd.read_csv( FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') band_cut_th = __load_band_cut_th( FMT_BANDCUT_TH_PATH.format(prefix))[area_id] df_summary = _load_train_summary_data(area_id) fn = FMT_VALTRAIN_IM_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im = get_resized_raster_3chan_image(image_id, band_cut_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn = FMT_VALTEST_IM_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_test.index, total=len(df_test)): im = get_resized_raster_3chan_image(image_id, band_cut_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn = FMT_VALTRAIN_MASK_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im_mask = image_mask_resized_from_summary(df_summary, image_id) atom = tb.Atom.from_dtype(im_mask.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im_mask.shape, filters=filters) ds[:] = im_mask fn = FMT_VALTEST_MASK_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_test.index, total=len(df_test)): im_mask = image_mask_resized_from_summary(df_summary, image_id) atom = tb.Atom.from_dtype(im_mask.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im_mask.shape, filters=filters) ds[:] = im_mask def train_test_mul_image_prep(area_id): prefix = area_id_to_prefix(area_id) df_train = pd.read_csv( FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') df_test = pd.read_csv( FMT_TEST_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') band_rgb_th = __load_band_cut_th( FMT_BANDCUT_TH_PATH.format(prefix))[area_id] band_mul_th = __load_band_cut_th( FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id] df_summary = _load_train_summary_data(area_id) fn = 
FMT_TRAIN_MUL_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im = get_resized_raster_8chan_image( image_id, band_rgb_th, band_mul_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn = FMT_TEST_MUL_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_test.index, total=len(df_test)): im = get_resized_raster_8chan_image_test( image_id, band_rgb_th, band_mul_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im def valtrain_test_mul_image_prep(area_id): prefix = area_id_to_prefix(area_id) logger.info("valtrain_test_image_prep for {}".format(prefix)) df_train = pd.read_csv( FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') df_test = pd.read_csv( FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix), index_col='ImageId') band_rgb_th = __load_band_cut_th( FMT_BANDCUT_TH_PATH.format(prefix))[area_id] band_mul_th = __load_band_cut_th( FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id] df_summary = _load_train_summary_data(area_id) fn = FMT_VALTRAIN_MUL_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_train.index, total=len(df_train)): im = get_resized_raster_8chan_image( image_id, band_rgb_th, band_mul_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im fn = FMT_VALTEST_MUL_STORE.format(prefix) logger.info("Prepare image container: {}".format(fn)) with tb.open_file(fn, 'w') as f: for image_id in tqdm.tqdm(df_test.index, total=len(df_test)): im = get_resized_raster_8chan_image( image_id, band_rgb_th, band_mul_th) atom = tb.Atom.from_dtype(im.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, image_id, atom, im.shape, filters=filters) ds[:] = im def _load_train_summary_data(area_id): prefix = area_id_to_prefix(area_id) fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix) df = pd.read_csv(fn) return df def split_val_train_test(area_id): prefix = area_id_to_prefix(area_id) df = _load_train_summary_data(area_id) df_agg = df.groupby('ImageId').agg('first') image_id_list = df_agg.index.tolist() np.random.shuffle(image_id_list) sz_valtrain = int(len(image_id_list) * 0.7) sz_valtest = len(image_id_list) - sz_valtrain pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv( FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix), index=False) pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv( FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix), index=False) def train_image_id_to_mspec_path(image_id): prefix = image_id_to_prefix(image_id) fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format( prefix=prefix, image_id=image_id) return fn def test_image_id_to_mspec_path(image_id): prefix = image_id_to_prefix(image_id) fn = FMT_TEST_MSPEC_IMAGE_PATH.format( prefix=prefix, image_id=image_id) return fn def train_image_id_to_path(image_id): prefix = image_id_to_prefix(image_id) fn = FMT_TRAIN_RGB_IMAGE_PATH.format( prefix=prefix, image_id=image_id) return fn def test_image_id_to_path(image_id): prefix = 
image_id_to_prefix(image_id) fn = FMT_TEST_RGB_IMAGE_PATH.format( prefix=prefix, image_id=image_id) return fn def image_id_to_prefix(image_id): prefix = image_id.split('img')[0][:-1] return prefix def calc_multiband_cut_threshold(area_id): rows = [] band_cut_th = __calc_multiband_cut_threshold(area_id) prefix = area_id_to_prefix(area_id) row = dict(prefix=area_id_to_prefix(area_id)) row['area_id'] = area_id for chan_i in band_cut_th.keys(): row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max'] row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min'] rows.append(row) pd.DataFrame(rows).to_csv(FMT_BANDCUT_TH_PATH.format(prefix), index=False) def __calc_multiband_cut_threshold(area_id): prefix = area_id_to_prefix(area_id) band_values = {k: [] for k in range(3)} band_cut_th = {k: dict(max=0, min=0) for k in range(3)} image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format( prefix=prefix)).ImageId.tolist() for image_id in tqdm.tqdm(image_id_list[:500]): image_fn = train_image_id_to_path(image_id) with rasterio.open(image_fn, 'r') as f: values = f.read().astype(np.float32) for i_chan in range(3): values_ = values[i_chan].ravel().tolist() values_ = np.array( [v for v in values_ if v != 0] ) # Remove sensored mask band_values[i_chan].append(values_) image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format( prefix=prefix)).ImageId.tolist() for image_id in tqdm.tqdm(image_id_list[:500]): image_fn = train_image_id_to_path(image_id) with rasterio.open(image_fn, 'r') as f: values = f.read().astype(np.float32) for i_chan in range(3): values_ = values[i_chan].ravel().tolist() values_ = np.array( [v for v in values_ if v != 0] ) # Remove sensored mask band_values[i_chan].append(values_) for i_chan in range(3): band_values[i_chan] = np.concatenate( band_values[i_chan]).ravel() band_cut_th[i_chan]['max'] = scipy.percentile( band_values[i_chan], 98) band_cut_th[i_chan]['min'] = scipy.percentile( band_values[i_chan], 2) return band_cut_th def calc_mul_multiband_cut_threshold(area_id): rows = [] band_cut_th = __calc_mul_multiband_cut_threshold(area_id) prefix = area_id_to_prefix(area_id) row = dict(prefix=area_id_to_prefix(area_id)) row['area_id'] = area_id for chan_i in band_cut_th.keys(): row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max'] row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min'] rows.append(row) pd.DataFrame(rows).to_csv( FMT_MUL_BANDCUT_TH_PATH.format(prefix), index=False) def __calc_mul_multiband_cut_threshold(area_id): prefix = area_id_to_prefix(area_id) band_values = {k: [] for k in range(8)} band_cut_th = {k: dict(max=0, min=0) for k in range(8)} image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format( prefix=prefix)).ImageId.tolist() for image_id in tqdm.tqdm(image_id_list[:500]): image_fn = train_image_id_to_mspec_path(image_id) with rasterio.open(image_fn, 'r') as f: values = f.read().astype(np.float32) for i_chan in range(8): values_ = values[i_chan].ravel().tolist() values_ = np.array( [v for v in values_ if v != 0] ) # Remove sensored mask band_values[i_chan].append(values_) image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format( prefix=prefix)).ImageId.tolist() for image_id in tqdm.tqdm(image_id_list[:500]): image_fn = train_image_id_to_mspec_path(image_id) with rasterio.open(image_fn, 'r') as f: values = f.read().astype(np.float32) for i_chan in range(8): values_ = values[i_chan].ravel().tolist() values_ = np.array( [v for v in values_ if v != 0] ) # Remove sensored mask band_values[i_chan].append(values_) for i_chan in 
range(8): band_values[i_chan] = np.concatenate( band_values[i_chan]).ravel() band_cut_th[i_chan]['max'] = scipy.percentile( band_values[i_chan], 98) band_cut_th[i_chan]['min'] = scipy.percentile( band_values[i_chan], 2) return band_cut_th def get_unet(): conv_params = dict(activation='relu', border_mode='same') merge_params = dict(mode='concat', concat_axis=1) inputs = Input((8, 256, 256)) conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs) conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1) conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2) conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3) conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4) conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5) up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params) conv6 = Convolution2D(256, 3, 3, **conv_params)(up6) conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6) up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params) conv7 = Convolution2D(128, 3, 3, **conv_params)(up7) conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7) up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params) conv8 = Convolution2D(64, 3, 3, **conv_params)(up8) conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8) up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params) conv9 = Convolution2D(32, 3, 3, **conv_params)(up9) conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9) conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9) adam = Adam() model = Model(input=inputs, output=conv10) model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy', jaccard_coef, jaccard_coef_int]) return model def jaccard_coef(y_true, y_pred): smooth = 1e-12 intersection = K.sum(y_true * y_pred, axis=[0, -1, -2]) sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2]) jac = (intersection + smooth) / (sum_ - intersection + smooth) return K.mean(jac) def jaccard_coef_int(y_true, y_pred): smooth = 1e-12 y_pred_pos = K.round(K.clip(y_pred, 0, 1)) intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2]) sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2]) jac = (intersection + smooth) / (sum_ - intersection + smooth) return K.mean(jac) def generate_test_batch(area_id, batch_size=64, immean=None, enable_tqdm=False): prefix = area_id_to_prefix(area_id) df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)) fn_im = FMT_TEST_MUL_STORE.format(prefix) image_id_list = df_test.ImageId.tolist() if enable_tqdm: pbar = tqdm.tqdm(total=len(image_id_list)) while 1: total_sz = len(image_id_list) n_batch = int(math.floor(total_sz / batch_size) + 1) with tb.open_file(fn_im, 'r') as f_im: for i_batch in range(n_batch): target_image_ids = image_id_list[ i_batch*batch_size:(i_batch+1)*batch_size ] if len(target_image_ids) == 0: continue X_test = [] y_test = [] for image_id in target_image_ids: im = np.array(f_im.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_test.append(im) mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8) y_test.append(mask) X_test = np.array(X_test) y_test = np.array(y_test) y_test = y_test.reshape((-1, 
1, INPUT_SIZE, INPUT_SIZE)) if immean is not None: X_test = X_test - immean if enable_tqdm: pbar.update(y_test.shape[0]) yield (X_test, y_test) if enable_tqdm: pbar.close() def get_resized_raster_8chan_image_test(image_id, band_rgb_th, band_mul_th): """ RGB + multispectral (total: 8 channels) """ im = [] fn = test_image_id_to_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) for chan_i in range(3): min_val = band_rgb_th[chan_i]['min'] max_val = band_rgb_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) im.append(skimage.transform.resize( values[chan_i], (INPUT_SIZE, INPUT_SIZE))) fn = test_image_id_to_mspec_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) usechannels = [1, 2, 5, 6, 7] for chan_i in usechannels: min_val = band_mul_th[chan_i]['min'] max_val = band_mul_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) im.append(skimage.transform.resize( values[chan_i], (INPUT_SIZE, INPUT_SIZE))) im = np.array(im) # (ch, w, h) im = np.swapaxes(im, 0, 2) # -> (h, w, ch) im = np.swapaxes(im, 0, 1) # -> (w, h, ch) return im def get_resized_raster_8chan_image(image_id, band_rgb_th, band_mul_th): """ RGB + multispectral (total: 8 channels) """ im = [] fn = train_image_id_to_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) for chan_i in range(3): min_val = band_rgb_th[chan_i]['min'] max_val = band_rgb_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) im.append(skimage.transform.resize( values[chan_i], (INPUT_SIZE, INPUT_SIZE))) fn = train_image_id_to_mspec_path(image_id) with rasterio.open(fn, 'r') as f: values = f.read().astype(np.float32) usechannels = [1, 2, 5, 6, 7] for chan_i in usechannels: min_val = band_mul_th[chan_i]['min'] max_val = band_mul_th[chan_i]['max'] values[chan_i] = np.clip(values[chan_i], min_val, max_val) values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val) im.append(skimage.transform.resize( values[chan_i], (INPUT_SIZE, INPUT_SIZE))) im = np.array(im) # (ch, w, h) im = np.swapaxes(im, 0, 2) # -> (h, w, ch) im = np.swapaxes(im, 0, 1) # -> (w, h, ch) return im def _get_train_mul_data(area_id): """ RGB + multispectral (total: 8 channels) """ prefix = area_id_to_prefix(area_id) fn_train = FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix) df_train = pd.read_csv(fn_train) X_train = [] fn_im = FMT_TRAIN_MUL_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_train.append(im) X_train = np.array(X_train) y_train = [] fn_mask = FMT_TRAIN_MASK_STORE.format(prefix) with tb.open_file(fn_mask, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): mask = np.array(f.get_node('/' + image_id)) mask = (mask > 0.5).astype(np.uint8) y_train.append(mask) y_train = np.array(y_train) y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) return X_train, y_train def _get_test_mul_data(area_id): """ RGB + multispectral (total: 8 channels) """ prefix = area_id_to_prefix(area_id) fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test) X_test = [] fn_im = FMT_TEST_MUL_STORE.format(prefix) with 
tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_test.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_test.append(im) X_test = np.array(X_test) return X_test def _get_valtest_mul_data(area_id): prefix = area_id_to_prefix(area_id) fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test) X_val = [] fn_im = FMT_VALTEST_MUL_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_test.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_val.append(im) X_val = np.array(X_val) y_val = [] fn_mask = FMT_VALTEST_MASK_STORE.format(prefix) with tb.open_file(fn_mask, 'r') as f: for idx, image_id in enumerate(df_test.ImageId.tolist()): mask = np.array(f.get_node('/' + image_id)) mask = (mask > 0.5).astype(np.uint8) y_val.append(mask) y_val = np.array(y_val) y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) return X_val, y_val def _get_valtrain_mul_data(area_id): prefix = area_id_to_prefix(area_id) fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix) df_train = pd.read_csv(fn_train) X_val = [] fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_val.append(im) X_val = np.array(X_val) y_val = [] fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix) with tb.open_file(fn_mask, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): mask = np.array(f.get_node('/' + image_id)) mask = (mask > 0.5).astype(np.uint8) y_val.append(mask) y_val = np.array(y_val) y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) return X_val, y_val def get_mul_mean_image(area_id): prefix = area_id_to_prefix(area_id) with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f: im_mean = np.array(f.get_node('/mulmean')) return im_mean def preproc_stage3(area_id): prefix = area_id_to_prefix(area_id) if not Path(FMT_VALTEST_MUL_STORE.format(prefix)).exists(): valtrain_test_mul_image_prep(area_id) if not Path(FMT_TEST_MUL_STORE.format(prefix)).exists(): train_test_mul_image_prep(area_id) # mean image for subtract preprocessing X1, _ = _get_train_mul_data(area_id) X2 = _get_test_mul_data(area_id) X = np.vstack([X1, X2]) print(X.shape) X_mean = X.mean(axis=0) fn = FMT_MULMEAN.format(prefix) logger.info("Prepare mean image: {}".format(fn)) with tb.open_file(fn, 'w') as f: atom = tb.Atom.from_dtype(X_mean.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, 'mulmean', atom, X_mean.shape, filters=filters) ds[:] = X_mean def _internal_test_predict_best_param(area_id, save_pred=True): prefix = area_id_to_prefix(area_id) param = _get_model_parameter(area_id) epoch = param['fn_epoch'] min_th = param['min_poly_area'] # Prediction phase logger.info("Prediction phase: {}".format(prefix)) X_mean = get_mul_mean_image(area_id) # Load model weights # Predict and Save prediction result fn = FMT_TESTPRED_PATH.format(prefix) fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}') fn_model = fn_model.format(epoch=epoch) model = get_unet() model.load_weights(fn_model) fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test, index_col='ImageId') y_pred = model.predict_generator( generate_test_batch( area_id, batch_size=64, immean=X_mean, enable_tqdm=True, ), 
val_samples=len(df_test), ) del model # Save prediction result if save_pred: with tb.open_file(fn, 'w') as f: atom = tb.Atom.from_dtype(y_pred.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, 'pred', atom, y_pred.shape, filters=filters) ds[:] = y_pred return y_pred def _internal_test(area_id, enable_tqdm=False): prefix = area_id_to_prefix(area_id) y_pred = _internal_test_predict_best_param(area_id, save_pred=False) param = _get_model_parameter(area_id) min_th = param['min_poly_area'] # Postprocessing phase logger.info("Postprocessing phase") fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test, index_col='ImageId') fn_out = FMT_TESTPOLY_PATH.format(prefix) with open(fn_out, 'w') as f: f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n") test_image_list = df_test.index.tolist() for idx, image_id in tqdm.tqdm(enumerate(test_image_list), total=len(test_image_list)): df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th) if len(df_poly) > 0: for i, row in df_poly.iterrows(): line = "{},{},\"{}\",{:.6f}\n".format( image_id, row.bid, row.wkt, row.area_ratio) line = _remove_interiors(line) f.write(line) else: f.write("{},{},{},0\n".format( image_id, -1, "POLYGON EMPTY")) def validate_score(area_id): """ Calc competition score """ prefix = area_id_to_prefix(area_id) # Prediction phase if not Path(FMT_VALTESTPRED_PATH.format(prefix)).exists(): X_val, y_val = _get_valtest_mul_data(area_id) X_mean = get_mul_mean_image(area_id) # Load model weights # Predict and Save prediction result model = get_unet() model.load_weights(FMT_VALMODEL_PATH.format(prefix)) y_pred = model.predict(X_val - X_mean, batch_size=8, verbose=1) del model # Save prediction result fn = FMT_VALTESTPRED_PATH.format(prefix) with tb.open_file(fn, 'w') as f: atom = tb.Atom.from_dtype(y_pred.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, 'pred', atom, y_pred.shape, filters=filters) ds[:] = y_pred # Postprocessing phase if not Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists(): fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test, index_col='ImageId') fn = FMT_VALTESTPRED_PATH.format(prefix) with tb.open_file(fn, 'r') as f: y_pred = np.array(f.get_node('/pred')) print(y_pred.shape) fn_out = FMT_VALTESTPOLY_PATH.format(prefix) with open(fn_out, 'w') as f: f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n") for idx, image_id in enumerate(df_test.index.tolist()): df_poly = mask_to_poly(y_pred[idx][0]) if len(df_poly) > 0: for i, row in df_poly.iterrows(): f.write("{},{},\"{}\",{:.6f}\n".format( image_id, row.bid, row.wkt, row.area_ratio)) else: f.write("{},{},{},0\n".format( image_id, -1, "POLYGON EMPTY")) # update fn_out with open(fn_out, 'r') as f: lines = f.readlines() with open(fn_out, 'w') as f: f.write(lines[0]) for line in lines[1:]: line = _remove_interiors(line) f.write(line) # Validation solution file if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists(): fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix) df_true = pd.read_csv(fn_true) # # Remove prefix "PAN_" # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:] fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test) df_test_image_ids = df_test.ImageId.unique() fn_out = FMT_VALTESTTRUTH_PATH.format(prefix) with open(fn_out, 'w') as f: f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n") df_true = df_true[df_true.ImageId.isin(df_test_image_ids)] for idx, r in 
df_true.iterrows(): f.write("{},{},\"{}\",{:.6f}\n".format( r.ImageId, r.BuildingId, r.PolygonWKT_Pix, 1.0)) def validate_all_score(): header_line = [] lines = [] for area_id in range(2, 6): prefix = area_id_to_prefix(area_id) assert Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists() with open(FMT_VALTESTTRUTH_PATH.format(prefix), 'r') as f: header_line = f.readline() lines += f.readlines() with open(FMT_VALTESTTRUTH_OVALL_PATH, 'w') as f: f.write(header_line) for line in lines: f.write(line) # Predicted polygons header_line = [] lines = [] for area_id in range(2, 6): prefix = area_id_to_prefix(area_id) assert Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists() with open(FMT_VALTESTPOLY_PATH.format(prefix), 'r') as f: header_line = f.readline() lines += f.readlines() with open(FMT_VALTESTPOLY_OVALL_PATH, 'w') as f: f.write(header_line) for line in lines: f.write(line) def generate_valtest_batch(area_id, batch_size=8, immean=None, enable_tqdm=False): prefix = area_id_to_prefix(area_id) df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)) fn_im = FMT_VALTEST_MUL_STORE.format(prefix) fn_mask = FMT_VALTEST_MASK_STORE.format(prefix) image_id_list = df_train.ImageId.tolist() if enable_tqdm: pbar = tqdm.tqdm(total=len(image_id_list)) while 1: total_sz = len(image_id_list) n_batch = int(math.floor(total_sz / batch_size) + 1) with tb.open_file(fn_im, 'r') as f_im,\ tb.open_file(fn_mask, 'r') as f_mask: for i_batch in range(n_batch): target_image_ids = image_id_list[ i_batch*batch_size:(i_batch+1)*batch_size ] if len(target_image_ids) == 0: continue X_train = [] y_train = [] for image_id in target_image_ids: im = np.array(f_im.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_train.append(im) mask = np.array(f_mask.get_node('/' + image_id)) mask = (mask > 0).astype(np.uint8) y_train.append(mask) X_train = np.array(X_train) y_train = np.array(y_train) y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) if immean is not None: X_train = X_train - immean if enable_tqdm: pbar.update(y_train.shape[0]) yield (X_train, y_train) if enable_tqdm: pbar.close() def generate_valtrain_batch(area_id, batch_size=8, immean=None): prefix = area_id_to_prefix(area_id) df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)) fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix) fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix) image_id_list = df_train.ImageId.tolist() np.random.shuffle(image_id_list) while 1: total_sz = len(image_id_list) n_batch = int(math.floor(total_sz / batch_size) + 1) with tb.open_file(fn_im, 'r') as f_im,\ tb.open_file(fn_mask, 'r') as f_mask: for i_batch in range(n_batch): target_image_ids = image_id_list[ i_batch*batch_size:(i_batch+1)*batch_size ] if len(target_image_ids) == 0: continue X_train = [] y_train = [] for image_id in target_image_ids: im = np.array(f_im.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_train.append(im) mask = np.array(f_mask.get_node('/' + image_id)) mask = (mask > 0).astype(np.uint8) y_train.append(mask) X_train = np.array(X_train) y_train = np.array(y_train) y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) if immean is not None: X_train = X_train - immean yield (X_train, y_train) def _get_test_data(area_id): prefix = area_id_to_prefix(area_id) fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test) X_test = [] fn_im = FMT_TEST_IM_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in 
enumerate(df_test.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_test.append(im) X_test = np.array(X_test) return X_test def _get_valtest_data(area_id): prefix = area_id_to_prefix(area_id) fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test) X_val = [] fn_im = FMT_VALTEST_IM_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_test.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_val.append(im) X_val = np.array(X_val) y_val = [] fn_mask = FMT_VALTEST_MASK_STORE.format(prefix) with tb.open_file(fn_mask, 'r') as f: for idx, image_id in enumerate(df_test.ImageId.tolist()): mask = np.array(f.get_node('/' + image_id)) mask = (mask > 0.5).astype(np.uint8) y_val.append(mask) y_val = np.array(y_val) y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) return X_val, y_val def _get_valtrain_data(area_id): prefix = area_id_to_prefix(area_id) fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix) df_train = pd.read_csv(fn_train) X_val = [] fn_im = FMT_VALTRAIN_IM_STORE.format(prefix) with tb.open_file(fn_im, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): im = np.array(f.get_node('/' + image_id)) im = np.swapaxes(im, 0, 2) im = np.swapaxes(im, 1, 2) X_val.append(im) X_val = np.array(X_val) y_val = [] fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix) with tb.open_file(fn_mask, 'r') as f: for idx, image_id in enumerate(df_train.ImageId.tolist()): mask = np.array(f.get_node('/' + image_id)) mask = (mask > 0.5).astype(np.uint8) y_val.append(mask) y_val = np.array(y_val) y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE)) return X_val, y_val def predict(area_id): prefix = area_id_to_prefix(area_id) X_test = _get_test_mul_data(area_id) X_mean = get_mul_mean_image(area_id) # Load model weights # Predict and Save prediction result model = get_unet() model.load_weights(FMT_VALMODEL_PATH.format(prefix)) y_pred = model.predict(X_test - X_mean, batch_size=8, verbose=1) del model # Save prediction result fn = FMT_TESTPRED_PATH.format(prefix) with tb.open_file(fn, 'w') as f: atom = tb.Atom.from_dtype(y_pred.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, 'pred', atom, y_pred.shape, filters=filters) ds[:] = y_pred def _internal_validate_predict_best_param(area_id, enable_tqdm=False): param = _get_model_parameter(area_id) epoch = param['fn_epoch'] y_pred = _internal_validate_predict( area_id, epoch=epoch, save_pred=False, enable_tqdm=enable_tqdm) return y_pred def _internal_validate_predict(area_id, epoch=3, save_pred=True, enable_tqdm=False): prefix = area_id_to_prefix(area_id) X_mean = get_mul_mean_image(area_id) # Load model weights # Predict and Save prediction result fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}') fn_model = fn_model.format(epoch=epoch) model = get_unet() model.load_weights(fn_model) fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test, index_col='ImageId') y_pred = model.predict_generator( generate_valtest_batch( area_id, batch_size=64, immean=X_mean, enable_tqdm=enable_tqdm, ), val_samples=len(df_test), ) del model # Save prediction result if save_pred: fn = FMT_VALTESTPRED_PATH.format(prefix) with tb.open_file(fn, 'w') as f: atom = tb.Atom.from_dtype(y_pred.dtype) filters = tb.Filters(complib='blosc', complevel=9) ds = f.create_carray(f.root, 'pred', atom, 
y_pred.shape, filters=filters) ds[:] = y_pred return y_pred def _internal_validate_fscore_wo_pred_file(area_id, epoch=3, min_th=MIN_POLYGON_AREA, enable_tqdm=False): prefix = area_id_to_prefix(area_id) # Prediction phase logger.info("Prediction phase") y_pred = _internal_validate_predict( area_id, epoch=epoch, save_pred=False, enable_tqdm=enable_tqdm) # Postprocessing phase logger.info("Postprocessing phase") fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix) df_test = pd.read_csv(fn_test, index_col='ImageId') fn = FMT_VALTESTPRED_PATH.format(prefix) fn_out = FMT_VALTESTPOLY_PATH.format(prefix) with open(fn_out, 'w') as f: f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n") test_list = df_test.index.tolist() iterator = enumerate(test_list) for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)): df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th) if len(df_poly) > 0: for i, row in df_poly.iterrows(): line = "{},{},\"{}\",{:.6f}\n".format( image_id, row.bid, row.wkt, row.area_ratio) line = _remove_interiors(line) f.write(line) else: f.write("{},{},{},0\n".format( image_id, -1, "POLYGON EMPTY")) # ------------------------ # Validation solution file logger.info("Validation solution file") fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix) df_true =
pd.read_csv(fn_true)
pandas.read_csv
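# --- Illustrative sketch (not part of the dataset row) ---------------------
# The completion above uses pandas.read_csv to load the ground-truth building
# summary before re-emitting it in the "ImageId,BuildingId,PolygonWKT_Pix,
# Confidence" solution format with a fixed confidence of 1.0, as in the
# df_true.iterrows() loop at the top of this row. The in-memory CSV and the
# output path below are assumed placeholders, not the repository's real files.
import io

import pandas as pd

truth_csv = io.StringIO(
    'ImageId,BuildingId,PolygonWKT_Pix\n'
    'AOI_2_Vegas_img1,1,"POLYGON ((0 0, 1 0, 1 1, 0 0))"\n'
    'AOI_2_Vegas_img1,2,POLYGON EMPTY\n'
)
df_true = pd.read_csv(truth_csv)

with open('valtest_truth_sketch.csv', 'w') as f:
    f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
    for _, r in df_true.iterrows():
        f.write("{},{},\"{}\",{:.6f}\n".format(
            r.ImageId, r.BuildingId, r.PolygonWKT_Pix, 1.0))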
import os import shutil from datetime import timedelta from functools import partial from itertools import chain from operator import is_not import numpy as np import pandas as pd import pytz from catalyst import get_calendar from catalyst.assets._assets import TradingPair from catalyst.constants import DATE_TIME_FORMAT, AUTO_INGEST from catalyst.constants import LOG_LEVEL from catalyst.data.minute_bars import BcolzMinuteOverlappingData, \ BcolzMinuteBarMetadata from catalyst.exchange.exchange_bcolz import BcolzExchangeBarReader, \ BcolzExchangeBarWriter from catalyst.exchange.exchange_errors import EmptyValuesInBundleError, \ TempBundleNotFoundError, \ NoDataAvailableOnExchange, \ PricingDataNotLoadedError, DataCorruptionError, PricingDataValueError from catalyst.exchange.utils.bundle_utils import range_in_bundle, \ get_bcolz_chunk, get_df_from_arrays, get_assets from catalyst.exchange.utils.datetime_utils import get_start_dt, \ get_period_label, get_month_start_end, get_year_start_end from catalyst.exchange.utils.exchange_utils import get_exchange_folder, \ save_exchange_symbols, mixin_market_params, get_catalyst_symbol from catalyst.utils.cli import maybe_show_progress from catalyst.utils.paths import ensure_directory from logbook import Logger from pytz import UTC from six import itervalues log = Logger('exchange_bundle', level=LOG_LEVEL) BUNDLE_NAME_TEMPLATE = os.path.join('{root}', '{frequency}_bundle') def _cachpath(symbol, type_): return '-'.join([symbol, type_]) class ExchangeBundle: def __init__(self, exchange_name): self.exchange_name = exchange_name self.minutes_per_day = 1440 self.default_ohlc_ratio = 1000000 self._writers = dict() self._readers = dict() self.calendar = get_calendar('OPEN') self.exchange = None def get_reader(self, data_frequency, path=None): """ Get a data writer object, either a new object or from cache Returns ------- BcolzMinuteBarReader | BcolzDailyBarReader """ if path is None: root = get_exchange_folder(self.exchange_name) path = BUNDLE_NAME_TEMPLATE.format( root=root, frequency=data_frequency ) if path in self._readers and self._readers[path] is not None: return self._readers[path] try: self._readers[path] = BcolzExchangeBarReader( rootdir=path, data_frequency=data_frequency ) except IOError: self._readers[path] = None return self._readers[path] def update_metadata(self, writer, start_dt, end_dt): pass def get_writer(self, start_dt, end_dt, data_frequency): """ Get a data writer object, either a new object or from cache Returns ------- BcolzMinuteBarWriter | BcolzDailyBarWriter """ root = get_exchange_folder(self.exchange_name) path = BUNDLE_NAME_TEMPLATE.format( root=root, frequency=data_frequency ) if path in self._writers: return self._writers[path] ensure_directory(path) if len(os.listdir(path)) > 0: metadata = BcolzMinuteBarMetadata.read(path) write_metadata = False if start_dt < metadata.start_session: write_metadata = True start_session = start_dt else: start_session = metadata.start_session if end_dt > metadata.end_session: write_metadata = True end_session = end_dt else: end_session = metadata.end_session self._writers[path] = \ BcolzExchangeBarWriter( rootdir=path, start_session=start_session, end_session=end_session, write_metadata=write_metadata, data_frequency=data_frequency ) else: self._writers[path] = BcolzExchangeBarWriter( rootdir=path, start_session=start_dt, end_session=end_dt, write_metadata=True, data_frequency=data_frequency ) return self._writers[path] def filter_existing_assets(self, assets, start_dt, end_dt, data_frequency): 
""" For each asset, get the close on the start and end dates of the chunk. If the data exists, the chunk ingestion is complete. If any data is missing we ingest the data. Parameters ---------- assets: list[TradingPair] The assets is scope. start_dt: pd.Timestamp The chunk start date. end_dt: pd.Timestamp The chunk end date. data_frequency: str Returns ------- list[TradingPair] The assets missing from the bundle """ reader = self.get_reader(data_frequency) missing_assets = [] for asset in assets: has_data = range_in_bundle(asset, start_dt, end_dt, reader) if not has_data: missing_assets.append(asset) return missing_assets def _write(self, data, writer, data_frequency): try: writer.write( data=data, show_progress=False, invalid_data_behavior='raise' ) except BcolzMinuteOverlappingData as e: log.debug('chunk already exists: {}'.format(e)) except Exception as e: log.warn('error when writing data: {}, trying again'.format(e)) # This is workaround, there is an issue with empty # session_label when using a newly created writer del self._writers[writer._rootdir] writer = self.get_writer(writer._start_session, writer._end_session, data_frequency) writer.write( data=data, show_progress=False, invalid_data_behavior='raise' ) def get_calendar_periods_range(self, start_dt, end_dt, data_frequency): """ Get a list of dates for the specified range. Parameters ---------- start_dt: pd.Timestamp end_dt: pd.Timestamp data_frequency: str Returns ------- list[datetime] """ return self.calendar.minutes_in_range(start_dt, end_dt) \ if data_frequency == 'minute' \ else self.calendar.sessions_in_range(start_dt, end_dt) def _spot_empty_periods(self, ohlcv_df, asset, data_frequency, empty_rows_behavior): problems = [] nan_rows = ohlcv_df[ohlcv_df.isnull().T.any().T].index if len(nan_rows) > 0: dates = [] for row_date in nan_rows.values: row_date = pd.to_datetime(row_date, utc=True) if row_date > asset.start_date: dates.append(row_date) if len(dates) > 0: end_dt = asset.end_minute if data_frequency == 'minute' \ else asset.end_daily problem = '{name} ({start_dt} to {end_dt}) has empty ' \ 'periods: {dates}'.format( name=asset.symbol, start_dt=asset.start_date.strftime( DATE_TIME_FORMAT), end_dt=end_dt.strftime(DATE_TIME_FORMAT), dates=[date.strftime( DATE_TIME_FORMAT) for date in dates]) if empty_rows_behavior == 'warn': log.warn(problem) elif empty_rows_behavior == 'raise': raise EmptyValuesInBundleError( name=asset.symbol, end_minute=end_dt, dates=dates, ) else: ohlcv_df.dropna(inplace=True) else: problem = None problems.append(problem) return problems def _spot_duplicates(self, ohlcv_df, asset, data_frequency, threshold): # TODO: work in progress series = ohlcv_df.reset_index().groupby('close')['index'].apply( np.array ) ref_delta = timedelta(minutes=1) if data_frequency == 'minute' \ else timedelta(days=1) dups = series.loc[lambda values: [len(x) > 10 for x in values]] for index, dates in dups.iteritems(): prev_date = None for date in dates: if prev_date is not None: delta = (date - prev_date) / 1e9 if delta == ref_delta.seconds: log.info('pex') prev_date = date problems = [] for index, dates in dups.iteritems(): end_dt = asset.end_minute if data_frequency == 'minute' \ else asset.end_daily problem = '{name} ({start_dt} to {end_dt}) has {threshold} ' \ 'identical close values on: {dates}'.format( name=asset.symbol, start_dt=asset.start_date.strftime(DATE_TIME_FORMAT), end_dt=end_dt.strftime(DATE_TIME_FORMAT), threshold=threshold, dates=[pd.to_datetime(date).strftime(DATE_TIME_FORMAT) for date in dates]) 
problems.append(problem) return problems def ingest_df(self, ohlcv_df, data_frequency, asset, writer, empty_rows_behavior='warn', duplicates_threshold=None): """ Ingest a DataFrame of OHLCV data for a given market. Parameters ---------- ohlcv_df: DataFrame data_frequency: str asset: TradingPair writer: empty_rows_behavior: str """ problems = [] if empty_rows_behavior is not 'ignore': problems += self._spot_empty_periods( ohlcv_df, asset, data_frequency, empty_rows_behavior ) # if duplicates_threshold is not None: # problems += self._spot_duplicates( # ohlcv_df, asset, data_frequency, duplicates_threshold # ) data = [] if not ohlcv_df.empty: ohlcv_df.sort_index(inplace=True) data.append((asset.sid, ohlcv_df)) self._write(data, writer, data_frequency) return problems def ingest_ctable(self, asset, data_frequency, period, writer, empty_rows_behavior='strip', duplicates_threshold=100, cleanup=False): """ Merge a ctable bundle chunk into the main bundle for the exchange. Parameters ---------- asset: TradingPair data_frequency: str period: str writer: empty_rows_behavior: str Ensure that the bundle does not have any missing data. cleanup: bool Remove the temp bundle directory after ingestion. Returns ------- list[str] A list of problems which occurred during ingestion. """ problems = [] # Download and extract the bundle path = get_bcolz_chunk( exchange_name=self.exchange_name, symbol=asset.symbol, data_frequency=data_frequency, period=period ) reader = self.get_reader(data_frequency, path=path) if reader is None: try: log.warn('the reader is unable to use bundle: {}, ' 'deleting it.'.format(path)) shutil.rmtree(path) except Exception as e: log.warn('unable to remove temp bundle: {}'.format(e)) raise TempBundleNotFoundError(path=path) start_dt = reader.first_trading_day end_dt = reader.last_available_dt if data_frequency == 'daily': end_dt = end_dt - pd.Timedelta(hours=23, minutes=59) arrays = None try: arrays = reader.load_raw_arrays( sids=[asset.sid], fields=['open', 'high', 'low', 'close', 'volume'], start_dt=start_dt, end_dt=end_dt ) except Exception as e: log.warn('skipping ctable for {} from {} to {}: {}'.format( asset.symbol, start_dt, end_dt, e )) if not arrays: return reader._rootdir periods = self.get_calendar_periods_range( start_dt, end_dt, data_frequency ) df = get_df_from_arrays(arrays, periods) problems += self.ingest_df( ohlcv_df=df, data_frequency=data_frequency, asset=asset, writer=writer, empty_rows_behavior=empty_rows_behavior, duplicates_threshold=duplicates_threshold ) if cleanup: log.debug( 'removing bundle folder following ingestion: {}'.format( reader._rootdir) ) shutil.rmtree(reader._rootdir) return filter(partial(is_not, None), problems) def get_adj_dates(self, start, end, assets, data_frequency): """ Contains a date range to the trading availability of the specified markets. 
Parameters ---------- start: pd.Timestamp end: pd.Timestamp assets: list[TradingPair] data_frequency: str Returns ------- pd.Timestamp, pd.Timestamp """ earliest_trade = None last_entry = None for asset in assets: if earliest_trade is None or earliest_trade > asset.start_date: if asset.start_date >= self.calendar.first_session: earliest_trade = asset.start_date else: earliest_trade = self.calendar.first_session end_asset = asset.end_minute if data_frequency == 'minute' else \ asset.end_daily if end_asset is not None: if last_entry is None or end_asset > last_entry: last_entry = end_asset else: end = None last_entry = None if start is None or \ (earliest_trade is not None and earliest_trade > start): start = earliest_trade if last_entry is not None and (end is None or end > last_entry): end = last_entry.replace(minute=59, hour=23) \ if data_frequency == 'minute' else last_entry if end is None or start is None or start > end: raise NoDataAvailableOnExchange( exchange=[asset.exchange for asset in assets], symbol=[asset.symbol for asset in assets], data_frequency=data_frequency, ) return start, end def prepare_chunks(self, assets, data_frequency, start_dt, end_dt): """ Split a price data request into chunks corresponding to individual bundles. Parameters ---------- assets: list[TradingPair] data_frequency: str start_dt: pd.Timestamp end_dt: pd.Timestamp Returns ------- dict[TradingPair, list[dict(str, Object]]] """ get_start_end = get_month_start_end \ if data_frequency == 'minute' else get_year_start_end # Get a reader for the main bundle to verify if data exists reader = self.get_reader(data_frequency) chunks = dict() for asset in assets: try: # Checking if the the asset has price data in the specified # date range adj_start, adj_end = self.get_adj_dates( start_dt, end_dt, [asset], data_frequency ) except NoDataAvailableOnExchange as e: # If not, we continue to the next asset log.debug('skipping {}: {}'.format(asset.symbol, e)) continue dates = pd.date_range( start=get_period_label(adj_start, data_frequency), end=get_period_label(adj_end, data_frequency), freq='MS' if data_frequency == 'minute' else 'AS', tz=UTC ) # Adjusting the last date of the range to avoid # going over the asset's trading bounds dates.values[0] = adj_start dates.values[-1] = adj_end chunks[asset] = [] for index, dt in enumerate(dates): period_start, period_end = get_start_end( dt=dt, first_day=dt if index == 0 else None, last_day=dt if index == len(dates) - 1 else None ) # Currencies don't always start trading at midnight. # Checking the last minute of the day instead. range_start = period_start.replace(hour=23, minute=59) \ if data_frequency == 'minute' else period_start # Checking if the data already exists in the bundle # for the date range of the chunk. If not, we create # a chunk for ingestion. has_data = range_in_bundle( asset, range_start, period_end, reader ) if not has_data: period = get_period_label(dt, data_frequency) chunk = dict( asset=asset, period=period, ) chunks[asset].append(chunk) # We sort the chunks by end date to ingest most recent data first chunks[asset].sort( key=lambda chunk: pd.to_datetime(chunk['period']) ) return chunks def ingest_assets(self, assets, data_frequency, start_dt=None, end_dt=None, show_progress=False, show_breakdown=False, show_report=False): """ Determine if data is missing from the bundle and attempt to ingest it. 
Parameters ---------- assets: list[TradingPair] data_frequency: str start_dt: pd.Timestamp end_dt: pd.Timestamp show_progress: bool show_breakdown: bool """ if start_dt is None: start_dt = self.calendar.first_session if end_dt is None: end_dt = pd.Timestamp.utcnow() get_start_end = get_month_start_end \ if data_frequency == 'minute' else get_year_start_end # Assign the first and last day of the period start_dt, _ = get_start_end(start_dt) _, end_dt = get_start_end(end_dt) chunks = self.prepare_chunks( assets=assets, data_frequency=data_frequency, start_dt=start_dt, end_dt=end_dt ) problems = [] # This is the common writer for the entire exchange bundle # we want to give an end_date far in time writer = self.get_writer(start_dt, end_dt, data_frequency) if show_breakdown: if chunks: for asset in chunks: with maybe_show_progress( chunks[asset], show_progress, label='Ingesting {frequency} price data for ' '{symbol} on {exchange}'.format( exchange=self.exchange_name, frequency=data_frequency, symbol=asset.symbol )) as it: for chunk in it: problems += self.ingest_ctable( asset=chunk['asset'], data_frequency=data_frequency, period=chunk['period'], writer=writer, empty_rows_behavior='strip', cleanup=True ) else: all_chunks = list(chain.from_iterable(itervalues(chunks))) # We sort the chunks by end date to ingest most recent data first if all_chunks: all_chunks.sort( key=lambda chunk: pd.to_datetime(chunk['period']) ) with maybe_show_progress( all_chunks, show_progress, label='Ingesting {frequency} price data on ' '{exchange}'.format( exchange=self.exchange_name, frequency=data_frequency, )) as it: for chunk in it: problems += self.ingest_ctable( asset=chunk['asset'], data_frequency=data_frequency, period=chunk['period'], writer=writer, empty_rows_behavior='strip', cleanup=True ) if show_report and len(problems) > 0: log.info('problems during ingestion:{}\n'.format( '\n'.join(problems) )) def ingest_csv(self, path, data_frequency, empty_rows_behavior='strip', duplicates_threshold=100): """ Ingest price data from a CSV file. Parameters ---------- path: str data_frequency: str Returns ------- list[str] A list of potential problems detected during ingestion. 
""" log.info('ingesting csv file: {}'.format(path)) if self.exchange is None: # Avoid circular dependencies from catalyst.exchange.utils.factory import get_exchange self.exchange = get_exchange(self.exchange_name) problems = [] df = pd.read_csv( path, header=0, sep=',', dtype=dict( symbol=np.object_, last_traded=np.object_, open=np.float64, high=np.float64, low=np.float64, close=np.float64, volume=np.float64 ), parse_dates=['last_traded'], index_col=None ) min_start_dt = None max_end_dt = None symbols = df['symbol'].unique() # Apply the timezone before creating an index for simplicity df['last_traded'] = df['last_traded'].dt.tz_localize(pytz.UTC) df.set_index(['symbol', 'last_traded'], drop=True, inplace=True) assets = dict() for symbol in symbols: start_dt = df.index.get_level_values(1).min() end_dt = df.index.get_level_values(1).max() end_dt_key = 'end_{}'.format(data_frequency) market = self.exchange.get_market(symbol) if market is None: raise ValueError('symbol not available in the exchange.') params = dict( exchange=self.exchange.name, data_source='local', exchange_symbol=market['id'], ) mixin_market_params(self.exchange_name, params, market) asset_def = self.exchange.get_asset_def(market, True) if asset_def is not None: params['symbol'] = asset_def['symbol'] params['start_date'] = asset_def['start_date'] \ if asset_def['start_date'] < start_dt else start_dt params['end_date'] = asset_def[end_dt_key] \ if asset_def[end_dt_key] > end_dt else end_dt params['end_daily'] = end_dt \ if data_frequency == 'daily' else asset_def['end_daily'] params['end_minute'] = end_dt \ if data_frequency == 'minute' else asset_def['end_minute'] else: params['symbol'] = get_catalyst_symbol(market) params['end_daily'] = end_dt \ if data_frequency == 'daily' else 'N/A' params['end_minute'] = end_dt \ if data_frequency == 'minute' else 'N/A' if min_start_dt is None or start_dt < min_start_dt: min_start_dt = start_dt if max_end_dt is None or end_dt > max_end_dt: max_end_dt = end_dt asset = TradingPair(**params) assets[market['id']] = asset save_exchange_symbols(self.exchange_name, assets, True) writer = self.get_writer( start_dt=min_start_dt.replace(hour=00, minute=00), end_dt=max_end_dt.replace(hour=23, minute=59), data_frequency=data_frequency ) for symbol in assets: # here the symbol is the market['id'] asset = assets[symbol] ohlcv_df = df.loc[ (df.index.get_level_values(0) == asset.symbol) ] # type: pd.DataFrame ohlcv_df.index = ohlcv_df.index.droplevel(0) period_start = start_dt.replace(hour=00, minute=00) period_end = end_dt.replace(hour=23, minute=59) periods = self.get_calendar_periods_range( period_start, period_end, data_frequency ) # We're not really resampling but ensuring that each frame # contains data ohlcv_df = ohlcv_df.reindex(periods, method='ffill') ohlcv_df['volume'] = ohlcv_df['volume'].fillna(0) problems += self.ingest_df( ohlcv_df=ohlcv_df, data_frequency=data_frequency, asset=asset, writer=writer, empty_rows_behavior=empty_rows_behavior, duplicates_threshold=duplicates_threshold ) return filter(partial(is_not, None), problems) def ingest(self, data_frequency, include_symbols=None, exclude_symbols=None, start=None, end=None, csv=None, show_progress=True, show_breakdown=True, show_report=True): """ Inject data based on specified parameters. 
Parameters ---------- data_frequency: str include_symbols: str exclude_symbols: str start: pd.Timestamp end: pd.Timestamp show_progress: bool environ: """ if csv is not None: self.ingest_csv(csv, data_frequency) else: if self.exchange is None: # Avoid circular dependencies from catalyst.exchange.utils.factory import get_exchange self.exchange = get_exchange(self.exchange_name) assets = get_assets( self.exchange, include_symbols, exclude_symbols ) for frequency in data_frequency.split(','): self.ingest_assets( assets=assets, data_frequency=frequency, start_dt=start, end_dt=end, show_progress=show_progress, show_breakdown=show_breakdown, show_report=show_report ) def get_history_window_series_and_load(self, assets, end_dt, bar_count, field, data_frequency, algo_end_dt=None, force_auto_ingest=False ): """ Retrieve price data history, ingest missing data. Parameters ---------- assets: list[TradingPair] end_dt: pd.Timestamp bar_count: int field: str data_frequency: str algo_end_dt: pd.Timestamp force_auto_ingest: Returns ------- Series """ if AUTO_INGEST or force_auto_ingest: try: series = self.get_history_window_series( assets=assets, end_dt=end_dt, bar_count=bar_count, field=field, data_frequency=data_frequency, ) return
pd.DataFrame(series)
pandas.DataFrame
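# --- Illustrative sketch (not part of the dataset row) ---------------------
# The completion wraps the history data returned by get_history_window_series
# in pandas.DataFrame, presumably a mapping of per-asset Series, so each
# TradingPair becomes a column indexed by timestamp. Symbols and prices below
# are made-up placeholders.
import pandas as pd

index = pd.date_range('2018-01-01', periods=3, freq='min', tz='UTC')
series = {
    'btc_usdt': pd.Series([13400.0, 13410.5, 13395.2], index=index),
    'eth_usdt': pd.Series([742.1, 743.0, 741.8], index=index),
}
df = pd.DataFrame(series)
print(df['btc_usdt'].iloc[-1])  # last value in the requested window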
import datetime as dt import numpy as np import pandas as pd from pandas.testing import assert_series_equal, assert_frame_equal import pytest from solarforecastarbiter.datamodel import Observation from solarforecastarbiter.validation import tasks, validator from solarforecastarbiter.validation.quality_mapping import ( LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING, DAILY_VALIDATION_FLAG) @pytest.fixture() def make_observation(single_site): def f(variable): return Observation( name='test', variable=variable, interval_value_type='mean', interval_length=pd.Timedelta('1hr'), interval_label='beginning', site=single_site, uncertainty=0.1, observation_id='OBSID', provider='Organization 1', extra_parameters='') return f @pytest.fixture() def default_index(single_site): return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone), pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone), pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone), pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone), pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)] @pytest.fixture() def daily_index(single_site): out = pd.date_range(start='2019-01-01T08:00:00', end='2019-01-01T19:00:00', freq='1h', tz=single_site.timezone) return out.append( pd.Index([pd.Timestamp('2019-01-02T09:00:00', tz=single_site.timezone)])) def test_validate_ghi(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_ghi_limits_QCRad', 'check_ghi_clearsky', 'detect_clearsky_ghi']] obs = make_observation('ghi') data = pd.Series([10, 1000, -100, 500, 300], index=default_index) flags = tasks.validate_ghi(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'], pd.Series([0, 1, 0, 1, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'], pd.Series(0, index=data.index) * DESCRIPTION_MASK_MAPPING['CLEARSKY']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_validate_mostly_clear(mocker, make_observation): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_ghi_limits_QCRad', 'check_ghi_clearsky', 'detect_clearsky_ghi']] obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min')) index = pd.date_range(start='2019-04-01T11:00', freq='5min', tz=obs.site.timezone, periods=11) data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700], index=index) flags = tasks.validate_ghi(obs, data) for mock in mocks: assert mock.called expected = (pd.Series(0, index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series(0, index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series(0, index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'], pd.Series(0, index=data.index) * DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'], pd.Series([1] * 10 + [0], index=data.index) * DESCRIPTION_MASK_MAPPING['CLEARSKY']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_apply_immediate_validation( 
mocker, make_observation, default_index): obs = make_observation('ghi') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) val = tasks.apply_immediate_validation(obs, data) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] ] assert_frame_equal(val, out) def test_apply_immediate_validation_already_validated( mocker, make_observation, default_index): obs = make_observation('ghi') data = pd.DataFrame( [(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)], index=default_index, columns=['value', 'quality_flag']) val = tasks.apply_immediate_validation(obs, data) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] ] assert_frame_equal(val, out) @pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi', 'poa_global', 'relative_humidity']) def test_apply_immediate_validation_other( mocker, make_observation, default_index, var): mock = mocker.MagicMock() mocker.patch.dict( 'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS', {var: mock}) obs = make_observation(var) data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) tasks.apply_immediate_validation(obs, data) assert mock.called @pytest.mark.parametrize('var', ['availability', 'curtailment', 'event', 'net_load']) def test_apply_immediate_validation_defaults( mocker, make_observation, default_index, var): mock = mocker.spy(tasks, 'validate_defaults') obs = make_observation(var) data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) tasks.apply_immediate_validation(obs, data) assert mock.called def test_fetch_and_validate_observation_ghi(mocker, make_observation, default_index): obs = make_observation('ghi') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | 
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] ] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_fetch_and_validate_observation_ghi_nones( mocker, make_observation, default_index): obs = make_observation('ghi') data = pd.DataFrame( [(None, 1)] * 5, index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() base = ( DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG ) out['quality_flag'] = [ base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'], base, base, base, base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] ] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_fetch_and_validate_observation_not_listed(mocker, make_observation, default_index): obs = make_observation('curtailment') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, LATEST_VERSION_FLAG, LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_dni(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_dni_limits_QCRad']] obs = make_observation('dni') data = pd.Series([10, 1000, -100, 500, 500], index=default_index) flags = tasks.validate_dni(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 0, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_dni(mocker, make_observation, default_index): obs = make_observation('dni') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 
'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_dhi(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_dhi_limits_QCRad']] obs = make_observation('dhi') data = pd.Series([10, 1000, -100, 200, 200], index=default_index) flags = tasks.validate_dhi(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_dhi(mocker, make_observation, default_index): obs = make_observation('dhi') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_poa_global(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_poa_clearsky']] obs = make_observation('poa_global') data = pd.Series([10, 1000, -400, 300, 300], index=default_index) flags = tasks.validate_poa_global(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']) for flag, exp in 
zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_poa_global(mocker, make_observation, default_index): obs = make_observation('poa_global') data = pd.DataFrame( [(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_air_temp(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_temperature_limits']] obs = make_observation('air_temperature') data = pd.Series([10, 1000, -400, 30, 20], index=default_index) flags = tasks.validate_air_temperature(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_air_temperature( mocker, make_observation, default_index): obs = make_observation('air_temperature') data = pd.DataFrame( [(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['OK'] | DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_wind_speed(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in 
['check_timestamp_spacing', 'check_irradiance_day_night', 'check_wind_limits']] obs = make_observation('wind_speed') data = pd.Series([10, 1000, -400, 3, 20], index=default_index) flags = tasks.validate_wind_speed(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_wind_speed( mocker, make_observation, default_index): obs = make_observation('wind_speed') data = pd.DataFrame( [(0, 0), (200, 0), (15, 0), (1, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['OK'] | DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1]) assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) def test_validate_relative_humidity(mocker, make_observation, default_index): mocks = [mocker.patch.object(validator, f, new=mocker.MagicMock( wraps=getattr(validator, f))) for f in ['check_timestamp_spacing', 'check_irradiance_day_night', 'check_rh_limits']] obs = make_observation('relative_humidity') data = pd.Series([10, 101, -400, 60, 20], index=default_index) flags = tasks.validate_relative_humidity(obs, data) for mock in mocks: assert mock.called expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) * DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'], pd.Series([1, 0, 0, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['NIGHTTIME'], pd.Series([0, 1, 1, 0, 0], index=data.index) * DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']) for flag, exp in zip(flags, expected): assert_series_equal(flag, exp | LATEST_VERSION_FLAG, check_names=False) def test_fetch_and_validate_observation_relative_humidity( mocker, make_observation, default_index): obs = make_observation('relative_humidity') data = pd.DataFrame( [(0, 0), (200, 0), (15, 0), (40, 1), (1500, 0)], index=default_index, columns=['value', 'quality_flag']) mocker.patch('solarforecastarbiter.io.api.APISession.get_observation', return_value=obs) mocker.patch( 'solarforecastarbiter.io.api.APISession.get_observation_values', return_value=data) post_mock = mocker.patch( 'solarforecastarbiter.io.api.APISession.post_observation_values') tasks.fetch_and_validate_observation( '', obs.observation_id, data.index[0], data.index[-1]) out = data.copy() out['quality_flag'] = [ DESCRIPTION_MASK_MAPPING['OK'] | DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] 
| LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG] assert post_mock.call_count == 2 assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
pandas.testing.assert_frame_equal
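# --- Illustrative sketch (not part of the dataset row) ---------------------
# Assertion pattern shared by these tests: per-check masks are OR-ed into a
# single quality_flag column and the frame posted to the API is compared with
# pandas.testing.assert_frame_equal. The flag constants below are invented
# stand-ins for DESCRIPTION_MASK_MAPPING / LATEST_VERSION_FLAG.
import pandas as pd
from pandas.testing import assert_frame_equal

LATEST_VERSION_FLAG = 1 << 10
NIGHTTIME = 1 << 4
USER_FLAGGED = 1 << 0

index = pd.date_range('2019-01-01T08:00', periods=3, freq='1h', tz='UTC')
posted = pd.DataFrame(
    {'value': [0.0, 100.0, -1.0],
     'quality_flag': [NIGHTTIME | LATEST_VERSION_FLAG,
                      LATEST_VERSION_FLAG,
                      USER_FLAGGED | LATEST_VERSION_FLAG]},
    index=index)
expected = posted.copy()
assert_frame_equal(posted, expected)  # raises AssertionError on any mismatch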
""" """ # Libraries import itertools import numpy as np import pandas as pd # Pint libraries from pint.errors import UndefinedUnitError # DataBlend libraries from datablend.core.settings import textwrapper from datablend.core.settings import ureg from datablend.utils.compute import add_days from datablend.utils.pandas import nanunique from datablend.utils.pandas_schema import schema_from_json # --------------------------------------------------- # Helper methods # --------------------------------------------------- # Transformation functions def mode(series): """Computes the mode.""" if series.isnull().all(): return np.nan return series.mode()[0] def fbfill(x): """Computes forward and then backward fill.""" return x.ffill().bfill() def bffill(x): """Computes backward and then forward fill""" return x.bfill(x).ffill(x) TRANSFORMATIONS = { 'mode': mode, 'fbfill': fbfill, 'bffill': bffill } def str2func(d): """This method passes strings to functions Parameters --------- d: dict Dictionary where value is a function name.""" # Create deep dictionary copy if isinstance(d, str): if d in TRANSFORMATIONS: return TRANSFORMATIONS[d] # Return return d def swap_day_month(x): """This method... .. note: Check that day/month can be swapped by ensuring they are in the range (1, 12) .. note: Should I return nan? """ if (x.day > 12) or (x.month > 12): return np.nan return x.replace(month=x.day, day=x.month) def add_to_date(x, year=0, month=0, day=0): """This method... .. note: Should I return nan? """ try: return x.replace(year=x.year + year, month=x.month + month, day=x.day + day) except: return x # -------------------------------------------------------------------- # Corrections # -------------------------------------------------------------------- def combinations_lower_upper(word): return list(map(''.join, itertools.product(\ *zip(word.upper(), word.lower())))) # Create map MAP = {1: True, 0: False, '1': True, '0': False} MAP.update({k: True for k in combinations_lower_upper('True')}) MAP.update({k: False for k in combinations_lower_upper('False')}) MAP.update({k: True for k in combinations_lower_upper('Yes')}) MAP.update({k: False for k in combinations_lower_upper('No')}) MAP.update({k: True for k in combinations_lower_upper('Y')}) MAP.update({k: False for k in combinations_lower_upper('N')}) def to_boolean(series, copy=True, errors='raise', replace_map=MAP): """ Parameters ---------- series: pd.Series copy: boolean errors: string replace_map: dict-like Returns ------- pd.Series The corrected series. """ # Library from pandas.api.types import is_bool_dtype # Return series if is_bool_dtype(series): return series # Replace and convert type return series.map(MAP) \ .astype('boolean', copy, errors) def string_correction(series, strip=True, replace_spaces=True, lower=True, title=True, capitalize=False): """Enforces string corrections Parameters ---------- Returns ------- """ # Copy transform = series.copy(deep=True) # Transformations transform = transform.str.lower() transform = transform.str.replace('\s{2,}', ' ') transform = transform.str.strip() if title: transform = transform.str.title() if capitalize: transform = transform.str.capitalize() # Return return transform def dtype_correction(series, dtype, copy=True, errors='raise', downcast=None, **kwargs): """Enforces specificy dtype. .. warning: If series contain decimal values, the output will also contain decimal values even when dtype Int64 is specified. .. note: The astype errors parameter only supports raise or ignore. 
In order to allow coerce for numbers we will handle it ourselves in this method. .. note: series.astype params are dtype, copy, errors. .. note: pd.to_numeric params are errors, downcast .. note: pd.to_datetime params are many (**kwargs) Parameters ---------- series: pd.Series The series to correct dtype: str (pandas dtypes) The dtype to convert the series errors: str (coerce, raise, ignore), default raise Whether to raise, ignore or coerce errors copy: boolean, default True Whether to return a copy downcast: downcast parameters as per pd.to_numeric **kwargs: arguments for pd.to_datetime Returns ------- pd.Series The corrected series. Examples -------- >>> dtype_correction(pd.Series(['1', '2']), dtype='Int64', errors='coerce') """ # Enable coerce functionality if errors == 'coerce': if dtype in ['Int64', 'Float64', 'number']: return pd.to_numeric(series, errors, downcast) elif dtype in ['datetime64']: return pd.to_datetime(series, errors, **kwargs) elif dtype in ['boolean']: return to_boolean(series, copy, 'raise') # Use astype method return series.astype(dtype, copy, errors) def bool_level_correction(sbool, slevel, verbosity=10): """Corrects values of boolean and numeric series. It handles boolean and numeric columns representing either the presence of a condition (True/False) or the presence of such condition through its level (0, 1, 2, 3). - If level > 0 then bool condition is True - If level == 0 then bool condition is False - If level is NaN then bool condition remains .. todo: Think carefully whether we should copy the data to avoid corrections in place. Also consider whether pass all the DataFrame, or exclusively the two columns with values to correct. Something like bool_level_correction(series_bool, series_level) Parameters ---------- df: pd.DataFrame The dataframe sbool: str Label of column with boolean values slevel: str Label of column with numeric values Returns ------- pd.DataFrame The corrected DataFrame. See also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_bool_level.py`. Examples -------- >>> abdominal_pain = [False, True, False, True] >>> abdominal_pain_level = [0, 0, 1, 2] """ # Show information if verbosity > 5: print("Applying... bool_level_correction | {0} | {1}" \ .format(sbool.name, slevel.name)) # Create copy transform = sbool.copy(deep=True) # Convert dtype transform = transform.convert_dtypes() # Correction transform = transform | slevel.fillna(0) > 0 transform[slevel == 0] = False # Return return transform # Set #df.loc[:, sbool] = transform # Convert dtypes #df[[sbool, slevel]] = df[[sbool, slevel]].convert_dtypes() # Indexes #idxs = df.notna().any(axis=1) # Correct both series #df.loc[:, sbool] = df[sbool] | df[slevel].fillna(0) > 0 #df.loc[df[slevel] == 0, sbool] = False #df.loc[idxs, sbool] = df.loc[idxs, [sbool, slevel]].any(axis=1) #df.loc[idxs, slevel] = df.loc[idxs, slevel].replace(to_replace=0, value=pd.NA) #df.loc[idxs, slevel] = df.loc[idxs, slevel].fillna(df[sbool]) # Return #return df def fillna_correction(series, **kwargs): """Corrects filling nan with a strategy It implements the fillna function from pandas including two additional methods: - bffill: concatenate backwards then forwards fill - fbfill: concatenate forwards then backwards fill Parameters ---------- series: pd.Series The series **kwargs: pd.fillna arguments Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_fillna.py`. 
Examples -------- """ if 'method' in kwargs: if kwargs['method'] == 'bffill': return series.transform(bffill) if kwargs['method'] == 'fbfill': return series.transform(fbfill) return series.fillna(**kwargs) def static_correction(series, method, **kwargs): """Corrects filling with a consistent value. .. note: Mode might return a series with two values with the same frequency yet only the first will be considered. .. note: max, min, median, mean might have an inconsistent behaviour when applied to strings (objects) and similarly median and mean might have inconsistent behaviour when applied to boolean. .. note: The mode could be also implemented with value_counts. tidy.shock = \ tidy.groupby(by='StudyNo').shock \ .transform(static_correction, method='max') Parameters ---------- series: pd.Series The pandas series method: string or function The method which can be a function or a string supported by the pandas apply function such as [max, min, median, mean, mode, ...] Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_static.py`. Examples -------- """ #if series.isna().all(): # return series # The series is static already if series.nunique(dropna=False) == 1: return series # Get value to fill with. value = series.apply(method) # For mode a series is returned if isinstance(value, pd.Series): value = value[0] #print("---") #print(series) #print(value) # Transform transform = series.copy(deep=True) transform.values[:] = value #transform.update(np.repeat(value, series.size)) #print(transform) # Return return transform def categorical_correction(series, categories=[], allow_combinations=True, errors='coerce', sep=',', value=pd.NA): """Corrects ensuring only categories specified are included. .. warning: It assumes combinations are comma separated without spaces. The user will have to handle empty spaces issues with replace or trim. .. note: It sorts the output to facilitate comparison of multiple combinations. Thus DENV-1,DENV-2 and DENV-2,DENV-1 will be corrected as DENV-1,DENV2. .. note: It removes duplicates 'DENV-1,DENV-1' as DENV-1 .. todo: How to raise errors? Parameters ---------- series: pd.Series The data to correct. categories: list The categories allowed for the series. allow_combinations: boolean, default True Whether combinations (e.g. DENV-1,DENV-2) are allowed errors: string [raise, coerce], default raise - raise raises an exception - coerce sets inconsistent values as None Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_categorical.py`. """ # Copy series corrected = series.copy(deep=True) # Correct single elements isin = corrected.isin(categories) issep = series.apply(str).str.contains(sep) # Allowing combinations if allow_combinations: # Correct combinations combs = corrected[issep]\ .apply(lambda x: sep.join(sorted( set(x.split(sep)) .intersection(set(categories))))) # Fix empty set combs[combs == ''] = value if errors == 'raise': if (~isin & ~issep).sum() > 0: print("ERROR!") a = combs.compare(corrected) pass if errors == 'coerce': corrected[~isin] = value if allow_combinations: corrected.update(combs) # Returns return corrected def replace_correction(series, **kwargs): """Corrects replacing values. Parameters ---------- series: pd.Series The pandas series **kwargs: Arguments as per pandas replace function Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_replace.py`. 
""" return series.replace(**kwargs) def order_magnitude_correction(series, range, orders=[10, 100]): """Corrects issues related with the order of magnitude. It attempts to correct errors that occurs when inputting data manually and the values have one or two degrees of magnitude higher because one or two digits are pressed accidentally (e.g. pressing/missing extra 0s) or a comma is missing (e.g. 37.7 as 377). Parameters ---------- series: pd.Series The series to correct. range: The desired range to accept the correction. orders: list, default [10, 100] The orders of magnitude to try. Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_order_magnitude.py`. Examples -------- .. doctest:: >>> body_temperature = pd.Series([36, 366, 3660]) >>> order_magnitude_correction(body_temperature, range=[30, 43]) [36, 36.6, 36.66] """ # Create transform transform = pd.to_numeric(series.copy(deep=True)) # Range low, high = range # Loop for i in orders: aux = (transform / i) idx = aux.between(low, high) transform[idx] = aux[idx] # Return return transform def unit_correction(series, unit_from=None, unit_to=None, range=None, record=None): """Corrects issues related with units. .. todo: It can be implemented in a better way. if isintance(series, pd.Series): transformed = series.transformed # the copy Do stuff return depending on input parameter Parameters ---------- series: pd.Series The series to correct unit_from: The unit that has been used to record the series. unit_to: The unit the series should be converted too. range: The range to verify whether the conversion is valid. Returns ------- Examples -------- """ # Import #from datablend.core.settings import ureg # Libraries for pint from pint import UnitRegistry # Create ureg = UnitRegistry() # auto_reduce_dimensions = False # Create transformed transformed = pd.Series(series) # Transformed transformed = (transformed.values * ureg(unit_from)).to(unit_to) # Convert to unit if not isinstance(series, pd.Series): return transformed return pd.Series(index=series.index, data=transformed) # Check in between #between = pd.Series(v).between(low, high) def range_correction(series, range=None, value=np.nan): """Corrects issues with ranges. Some values collected are not within the ranges. They could also be removed using the IQR rule, but if we know the limits we can filter them as errors instead of outliers. .. todo: Check if any outside first otherwise return series. .. todo: Warn if replace value is outside range. .. todo: Include several options for value: value=np.nan value=number value=(low, high) value='edges' tidy.dbp = \ tidy.dbp.transform(range_correction, range=(40, 100)) Parameters ---------- series: pd.Series (numeric series) The pandas series to correct range: range or tuple (min, max) The range value: default np.nan The value to use for corrections Returns ------- pd.Series See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_range.py`. Examples -------- """ # Create transform transform = pd.to_numeric(series.copy(deep=True)) # Range low, high = range # Correction transform[~transform.between(low, high)] = value # Return return transform def unique_true_value_correction(series, value=np.nan, **kwargs): """Ensure there is only one True value. For example, for variable representing events that can only occur once such as event_death, we can correct inconsistent series so that only one True value appears. .. note: If len(series) <=1 return series .. 
note: Set to value=np.nan or value=False .. note: What if there is no true value? .. note: Rename to one_true_value_correction tidy.event_admission = \ tidy.groupby(by=['StudyNo']) \ .event_admission \ .transform(unique_true_value_correction) Parameters ---------- series: pd.Series The boolean series to correct. **kwargs: Arguments to pass to the pandas duplicated function. In particular the argument 'keep' which allows (i) 'first' to keep first appearance, (ii) 'last' to keep last appearance or (iii) 'False' which keeps all appearences. Returns ------- pd.Series The corrected series See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_unique_true_value.py`. Examples -------- """ # Ensure that it is boolean transform = series.copy(deep=True) transform = to_boolean(transform) # There is no true value! if transform.sum() == 0: #print("No value found!") return series # It is already unique if transform.sum() == 1: return series # More than one #transform[transform.duplicated(**kwargs)] = value # Find duplicates only for Trues idxs = transform[transform].duplicated(**kwargs) # From those duplicated set values transform.loc[idxs[idxs].index] = value # Return return transform def causal_correction(x, y): """This method is not implemented yet.""" # if x is one then y must be one. pass def compound_feature_correction(series, compound, verbose=0): """Corrects compound boolean features. Ensures that the values of a compound feature (e.g. bleeding) and its subcategories (e.g. bleeding_skin, bleeding_nose, ...) are consistent. The bleeding feature is set to True if the current value is True or if any of the bleeding sites is True; that is, series | compound.any(). .. note: Option to return bleeding other if it is not included in the compound and the series (bleeding) has True but no subcategory (bleeding site) is found. .. warning: Works with pd.NA but not with np.nan! Parameters ---------- series: pd.Series The series to correct compound: pd.DataFrame The DataFrame with subcategories. Returns ------- pd.Series The corrected series. See Also -------- Examples: :ref:`sphx_glr__examples_correctors_plot_compound.py`. Examples -------- """ if verbose > 5: print("Applying... compound_feature_correction to {0:>20} | {1}" \ .format(series.name, compound.columns.tolist())) # Copy data transform = series.copy(deep=True) # Convert to dtypes transform = transform.convert_dtypes() # Any true any = compound.convert_dtypes().any(axis=1) # Set transform transform = transform | any # other = transform & ~any # Return return transform def date_corrections(x, years=None, use_swap_day_month=True): """Applies various possible date corrections Parameters ---------- x: years: swap_day_month: Returns ------- """ # Original value corrections = [x] # Swapping day month corrections += [swap_day_month(x)] corrections += [add_to_date(x, year=1)] corrections += [add_to_date(x, year=-1)] corrections += [add_to_date(x, month=1)] corrections += [add_to_date(x, month=-1)] # Range of possible years if years is not None: corrections += [x.replace(year=int(y)) for y in years] # Return return pd.Series(
pd.Series(corrections)
pandas.Series
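The correctors above are meant to be applied column-wise, usually through `transform` so that each patient/group is corrected independently. Below is a minimal illustrative sketch (not part of the original module) of how a few of them could be combined on a toy tidy DataFrame; it assumes `static_correction`, `range_correction`, `unique_true_value_correction` and the `to_boolean` helper they rely on are in scope, and the column names (`StudyNo`, `shock`, `dbp`, `event_admission`) simply mirror the docstring snippets.

import numpy as np
import pandas as pd

# Toy tidy data: two patients with a few deliberately inconsistent values.
tidy = pd.DataFrame({
    "StudyNo": ["P1", "P1", "P1", "P2", "P2"],
    "shock": [False, True, False, False, False],          # should be static per patient
    "dbp": [80, 450, 72, 65, 38],                          # 450 and 38 fall outside (40, 100)
    "event_admission": [True, True, False, True, False],   # at most one True per patient
})

# 'shock' becomes the per-patient maximum (True if any record says True).
tidy["shock"] = (
    tidy.groupby("StudyNo")["shock"]
        .transform(static_correction, method="max")
)

# Diastolic blood pressure outside the plausible range is set to NaN.
tidy["dbp"] = tidy.dbp.transform(range_correction, range=(40, 100))

# Duplicate True admission events are replaced with the fill value (NaN by default).
tidy["event_admission"] = (
    tidy.groupby("StudyNo")["event_admission"]
        .transform(unique_true_value_correction)
)

print(tidy)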
# -*- coding:utf-8 -*- __version__ = "0.2.0" __authors__ = "<NAME>" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" import csv import os from datetime import datetime, timedelta from typing import Dict import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import yaml from fameio.scripts.convert_results import run as convert_results from fameio.scripts.make_config import DEFAULT_CONFIG from fameio.scripts.make_config import run as make_config from fameio.source.cli import Config from fameio.source.loader import load_yaml from fameio.source.time import DATE_FORMAT from fameio.source.time import FameTime from ioproc.logger import mainlogger from ioproc.tools import action from mpl_toolkits.axes_grid1 import make_axes_locatable from iovrmr_tools import ( ensure_path_exists, filter_for_columns, get_excel_sheet, get_filter_type_lists, get_from_dict_or_default, insert_agents_from_map, insert_contracts_from_map, get_header, get_field, write_yaml, to_list, round_to_full_hour, ensure_given_data_matches_dims, check_if_window_too_large, raise_and_log_critical_error, get_all_csv_files_in_folder, AmirisOutputs, OPERATOR_AGENTS, SUPPORTED_AGENTS, EXCHANGE, sum_per_agent, call_function_per_agent, ) _FOUND_POWER = "Searched for `power` information in agent '{}' with id '{}. Found {} MW for technology `{}`." _FOUND_NO_POWER = "Searched for `power` information in agent '{}' with id '{}'. Found none." class Amiris: """Defines names of parameters in the `scenario.yaml`""" time_step = "TimeStep" awarded_power_name = "AwardedPowerInMWH" technology_name_for_storage = "Storage" identifier_for_storage = "Device" capacitiy_name_for_storage = "PowerInMW" capacitiy_name = "InstalledPowerInMW" energy_carrier_name = "EnergyCarrier" fuel_type_name = "FuelType" prototype_name = "Prototype" exchange_type_name = "EnergyExchange" storage_type_name = "StorageTrader" renewable_type_name = "VariableRenewableOperator" conventional_type_name = "PredefinedPlantBuilder" biogas_type_name = "Biogas" conventional_plant_operator = "ConventionalPlantOperator" max_efficiency = "Markup In EUR Per MWh" min_efficiency = "Markdown In EUR Per MWh" region_name = "Region" OPERATOR_AGENTS = [ renewable_type_name, conventional_type_name, biogas_type_name, ] FLEX_AGENTS = [storage_type_name] SUPPORTED_AGENTS = ["RenewableTrader", "SystemOperatorTrader"] @action("general") def parse_excel(dmgr, config, params): """ Parses given `excelFile` for specified `excelSheets` as dataframe object with the specified `write_to_dmgr` name. `excelHeader` can be set to `True` or `False`. The action may be specified in the user.yaml as follows: - action1: project: general call: parse_excel data: read_from_dmgr: null write_to_dmgr: powerPlants args: excelFile: Kraftwerksliste_komplett.xlsx excelSheet: Kraftwerke excelHeader: True """ args = params["args"] file = get_field(args, "excelFile") excel_sheet = get_excel_sheet(args) header = get_header(get_field(args, "excelHeader")) parsed_excel = pd.read_excel(io=file, sheet_name=excel_sheet, header=header) with dmgr.overwrite: dmgr[params["data"]["write_to_dmgr"]] = parsed_excel @action("general") def parse_csv(dmgr, config, params): """ Parses given `csvFile` as dataframe object with the specified `write_to_dmgr` name. The default delimiter is `,` if not configured otherwise in `csvSeparator`. The user may also specify a `csvHeader` (int or list of int) and a `csvIndex` (int or list of int). 
The action may be specified in the user.yaml as follows: - action1: project: general call: parse_csv data: read_from_dmgr: null write_to_dmgr: highreso_data args: csvFile: amiris-config/data/High_Res_O_output.csv csvSeparator: ";" csvHeader: [0, 1] csvIndex: [0, 1] """ args = params["args"] file = get_field(args, "csvFile") separator = get_from_dict_or_default("csvSeparator", args, ",") header = get_from_dict_or_default("csvHeader", args, None) if header: header = to_list(header) index_col = get_from_dict_or_default("csvIndex", args, None) if index_col: index_col = to_list(index_col) parsed_data = pd.read_csv(file, sep=separator, header=header, index_col=index_col) with dmgr.overwrite: dmgr[params["data"]["write_to_dmgr"]] = parsed_data @action("general") def convert_time(dmgr, config, params): """ Converts index in data specified in `read_from_dmgr` from given data to datetime string to date format as defined in `fameio`. Start year is derived from the original multiindex assuming that the first value is the year. The action may be specified in the user.yaml as follows: - action: project: general call: convert_time data: read_from_dmgr: highreso_data write_to_dmgr: highreso_data_converted """ data = dmgr[params["data"]["read_from_dmgr"]] start_year = data.index[0][0] # TODO: needs refactoring to account for missing time steps new_index = [d.strftime(DATE_FORMAT) for d in pd.date_range(str(start_year), periods=len(data.index), freq="60min")] data.index = new_index with dmgr.overwrite: dmgr[params["data"]["write_to_dmgr"]] = data @action("general") def filter_data(dmgr, config, params): """ Action to filter specified input data with given filters. Filters are applied on the data set for the specified 'column'. Each entry of the resulting filtered data set is matching all filter 'value' entries specified in the dictionary of 'intersectingFilters'. Filters are applied consecutively narrowing down the set of matching values. If no explicit filter `type` is specified, `type` "EQUAL" is assumed. Otherwise, the following list of numeric filters may be specified "EQUAL", "GREATER", "GREATEREQUAL", "LESS" or "LESSEQUAL". The action may be specified in the user.yaml as follows: - action2: project: general call: filter_data data: read_from_dmgr: powerPlants write_to_dmgr: powerPlants_filtered args: intersectingFilters: - column: COUNTRY values: ['GERMANY'] - column: FUEL values: 'UR' """ args = params["args"] filters = args["intersectingFilters"] data = dmgr[params["data"]["read_from_dmgr"]] filter_columns = [x["column"] for x in filters] filter_value_lists = [x["values"] for x in filters] filter_type_lists = get_filter_type_lists(filters) matching_entries = filter_for_columns(data, filter_columns, filter_value_lists, filter_type_lists) with dmgr.overwrite: dmgr[params["data"]["write_to_dmgr"]] = matching_entries @action("general") def translate_data(dmgr, config, params): """ Translates values in given data set based on definition in `translationDirective`. In specified `fields` which are the column names of the data set, the `origin` value is mapped to a `target` value. If `origin` is set to `'*'`, all entries are translated to `target`. 
The action may be specified in the user.yaml as follows: - action3: project: general call: translate_data data: read_from_dmgr: powerPlants_filtered write_to_dmgr: powerPlants_translated args: translationDirective: translationFieldMap """ data = dmgr[params["data"]["read_from_dmgr"]] args = params["args"] translation_map = config["user"][args["translationDirective"]] for translation in translation_map["Translation"]: field = get_field(translation, "column") for item in translation["map"]: origin = get_field(item, "origin") target = get_field(item, "target") if origin == "*": data[field] = target else: data.loc[data[field] == origin, field] = target with dmgr.overwrite: dmgr[params["data"]["write_to_dmgr"]] = data @action("general") def write_AMIRIS_config(dmgr, config, params): """ Writes AMIRIS specific configuration file in .yaml format by parsing `AMIRISConfigFieldMap` for maps of `Agents` and `Contracts`. The resulting full config file is written to the specified `outputFile` in the globally defined `output: filePath`. The action may be specified in the user.yaml as follows: - action4: project: general call: write_AMIRIS_config data: read_from_dmgr: powerPlants_translated_DE write_to_dmgr: null args: AMIRISConfigFieldMap: conventionalsFieldMap.yaml templateFile: scenario_template.yaml outputFile: scenario.yaml """ data = dmgr[params["data"]["read_from_dmgr"]] args = params["args"] config_file = load_yaml(args["templateFile"]) output_params = config["user"]["global"]["output"] output_path = output_params["filePath"] + "/" ensure_path_exists(output_path) output_file_path = output_path + args["outputFile"] amiris_maps = to_list(args["AMIRISConfigFieldMap"]) for amiris_map in amiris_maps: translation_map = load_yaml(amiris_map) config_file, inserted_agents = insert_agents_from_map(data, translation_map["Agents"], config_file) config_file = insert_contracts_from_map(inserted_agents, translation_map["Contracts"], config_file) write_yaml(config_file, output_file_path) @action("general") def create_AMIRIS_protobuf(dmgr, config, params): """ Calls the `fameio` package and converts given `input` config file to AMIRIS specific protobuf file. If no `output` path in the action definition is specified, the protobuf is saved as 'config.pb' to path as defined in `global->output->filePath`. The action may be specified in the user.yaml as follows: - action: project: general call: create_AMIRIS_protobuf data: read_from_dmgr: null write_to_dmgr: null args: input: output/scenario.yaml output: output/config.pb """ args = params["args"] if "output" in args: output_path = args["output"] filename, file_extension = os.path.splitext(output_path) if file_extension != ".pb": raise_and_log_critical_error( "Provide a path to a config.pb file including the '.pb' extension. " "Got '{}' instead.".format(output_path) ) input_path = args["input"] else: global_output_params = config["user"]["global"]["output"] output_path = global_output_params["filePath"] + "/" input_path = os.path.abspath(output_path + args["input"]) output_path = os.path.abspath(output_path + DEFAULT_CONFIG[Config.OUTPUT]) config = { Config.LOG_LEVEL: "info", Config.LOG_FILE: None, Config.OUTPUT: output_path, } make_config(input_path, config) @action("general") def write_timeseries(dmgr, config, params): """ Writes data of tag `read_from_dmgr` to `.csv`file or multiple .csv files for each column when `multipleFileExport` is set to `True` (default is `False`). The user may limit the export to specific `columns_to_export`. 
If not specified, all columns are exported as is. Header is exported when `header` is `True` (default is `True`). Separator and output path are derived from the `global` `output`. The action may be specified in the user.yaml as follows: - action: project: general call: write_timeseries data: read_from_dmgr: timeSeries_corrected_load write_to_dmgr: Null args: multipleFileExport: True header: False columns_to_export: UNITID, PLANT """ data = dmgr[params["data"]["read_from_dmgr"]] output_params = config["user"]["global"]["output"] output_path = output_params["filePath"] + "/export" ensure_path_exists(output_path) args = params["args"] if "columns_to_export" in args: columns_to_export = to_list(params["args"]["columns_to_export"]) data = data[columns_to_export] header = get_from_dict_or_default("header", args, True) if "multipleFileExport" in args: if args["multipleFileExport"]: for column in data: file_name = "_".join(column).replace(" ", "_") data[column].to_csv( output_path + "/{}.csv".format(file_name), sep=output_params["csvSeparator"], header=header, ) return data.to_csv( path_or_buf=output_path + "/export_{}.csv".format(params["data"]["read_from_dmgr"]), sep=output_params["csvSeparator"], header=header, ) @action("general") def convert_pb(dmgr, config, params): """ Calls the `convertFameResults` routine provided by the `fameio` package to convert `pbFile` in `pbDir` (as defined in the `global` section of the `user.yaml`). The files are written to `pbOutputRaw`. You may specify certain `agentsToExtract` given in a list. This limits the conversion of results to the ones specified. The action may be specified in the user.yaml as follows: - action: project: general call: convert_pb args: agentsToExtract: ['MyAgent1', 'MyAgent2'] """ agents_to_extract = None if "args" in params: if "agentsToExtract" in params["args"]: agents_to_extract = to_list(params["args"]["agentsToExtract"]) run_config = { Config.LOG_LEVEL: "info", Config.LOG_FILE: None, Config.AGENT_LIST: agents_to_extract, Config.OUTPUT: config["user"]["global"]["output"]["pbOutputRaw"], Config.SINGLE_AGENT_EXPORT: False, } ensure_path_exists(run_config[Config.OUTPUT]) if config["user"]["global"]["pbDir"]: path_to_pb = config["user"]["global"]["pbDir"] + config["user"]["global"]["pbFile"] else: path_to_pb = config["user"]["global"]["pbFile"] convert_results(path_to_pb, run_config) @action("general") def run_AMIRIS(dmgr, config, params): """ Invokes the AMIRIS model and executes simulation for `input` with specified parameters in `model`. `jar` is the compiled AMIRIS model which can be exchanged when an updated model is available. The arguments `vm` and `runner` can be used to specify the virtual machine and the runner configuration. Additionally, `fame_args` can be defined in the respective file. If no path to `fame_setup` file is provided, `fameSetup.yaml` is used instead. 
The action may be specified in the user.yaml as follows: - action: project: general call: run_AMIRIS args: input: output/config.pb model: jar: 'amiris/midgard/amiris-core_1.2-jar-with-dependencies.jar' vm: '-ea -Xmx2000M' fame_args: '-Dlog4j.configuration=file:amiris/log4j.properties' runner: 'de.dlr.gitlab.fame.setup.FameRunner' fame_setup: 'amiris/fameSetup.yaml' """ args = params["args"] model = args["model"] fame_setup_path = model["fame_setup"] if "fame_setup" in model else "fameSetup.yaml" with open(fame_setup_path, "r") as stream: try: fame_setup = yaml.safe_load(stream) except yaml.YAMLError: raise_and_log_critical_error("Cannot open fame setup file in {}.".format(fame_setup_path)) ensure_path_exists(fame_setup["outputPath"]) call = "java {} -cp {} {} {} -f {} -s {}".format( model["vm"], model["jar"], model["fame_args"], model["runner"], args["input"], fame_setup_path, ) os.system(call) def get_region_of_all_exchanges(scenario: dict) -> dict: """Returns {ID: region_name, ...} map for `EnergyExchange` in given `scenario`. If no region is found, the string `Region` is applied""" try: exchanges = [ {exchange["Id"]: exchange["Attributes"]["Region"]} for exchange in scenario["Agents"] if exchange["Type"] == "EnergyExchange" ] except KeyError: exchanges = [ {exchange["Id"]: "Region"} for exchange in scenario["Agents"] if exchange["Type"] == "EnergyExchange" ] output = {} for exchange in exchanges: output.update(exchange) return output @action("general") def compile_price_exchange_file(dmgr, config, params): """ Parses the `agent` csv file and converts the `fame_time_steps` into `datetime` strings. The `indexColumn` has to be specified. Further, a `mapping` of columns can be provided as dict with the origin values as `key` and the target values as `value`. The export is performed by a `groupby` specification which defines the columns for which a sum of all remaining column values is conducted. Replaces `Id` with `Region` name retrieved from specified `scenario` yaml where all `Exchanges` are parsed. If only one EnergyExchange is found, the value "Region" is applied. 
The action may be specified in the user.yaml as follows: - action: project: general call: compile_price_exchange_file args: agent: EnergyExchange indexColumn: 'TimeStep' mapping: TimeStep: Time AgentId: Region groupby: ["Time", "Region"] scenario: "../couple_markets_AT-DE/data/scenario.yaml" """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] agent = args["agent"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] ensure_path_exists(output_folder_path) parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) index_label = args["indexColumn"] if "mapping" in args: parsed_data.rename(columns=args["mapping"], inplace=True) index_label = args["mapping"][index_label] parsed_data[index_label] = [ time.replace("_", " ") for time in convert_fame_time_to_datetime(parsed_data[index_label]) ] parsed_data = parsed_data.groupby(to_list(args["groupby"]), as_index=False).sum() parsed_data.set_index(index_label, inplace=True) parsed_data.rename( {"ElectricityPriceInEURperMWH": "Electricity Price In EUR Per MWh"}, axis=1, inplace=True, ) output = parsed_data.reset_index()[["Time", "Region", "Electricity Price In EUR Per MWh"]] scenario = load_yaml(args["scenario"]) region_map = get_region_of_all_exchanges(scenario) output["Region"].replace(region_map, inplace=True) year = get_simulation_year_from(scenario) run_name = config["user"]["global"]["pbFile"].split(".")[0] run_id = get_runid_from(scenario) institution = "DLR" value = "Electricity-Prices-In-EUR-Per-MWh" file_name = "{}_{}_{}_{}_{}.csv".format(str(year), run_name, run_id, institution, value) output.to_csv( path_or_buf=output_folder_path + file_name, sep=config["user"]["global"]["output"]["csvSeparator"], index=None, ) def convert_fame_time_to_datetime(fame_times): """Returns converted `fame_times` (index format) as `date_times` (list of strings) rounded to minutes""" date_times = list() for timestep in fame_times.values: timestep = round_to_full_hour(timestep) date_times.append(FameTime.convert_fame_time_step_to_datetime(timestep)) return date_times def get_dict_with_installed_power_by_id(scenario): """Returns dict with `Id` and `InstalledPowerInMW` for all eligible agents in given `scenario`""" agents_with_installed_power = dict() for agent in scenario["Agents"]: if "Attributes" in agent: if "InstalledPowerInMW" in agent["Attributes"]: capacity = agent["Attributes"]["InstalledPowerInMW"] agents_with_installed_power[agent["Id"]] = capacity return agents_with_installed_power def get_installed_power_by_technology(scenario: dict, single_file_mode=False) -> pd.DataFrame: """ Parses given `scenario` looking for agents with attributes. If `InstalledPowerInMW` is found capacity is added for technology specified either by `EnergyCarrier` or `FuelType`. If `Device` is found in agent attributes, the `technology` `STORAGE` is used. Returns pd.DataFrame with aggregated `InstalledPowerInMW` by `Type` or (if `single_file_mode` is enabled) each `region_name` in `scenario` with `InstalledPowerInMW` per `Type`. 
""" output_labels = ["Energy Carrier", "Region", "Installed Capacity In MW"] output_labels_no_region = ["Energy Carrier", "Installed Capacity In MW"] installed_power_by_type = get_installed_power_by_tech_from(scenario) if single_file_mode: all_plants = get_all_plants_from(scenario) agents_with_matched_regions = match_agent_to_technology(all_plants, installed_power_by_type) installed_power_by_type = aggregate_technologies(agents_with_matched_regions, output_labels) else: for technology in installed_power_by_type: installed_power_by_type[technology] = sum( [sum(list(entry.values())) for entry in installed_power_by_type[technology]] ) installed_power_by_type = pd.DataFrame( installed_power_by_type.items(), index=None, columns=output_labels_no_region, ) return installed_power_by_type def aggregate_technologies(agents_with_matched_regions, output_labels) -> pd.DataFrame: """Returns pd.Dataframe of `EnergyCarrier`, `Region`, `Installed capacities` (names defined in output_labels)""" for region in agents_with_matched_regions: aggregated_techs = {} for item in agents_with_matched_regions[region]: for technology in item.keys(): aggregated_techs[technology] = aggregated_techs.get(technology, 0) + item[technology] agents_with_matched_regions[region] = aggregated_techs installed_power_by_type = ( pd.DataFrame(agents_with_matched_regions).stack().reset_index().set_axis(output_labels, axis=1) ) return installed_power_by_type def match_agent_to_technology(all_plants: list, installed_power_by_type: list) -> dict: """Returns `dict` where all agents {id: capacity} are matched to a technology""" output = {} for technology in installed_power_by_type: for plant in installed_power_by_type[technology]: for agent_id, capacity in plant.items(): region = next(i[Amiris.region_name] for i in all_plants if i["Id"] == agent_id) output.setdefault(region, []).append({technology: capacity}) return output def get_installed_power_by_tech_from(scenario: dict) -> dict: """ Returns `dict` of technologies each with list of {agent_id: installed capacity} from given `scenario` Allows that `Form` is specified in a agents' attributes to separate technologoes of same fuel_type_name further """ installed_power_by_type = dict() for agent in scenario["Agents"]: if "Attributes" in agent: if Amiris.capacitiy_name in agent["Attributes"]: capacity = agent["Attributes"][Amiris.capacitiy_name] if Amiris.energy_carrier_name in agent["Attributes"]: technology = agent["Attributes"][Amiris.energy_carrier_name] elif Amiris.fuel_type_name in agent["Attributes"][Amiris.prototype_name]: if "Form" in agent["Attributes"]: technology = agent["Attributes"]["Form"] else: technology = agent["Attributes"][Amiris.prototype_name][Amiris.fuel_type_name] else: raise_and_log_critical_error( "Found no eligible technology for agent '{}' with id '{}'".format(agent, agent["Id"]) ) installed_power_by_type.setdefault(technology, []).append({agent["Id"]: capacity}) mainlogger.info(_FOUND_POWER.format(agent["Type"], agent["Id"], capacity, technology)) if Amiris.identifier_for_storage in agent["Attributes"]: capacity = agent["Attributes"][Amiris.identifier_for_storage][Amiris.capacitiy_name_for_storage] technology = Amiris.technology_name_for_storage installed_power_by_type.setdefault(technology, []).append({agent["Id"]: capacity}) mainlogger.info(_FOUND_POWER.format(agent["Type"], agent["Id"], capacity, technology)) else: mainlogger.info(_FOUND_NO_POWER.format(agent["Type"], agent["Id"])) return installed_power_by_type def get_all_plants_from(scenario: dict) -> list: 
"""Returns list of all plants with `Id`, `Type`, `Region` (default: "Region") from given `scenario` by searching relevant contracts""" contracts = scenario["Contracts"] exchanges = get_agent_with_id_and_type_and_region_from(scenario, Amiris.exchange_type_name) storages = get_agent_with_id_and_type_and_region_from(scenario, Amiris.storage_type_name) renewables = get_agent_with_id_and_type_and_region_from(scenario, Amiris.renewable_type_name) conventionals = get_agent_with_id_and_type_and_region_from(scenario, Amiris.conventional_type_name) # todo look for more elegant way to add biogas to res biogas = get_agent_with_id_and_type_and_region_from(scenario, Amiris.biogas_type_name) extended_res = renewables + biogas for storage in storages: contract_to_exchange = get_contract_from_sender_with_product(storage["Id"], "Bids", contracts) storage[Amiris.region_name] = get_region_of_connected_exchange(exchanges, contract_to_exchange) for plant in extended_res: trader_id = get_trader_id_for(plant["Id"], "SetRegistration", contracts) contract_to_exchange = get_contract_from_sender_with_product(trader_id, "Bids", contracts) plant[Amiris.region_name] = get_region_of_connected_exchange(exchanges, contract_to_exchange) for plant in conventionals: operator_id, plant_index_in_list = get_operator_id_for(plant, contracts) trader_id = get_trader_id_for(operator_id, "MarginalCostForecast", contracts, plant_index_in_list) contract_to_exchange = get_contract_from_sender_with_product(trader_id, "Bids", contracts) plant[Amiris.region_name] = get_region_of_connected_exchange(exchanges, contract_to_exchange) all_plants_with_regions = storages + renewables + conventionals + biogas return all_plants_with_regions def get_trader_id_for(agent_id: int, product: str, contracts: list, plant_index_in_list=0) -> int: """Returns `trader_id` for agent_id looking for `product` in `contracts` considering position as stated in `plant_index_in_list`""" contract_to_trader = get_contract_from_sender_with_product(agent_id, product, contracts) trader_id = to_list(contract_to_trader["ReceiverId"])[plant_index_in_list] return trader_id def get_agent_with_id_and_type_and_region_from(scenario: dict, agent_type_name: str) -> list: """Returns list of `{Id: agent_id, Type: agent_type_name, Region: (default: None)}` for all agents of type `agent_type_name` in given `scenario`""" try: return [ { "Id": agent["Id"], "Type": agent_type_name, "Region": get_from_dict_or_default(Amiris.region_name, agent["Attributes"], None), } for agent in scenario["Agents"] if agent["Type"] == agent_type_name ] except KeyError: raise_and_log_critical_error("Failed looking for agent_type `{}` in given scenario.".format(agent_type_name)) def get_region_of_connected_exchange(exchanges: list, contract: dict) -> str: """Returns `region` of connected exchange from `ReceiverId` in `contract`, returns "Region" if not found""" _NOT_EXACTLY_ONE_EXCHANGE = "Your criteria match {} exchanges. Expected to find single exchange in " "Contract {}." 
exchange = [exchange for exchange in exchanges if exchange["Id"] == contract["ReceiverId"]] assert len(exchange) == 1, _NOT_EXACTLY_ONE_EXCHANGE.format(len(exchange), contract) if exchange[0]["Region"] is None: return "Region" else: return exchange[0]["Region"] def get_contract_from_sender_with_product(sender_id: int, product_name: str, contracts: list) -> dict: """ Returns single contract from `sender` with `product_name` found in `contracts`, raises AssertionError otherwise """ _NOT_EXACTLY_ONE_CONTRACT = ( "Your criteria match {} contracts. Expected to find single contract with " "`SenderId` '{}' and `ProductName` '{}'." ) contract = [ contract for contract in contracts if contract["ProductName"] == product_name if sender_id in to_list(contract["SenderId"]) ] assert len(contract) == 1, _NOT_EXACTLY_ONE_CONTRACT.format(len(contract), sender_id, product_name) return contract[0] @action("general") def plot_lines(dmgr, config, params): """ Generates `m` x `n` line plot for given `window` size for specified `agent`. You may specify a conversion to datetime format from `fame` `timesteps` by enabling `convertToDateTime`. The plot specifics are defined in `figure` and the plot data in list of `plotData`. You may specify a `ylim` as a reference to another subplot which limits the scale of the y-axis. `ylabel` sets the name of the y-axis. A renaming of values can be defined in `map` with keys as `origins` (old values) and values as `targets` (new values). `conversion` allows to convert to different units. A `watermark` is printed if stated `true`. Make sure that the number of items in `plotData` matches your `figure` dimensions specified as `ncols` and `nrows`. The action may be specified in the user.yaml as follows: - action: project: general call: plot_lines args: agent: EnergyExchange time: convertToDateTime: true originName: TimeStep targetName: DateTime groupby: ["DateTime", "AgentId"] window: 168 figure: width: 18 height: 9 dpi: 80 ncols: 2 nrows: 2 map: AgentId: 1: "Market A" 6: "Market B" 60: "Market C" 600: "Market D" watermark: True plotData: - column: 'ElectricityPriceInEURperMWH' title: 'Electricity Price In EUR per MWh' position: [0, 0] ylabel: "EUR/MWh" - column: 'CoupledElectricityPriceInEURperMWH' title: 'Coupled Electricity Price In EUR per MWh' position: [0, 1] ylimFrom: [0, 0] ylabel: "EUR/MWh" - column: 'CoupledTotalAwardedPowerInMW' title: 'Coupled Total Awarded Power In GW' position: [1, 1] ylabel: "GW" conversion: 0.001 - column: 'TotalAwardedPowerInMW' title: 'Total Awarded Power In GW' position: [1, 0] ylimFrom: [1, 1] ylabel: "GW" conversion: 0.001 """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] agent_name = args["agent"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] ensure_path_exists(output_folder_path) x_dim = args["figure"]["ncols"] y_dim = args["figure"]["nrows"] ensure_given_data_matches_dims(x_dim, y_dim, args["plotData"]) parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent_name + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) if args["time"]["convertToDateTime"]: parsed_data = convert_to_datetime(args, parsed_data) grouped_data = parsed_data.groupby(to_list(args["groupby"]), as_index=False).sum() if "map" in args: for element in args["map"]: grouped_data["AgentId"].replace(args["map"][element], inplace=True) sns.set_theme() window = args["window"] time_col_name = args["time"]["targetName"] begin = grouped_data[time_col_name].min() end = begin +
timedelta(hours=window) check_if_window_too_large(end, grouped_data[time_col_name].max()) output_path = output_folder_path + "/plots/" ensure_path_exists(output_path) while end < grouped_data[time_col_name].max(): data = grouped_data[grouped_data[time_col_name].between(begin, end)] f, a = plt.subplots( nrows=y_dim, ncols=x_dim, figsize=(args["figure"]["width"], args["figure"]["height"]), ) if x_dim == 1 and y_dim == 1: a = [a] for subplot in args["plotData"]: selection = (*args["groupby"], subplot["column"]) plot_data = data.pivot(*selection) if "conversion" in subplot: plot_data = data.pivot(*selection) * subplot["conversion"] pos_x = subplot["position"][0] pos_y = subplot["position"][1] ylim = get_from_dict_or_default("ylimFrom", subplot, None) if ylim: if x_dim == 1 or y_dim == 1: ylim = a[subplot["ylimFrom"][0]].get_ylim() else: ylim = a[subplot["ylimFrom"][0], subplot["ylimFrom"][1]].get_ylim() if x_dim == 1: s = pos_x elif y_dim == 1: s = pos_y else: s = (pos_x, pos_y) plot_data.plot( ax=a[s], title=subplot["title"], ylim=ylim, label="Test", legend=0, ) a[s].set_ylabel(subplot["ylabel"]) a[s].set_xlabel("") a[s].grid(True, which="both") if x_dim == 1 or y_dim == 1: lines, labels = a[0].get_legend_handles_labels() else: lines, labels = a[0][0].get_legend_handles_labels() f.legend(lines, labels, loc="lower center", ncol=len(labels)) plt.tight_layout() plt.subplots_adjust(bottom=0.15) if get_from_dict_or_default("watermark", args, False): f.text( 0.01, 0.01, config["user"]["global"]["pbFile"], fontsize=8, c="grey", ) time_string = datetime.strftime(begin, format="%Y-%m-%d_%H%M") plt.savefig( fname=output_path + agent_name + "_" + time_string[:-5] + "_" + str(window) + "_" + str(x_dim) + "X" + str(y_dim) + ".png", dpi=args["figure"]["dpi"], ) begin += timedelta(hours=window) end += timedelta(hours=window) plt.close("all") sns.reset_orig() def convert_to_datetime(args, parsed_data): """Converts `fame` time to `datetime`""" origin_name = args["time"]["originName"] target_name = args["time"]["targetName"] parsed_data[target_name] = [round_to_full_hour(x) for x in parsed_data[origin_name]] parsed_data[target_name] = [ datetime.strptime(FameTime.convert_fame_time_step_to_datetime(x), DATE_FORMAT) for x in parsed_data[target_name] ] parsed_data = parsed_data.drop(labels=origin_name, axis=1) return parsed_data @action("general") def analyse_time_series(dmgr, config, params): """ Reads `.csv` of given `agent` and performs `time` conversion from `fame timesteps` to `datetime` (if enabled). Writes standard statistical parameters (min, max, mean, etc.) for each `column` specified in `analysis` to disk. When `consoleOutput is set to `True`, the output gets also printed to the console. 
The action may be specified in the user.yaml as follows: - action: project: general call: analyse_time_series args: agent: ElectricityExchange time: convertToDateTime: true originName: TimeStep targetName: DateTime analysis: - column: 'ElectricityPriceInEURperMWH' countNumberOfValue: 3000 consoleOutput: True """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] agent_name = args["agent"] parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent_name + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) if args["time"]["convertToDateTime"]: parsed_data = convert_to_datetime(args, parsed_data) grouped_data = parsed_data.groupby(to_list(args["groupby"]), as_index=False).sum() grouped_data_by_agentid = {key: value for (key, value) in grouped_data.groupby("AgentId")} ensure_path_exists(output_folder_path) for evaluation in args["analysis"]: column = evaluation["column"] file_name = output_folder_path + "analysis_" + column + ".txt" file = open(file_name, "w") for agent_id, data in grouped_data_by_agentid.items(): selected_data = data[column] write_to_file( file, "`{}` for Agent with Id `{}`".format(column, str(agent_id)), args["consoleOutput"], ) write_to_file( file, "{}".format(selected_data.describe().reset_index().to_string(header=None, index=None)), args["consoleOutput"], ) write_to_file( file, "`{}` occurances: {}".format( evaluation["countNumberOfValue"], len(selected_data[selected_data == evaluation["countNumberOfValue"]]), ), args["consoleOutput"], ) write_to_file(file, "-" * 40, args["consoleOutput"]) def write_to_file(file, text, print_to_console=False): """Writes `text` to `file` and additionally to console if `print_to_console` is `True`""" file.write(text + "\n") if print_to_console: print(text) @action("general") def plot_price_duration_curve(dmgr, config, params): """ Generates a price duration plot for a specified exchange `agent`. For this, values in `column` are sorted in ascending order and plotted against time. If you provide a `csv` file to `backtestAgainst`, you will receive an additional dashed curve to validate against a provided timeseries. You may specify a conversion to datetime format from `fame` `timesteps` by enabling `convertToDateTime`. The plot specifics are defined in `figure`. A renaming of names can be defined in `map` with keys as `origins` (old values) and values as `targets` (new values). `ylabel` defines the labelling of the y-axis, whereas the x-axis is always called `hours`. A `watermark` is printed if stated `true`. 
The action may be specified in the user.yaml as follows: - action: project: general call: plot_price_duration_curve args: agent: EnergyExchange column: 'ElectricityPriceInEURperMWH' time: convertToDateTime: true originName: TimeStep targetName: DateTime groupby: [ "DateTime", "AgentId" ] figure: width: 16 height: 7 dpi: 80 map: AgentId: 1: "Market A" 6: "Market B" 60: "Market C" 600: "Market D" backtestAgainst: file.csv ylabel: 'EUR/MWh' watermark: True """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] agent_name = args["agent"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent_name + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) if args["time"]["convertToDateTime"]: parsed_data = convert_to_datetime(args, parsed_data) grouped_data = parsed_data.groupby(to_list(args["groupby"]), as_index=False).sum() if "map" in args: for element in args["map"]: grouped_data["AgentId"].replace(args["map"][element], inplace=True) sns.set_theme() output_path = output_folder_path + "/plots/" ensure_path_exists(output_path) f, a = plt.subplots( nrows=1, ncols=1, figsize=(args["figure"]["width"], args["figure"]["height"]), ) selection = (*args["groupby"], args["column"]) plot_data = grouped_data.pivot(*selection) for i, col in enumerate(plot_data.columns): sorted_data = plot_data[col].copy().sort_values(axis=0) sorted_data.plot(ax=a, legend=0, use_index=False) if "backtestAgainst" in args: backtest_data = pd.read_csv( filepath_or_buffer=args["backtestAgainst"], sep=config["user"]["global"]["output"]["csvSeparator"], names=[args["time"]["originName"], "Backtest"], ) backtest_data = convert_to_datetime(args, backtest_data) backtest_data.set_index(args["time"]["targetName"], inplace=True) backtest_data = backtest_data.copy().sort_values(axis=0, by="Backtest") backtest_data.plot(ax=a, legend=0, use_index=False, linestyle="--", c="black") a.set_ylabel(args["ylabel"]) a.set_xlabel("Hours") plt.suptitle("") plt.title(str(grouped_data[args["time"]["targetName"]].min().year), size=20) a.grid(True, which="both") lines, labels = a.get_legend_handles_labels() f.legend( lines, labels, loc="lower center", ncol=len(labels), bbox_to_anchor=(0.5, 0.06), ) plt.tight_layout() plt.subplots_adjust(bottom=0.26) if get_from_dict_or_default("watermark", args, False): f.text( 0.01, 0.01, config["user"]["global"]["pbFile"], fontsize=8, c="grey", ) plt.savefig( fname=output_path + agent_name + "_" + args["column"] + "_price_duration.png", dpi=args["figure"]["dpi"], ) plt.close("all") sns.reset_orig() @action("general") def plot_multiple_lines(dmgr, config, params): """ Generates `m` x `n` line plot for given `window` size for specified `agent`. If you provide a `csv` file to `backtestAgainst`, you will receive an additional dashed curve to validate against a provided timeseries. You may specify a conversion to datetime format from `fame` `timesteps` by enabling `convertToDateTime`. The plot specifics are defined in `figure` and the plot data in list of `plotData`. `ymin` and `ymax` define global lower and upper limits for the y-axis. `ylabel` sets the name of the y-axis. A renaming of values can be defined in `map` with keys as `origins` (old values) and values as `targets` (new values). `conversion` allows to convert to different units. A `watermark` is printed if stated `true`. 
Make sure that the number of items in `plotData` matches your `figure` dimensions specified as `ncols` and `nrows`. The action may be specified in the user.yaml as follows: - action: project: general call: plot_multiple_lines args: agent: EnergyExchange time: convertToDateTime: true originName: TimeStep targetName: DateTime groupby: ["DateTime", "AgentId"] window: 168 figure: width: 18 height: 9 dpi: 80 ncols: 2 nrows: 2 ymin: -10 ymax: 100 map: AgentId: 1: "Market A" 6: "Market B" 60: "Market C" 600: "Market D" watermark: True backtestAgainst: file.csv plotData: - column: 'ElectricityPriceInEURperMWH' title: 'Electricity Price In EUR per MWh' position: [0, 0] ylabel: "EUR/MWh" - column: 'CoupledElectricityPriceInEURperMWH' title: 'Coupled Electricity Price In EUR per MWh' position: [0, 1] ylabel: "EUR/MWh" - column: 'CoupledTotalAwardedPowerInMW' title: 'Coupled Total Awarded Power In GW' position: [1, 1] ylabel: "GW" conversion: 0.001 - column: 'TotalAwardedPowerInMW' title: 'Total Awarded Power In GW' position: [1, 0] ylabel: "GW" conversion: 0.001 """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] agent_name = args["agent"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] x_dim = args["figure"]["ncols"] y_dim = args["figure"]["nrows"] y_min = args["figure"]["ymin"] y_max = args["figure"]["ymax"] ensure_given_data_matches_dims(x_dim, y_dim, args["plotData"]) parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent_name + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) if args["time"]["convertToDateTime"]: parsed_data = convert_to_datetime(args, parsed_data) grouped_data = parsed_data.groupby(to_list(args["groupby"]), as_index=False).sum() if "backtestAgainst" in args: backtest_data = pd.read_csv( filepath_or_buffer=args["backtestAgainst"], sep=config["user"]["global"]["output"]["csvSeparator"], names=[args["time"]["originName"], "Historical prices"], ) backtest_data = convert_to_datetime(args, backtest_data) if "map" in args: for element in args["map"]: grouped_data["AgentId"].replace(args["map"][element], inplace=True) sns.set_theme() window = args["window"] time_col_name = args["time"]["targetName"] begin = grouped_data[time_col_name].min() end = begin + timedelta(hours=window) check_if_window_too_large(end, grouped_data[time_col_name].max()) output_path = output_folder_path + "/plots/" ensure_path_exists(output_path) while end < grouped_data[time_col_name].max(): data = grouped_data[grouped_data[time_col_name].between(begin, end)] f, a = plt.subplots( nrows=y_dim, ncols=x_dim, figsize=(args["figure"]["width"], args["figure"]["height"]), ) if x_dim == 1 and y_dim == 1: a = [a] for subplot in args["plotData"]: selection = (*args["groupby"], subplot["column"]) plot_data = data.pivot(*selection) if "conversion" in subplot: plot_data = data.pivot(*selection) * subplot["conversion"] pos_x = subplot["position"][0] pos_y = subplot["position"][1] if x_dim == 1: s = pos_x elif y_dim == 1: s = pos_y else: s = (pos_x, pos_y) plot_data.plot( ax=a[s], title=subplot["title"], ylim=(y_min, y_max), label="Test", legend=0, ) a[s].set_ylabel(subplot["ylabel"]) if "backtestAgainst" in args: backtest_data_to_plot = backtest_data[backtest_data[time_col_name].between(begin, end)] backtest_data_to_plot.set_index(args["time"]["targetName"], inplace=True) backtest_data_to_plot.plot(ax=a[s], legend=0, linestyle="--", c="black") a[s].set_xlabel("") a[s].grid(True, which="both") if x_dim == 1 or y_dim == 1: lines, 
labels = a[0].get_legend_handles_labels() else: lines, labels = a[0][0].get_legend_handles_labels() f.legend(lines, labels, loc="lower center", ncol=len(labels)) plt.tight_layout() plt.subplots_adjust(bottom=0.20) if get_from_dict_or_default("watermark", args, False): f.text( 0.01, 0.01, config["user"]["global"]["pbFile"], fontsize=8, c="grey", ) time_string = datetime.strftime(begin, format="%Y-%m-%d_%H%M") plt.savefig( fname=output_path + agent_name + "_" + time_string[:-5] + "_" + str(window) + "_" + str(x_dim) + "X" + str(y_dim) + ".png", dpi=args["figure"]["dpi"], ) begin += timedelta(hours=window) end += timedelta(hours=window) plt.close("all") sns.reset_orig() @action("general") def plot_heat_map(dmgr, config, params): """ Generates a heat map plot for a specified `agent`. For this, values in `column` are plotted in a 2D plot with `days` on x-axis and `hours` on y-axis. You may specify a conversion to datetime format from `fame` `timesteps` by enabling `convertToDateTime`. The plot specifics are defined in `figure`. The `cbarlabel` defines the labelling of the color bar, whereas `cmap` defines the color scheme (see available list for colormaps in matplotlib). `vmin` defines the minimum value for the color bar. If not given, a symmetrical color bar is used with the max value. If specified with as `MINIMUM`, the minimum value in data is used as minimum instead. You may also use your custom float or integer value. The action may be specified in the user.yaml as follows: - action: project: general call: plot_heat_map args: agent: StorageTrader column: 'AwardedPower' time: convertToDateTime: true originName: TimeStep targetName: DateTime groupby: [ "DateTime", "AwardedPower" ] figure: width: 13 height: 5 dpi: 200 cbarlabel: 'Awareded Power in MWh' cmap: "RdBu" vmin: MINIMUM """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] agent_name = args["agent"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] parsed_data = pd.read_csv( filepath_or_buffer=folder_name + "/" + agent_name + ".csv", sep=config["user"]["global"]["output"]["csvSeparator"], ) if args["time"]["convertToDateTime"]: parsed_data = convert_to_datetime(args, parsed_data) plot_data = parsed_data[to_list(args["groupby"])].groupby(["DateTime"]).sum().values.reshape(-1, 24).T f, a = plt.subplots( nrows=1, ncols=1, figsize=(args["figure"]["width"], args["figure"]["height"]), ) if "vmin" in args: if args["vmin"] == "MINIMUM": vmin = np.min(plot_data) elif isinstance(args["vmin"], float) or isinstance(args["vmin"], int): vmin = args["vmin"] else: raise_and_log_critical_error("Provide either `MINIMUM` or `float` value for `vmin`.") else: vmin = -np.max(np.abs(plot_data)) vmax = np.max(np.abs(plot_data)) plt.imshow( plot_data, aspect="auto", origin="lower", cmap=args["cmap"], interpolation="nearest", vmin=vmin, vmax=vmax, ) cbar = plt.colorbar(pad=0.02) plt.grid(False) # setting ticks positions t = np.arange(-0.5, 364.6, 30) a.xaxis.set_ticks(t) a.set_xticklabels(((t + 0.5)).astype(int)) t = np.arange(-0.5, 23.6, 6) a.yaxis.set_ticks(t) a.set_yticklabels(list(map(lambda x: x + ":00", (t + 0.5).astype(int).astype(str)))) cbar.set_label(args["cbarlabel"], labelpad=10) a.set_ylabel("Hour") a.set_xlabel("Day") output_path = output_folder_path + "/plots/" ensure_path_exists(output_path) plt.savefig( fname=output_path + agent_name + "_" + args["column"] + "_heat_map.png", dpi=args["figure"]["dpi"], ) plt.close("all") def get_simulation_year_from(scenario: dict) -> int: 
"""Returns simulation year from given `scenario` assuming simulation starts in `StartTime` year + `1` year""" try: start_time = scenario["GeneralProperties"]["Simulation"]["StartTime"] except KeyError: raise_and_log_critical_error("Could not find `StartTime` in given scenario.") return int(start_time.split("-")[0]) + 1 def get_runid_from(scenario: dict) -> str: """Returns run_id from given `scenario` in format `xxx`""" try: return str(scenario["GeneralProperties"]["RunId"]).zfill(3) except KeyError: raise_and_log_critical_error("Could not find `RunId` in given scenario.") @action("general") def compile_installed_capacities_exchange_file(dmgr, config, params): """ Reads given `scenario` and calculates aggregated `InstalledPowerInMW` by `Type`. If `one_file_per_exchange` is enabled, a nested dict for each `Region` in `scenario` with a dict of `InstalledPowerInMW` per `Type` is calculated. In `map` you may specify a dict where the key represents the column name and the value ({old_value: new_value}, ...) the respective mapping of old and new values. Saves output as "year_runname_runid_DLR_Installed-Capacities-In-MW.csv" to `global` output folder with columns "EnergyCarrier, (Region), and Installed_capacity_in_MW". `Year`, `RunId` are retrieved from the `scenario.yaml`, whereas `runname` is defined in as `pbFile` in the `global` setting of the `user.yaml` (this should match the name specified in `fameSetup.yaml`). The action may be specified in the user.yaml as follows: - action: project: general call: args: scenario: "../couple_markets_AT-DE/data/scenario.yaml" one_file_per_exchange: true map: Energy Carrier: Storage: Pumped Hydro Storage NUCLEAR: Nuclear Power LIGNITE: Lignite HARD_COAL: Hard coal NATURAL_GAS: Gas (Turbine + CCGT) OIL: Other conventional PV: Photovoltaic WindOn: Wind Onshore RunOfRiver: Run of river Biogas: Bioenergy WindOff: Wind Offshore """ args = params["args"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] ensure_path_exists(output_folder_path) scenario = load_yaml(args["scenario"]) single_file = get_from_dict_or_default("one_file_per_exchange", args, False) installed_capacities = get_installed_power_by_technology(scenario, single_file) if "map" in args: for column in args["map"]: installed_capacities[column].replace(args["map"][column], inplace=True) installed_capacities.rename( {"InstalledCapacitiesInMW": "Installed Capacities In MW"}, axis=1, inplace=True, ) year = get_simulation_year_from(scenario) run_name = config["user"]["global"]["pbFile"].split(".")[0] run_id = get_runid_from(scenario) institution = "DLR" value = "Installed-Capacities-In-MW" file_name = "{}_{}_{}_{}_{}.csv".format(str(year), run_name, run_id, institution, value) installed_capacities.to_csv( path_or_buf=output_folder_path + file_name, sep=config["user"]["global"]["output"]["csvSeparator"], index=None, ) def get_energycarrier_name_for(agent_id: int, installed_power_by_energycarrier: dict) -> str: """Returns `energy_carrier_name` for `agent_id` in `dict` of installed capacities""" for ( energy_carrier_name, energy_carrier_group, ) in installed_power_by_energycarrier.items(): for plant in energy_carrier_group: for plant_id, _ in plant.items(): if agent_id == plant_id: return energy_carrier_name raise_and_log_critical_error( "Did not find agent with `Id` {} in list of installed powers by energycarrier.".format(agent_id) ) @action("general") def compile_awarded_power_exchange_file(dmgr, config, params): """ Reads given `scenario`, parses raw results from agents and 
calculates aggregated `Awarded Power In MWh` by `Type` and `Region`. If `aggregated_yearly_sum` is enabled, a sum for the simulation year is calculated as Awarded Power In TWh`. This is currently only support for simulations spanning one year. `one_file_per_exchange` is currently not implemented. In `map` you may specify a dict where the key represents the column name and the value ({old_value: new_value}, ...) the respective mapping of old and new values. Saves output as "year_runname_runid_DLR_Installed-Capacities-In-MW.csv" to `global` output folder with columns "EnergyCarrier, (Region), and Awarded-Power-In-MWh". `Year`, `RunId` are retrieved from the `scenario.yaml`, whereas `runname` is defined in as `pbFile` in the `global` setting of the `user.yaml` (this should match the name specified in `fameSetup.yaml`). The action has currently the following limitations: - there is currently no feature which allows to aggregate values accross all regions - aggregation of yearly sums is only available for a single simulation year, not multiple year runs. The action may be specified in the user.yaml as follows: - action: project: general call: compile_awarded_power_exchange_file args: scenario: "../couple_markets_AT-DE/data/scenario.yaml" aggregated_yearly_sum: false map: Energy Carrier: Storage: Pumped Hydro Storage NUCLEAR: Nuclear Power LIGNITE: Lignite HARD_COAL: Hard Coal NATURAL_GAS: Gas (Turbine + CCGT) OIL: Other Conventional PV: Photovoltaic WindOn: Wind Onshore RunOfRiver: Hydro Energy Biogas: Bioenergy WindOff: Wind Offshore """ args = params["args"] folder_name = config["user"]["global"]["output"]["pbOutputRaw"] output_folder_path = config["user"]["global"]["output"]["pbOutputProcessed"] ensure_path_exists(output_folder_path) scenario = load_yaml(args["scenario"]) contracts = scenario["Contracts"] agents = get_all_plants_from(scenario) installed_power_by_energycarrier = get_installed_power_by_tech_from(scenario) parsed_agents = parse_agents_in(folder_name) for agent in agents: agent[Amiris.energy_carrier_name] = get_energycarrier_name_for(agent["Id"], installed_power_by_energycarrier) if agent["Type"] == Amiris.conventional_type_name: data_of_all_agents_of_type = parsed_agents[Amiris.conventional_plant_operator] operator_id, _ = get_operator_id_for(agent, contracts) agent_data = data_of_all_agents_of_type.loc[data_of_all_agents_of_type["AgentId"] == operator_id].copy() else: data_of_all_agents_of_type = parsed_agents[agent["Type"]] agent_data = data_of_all_agents_of_type.loc[data_of_all_agents_of_type["AgentId"] == agent["Id"]].copy() agent_data[Amiris.time_step] = [ time.replace("_", " ") for time in convert_fame_time_to_datetime(agent_data[Amiris.time_step]) ] agent_data = agent_data.groupby([Amiris.time_step, "AgentId"], as_index=False).sum() agent_data.set_index(Amiris.time_step, inplace=True) if agent["Type"] == Amiris.storage_type_name: agent[Amiris.awarded_power_name] = agent_data["AwardedChargePower"] else: agent[Amiris.awarded_power_name] = agent_data[Amiris.awarded_power_name] regions = dict() for agent in agents: regions.setdefault(agent["Region"], []).append(agent) main_output =
pd.DataFrame()
pandas.DataFrame
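Most of the AMIRIS post-processing actions above share the same data-shaping core: read a raw per-agent CSV, convert the FAME `TimeStep` column to datetimes, aggregate with `groupby(...).sum()`, and pivot so that each exchange or agent becomes a column. The sketch below is illustrative only: it uses an in-memory toy frame instead of a real `EnergyExchange.csv`, and a plain datetime column stands in for the FAME time conversion and the ioproc/fameio machinery.

import pandas as pd

# Toy stand-in for a raw EnergyExchange.csv with two market regions.
raw = pd.DataFrame({
    "Time": pd.to_datetime(
        ["2030-01-01 00:00", "2030-01-01 00:00", "2030-01-01 01:00", "2030-01-01 01:00"]
    ),
    "Region": ["DE", "AT", "DE", "AT"],
    "ElectricityPriceInEURperMWH": [42.0, 40.5, 38.0, 39.2],
})

# Aggregate duplicates per (Time, Region), as compile_price_exchange_file does ...
grouped = raw.groupby(["Time", "Region"], as_index=False).sum()

# ... and pivot so that each region becomes one column, as the plotting actions do.
wide = grouped.pivot(index="Time", columns="Region", values="ElectricityPriceInEURperMWH")
print(wide)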
"""Run decoding analyses in time-frequency source space domain for the working memory task and save decoding performance and pattern""" # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # License: BSD (3-clause) import os import os.path as op import numpy as np import mne from h5io import read_hdf5 import pandas as pd from mne.decoding import SlidingEstimator, get_coef, LinearModel from mne.forward import read_forward_solution from mne.channels import read_dig_montage from mne.minimum_norm import (make_inverse_operator) from mne.beamformer import make_lcmv, apply_lcmv_epochs from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression, Ridge from sklearn.model_selection import StratifiedKFold from jr.gat import (AngularRegression, scorer_spearman, scorer_auc, scorer_angle) from base import (complete_behavior, get_events_interactions, read_hpi_mri) from config import path_data from sklearn import preprocessing from sklearn.metrics import make_scorer import sys subject = sys.argv[1] # read a swarm file for parralel computing on biowulf target_baseline = False # If True baseline applied from target freqs = np.array([10]) output_folder = '/review/time_frequency_in_source_%s/' % freqs[0] # Define analyses analyses_target = ['left_sfreq', 'right_sfreq', 'left_angle', 'right_angle'] analyses_cue = ['cue_side', 'cue_type', 'target_angle_cue_angle', 'target_sfreq_cue_sfreq'] analyses = dict(Target=analyses_target, Cue=analyses_cue) def load(subject, event_type): # Behavior fname = op.join(path_data, subject, 'behavior_%s.hdf5' % event_type) events = read_hdf5(fname) # add explicit conditions events = complete_behavior(events) # MEG if target_baseline: fname = op.join(path_data, subject, 'epochs_tf_%s.fif' % event_type) # noqa else: fname = op.join(path_data, subject, 'epochs_tf_%s_bsl.fif' % event_type) # noqa epochs = mne.read_epochs(fname) # epochs.decimate(10) return epochs, events # define frequency range # freqs = np.array([3, 6, 10, 16]) # freqs = np.arange(2, 60, 2) n_cycles = freqs / 2. 
# Define freesurfer, results and subject folder freesurf_subject = subject results_folder = op.join(path_data + 'results/' + subject + output_folder) subject_path = op.join(path_data, subject) subjects_dir = op.join(path_data, 'subjects') freesurf_subject_path = op.join(subjects_dir, freesurf_subject) # Define folder containing HPI position in MRI coordinates for registration neuronav_path = op.join(subject_path, 'neuronav') if not os.path.exists(results_folder): os.makedirs(results_folder) # Compute noise covariance on Target epoch if target_baseline: epochs, events = load(subject, 'Target') noise_cov = mne.compute_covariance(epochs, tmax=0) for epoch_type, epoch_analyses in analyses.iteritems(): epochs, events = load(subject, epoch_type) events = get_events_interactions(events) # read hpi position in device space hpi = list() idx = 0 for this_hpi in epochs.info['hpi_results'][idx]['dig_points']: if this_hpi['kind'] == 1 or this_hpi['kind'] == 2: hpi.append(this_hpi['r']) hpi = np.array(hpi) # read hpi_mri.txt (hpi position in MRI coord from brainsight) hpi_fname = op.join(neuronav_path, 'hpi_mri_surf.txt') landmark = read_hpi_mri(hpi_fname) point_names = ['NEC', 'LEC', 'REC'] # Nasion, Left and Right electrodes elp = np.array([landmark[key] for key in point_names]) # Set montage dig_montage = read_dig_montage(hsp=None, hpi=hpi, elp=elp, point_names=point_names, unit='mm', transform=False, dev_head_t=True) epochs.set_montage(dig_montage) # # Visually check the montage # plot_trans(epochs.info, trans=None, subject=freesurf_subject, dig=True, # meg_sensors=True, subjects_dir=subjects_dir, brain=True) # Create or Read forward model fwd_fname = op.join(subject_path, '%s-fwd.fif' % subject) if not op.isfile(fwd_fname): bem_dir = op.join(freesurf_subject_path, 'bem') bem_sol_fname = op.join(bem_dir, freesurf_subject + '-5120-bem-sol.fif') src_fname = op.join(bem_dir, freesurf_subject + '-oct-6-src.fif') fwd = mne.make_forward_solution( info=epochs.info, trans=None, src=src_fname, bem=bem_sol_fname, meg=True, eeg=False, mindist=5.0) # Convert to surface orientation for better visualization fwd = mne.convert_forward_solution(fwd, surf_ori=True) # save mne.write_forward_solution(fwd_fname, fwd, overwrite=True) fwd = read_forward_solution(fwd_fname) # Setup inverse model epochs.pick_types(meg=True, ref_meg=False) # inv = make_inverse_operator(epochs.info, fwd, noise_cov, # loose=0.2, depth=0.8) method = 'beamformer' # use of beamformer method # reconstruct source signal at the single trial data_cov = mne.compute_covariance(epochs, tmin=0.04) if not target_baseline: noise_cov = mne.compute_covariance(epochs, tmax=0) filters = make_lcmv(epochs.info, fwd, noise_cov=noise_cov, data_cov=data_cov, reg=0.05, pick_ori='max-power') stcs = apply_lcmv_epochs(epochs, filters, max_ori_out='signed') n_times = len(epochs.times) n_vertices = len(stcs[0].data) n_epochs = len(epochs.events) X_data = np.zeros([n_epochs, n_vertices, n_times]) for jj, stc in enumerate(stcs): X_data[jj] = stc.data X = mne.time_frequency.tfr_array_morlet(X_data, sfreq=epochs.info['sfreq'], freqs=freqs, output='power', n_cycles=n_cycles, n_jobs=24) n_epochs, n_channels, n_freqs, n_times = X.shape X = X.reshape(n_epochs, n_channels, -1) # collapse freqs and time # Run decoding for each analysis for analysis in epoch_analyses: # define to-be-predicted values y = np.array(events[analysis]) if 'angle' in analysis[:14]: clf = make_pipeline(StandardScaler(), LinearModel(AngularRegression(Ridge(), independent=False))) scorer = scorer_angle 
kwargs = dict() y = np.array(y, dtype=float) elif 'sfreq' in analysis[:14]: clf = make_pipeline(StandardScaler(), LinearModel(Ridge())) scorer = scorer_spearman kwargs = dict() y = np.array(y, dtype=float) elif ('cue_side' in analysis or 'cue_type' in analysis): clf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression())) scorer = scorer_auc kwargs = dict() y[np.where(
pd.isnull(y)
pandas.isnull
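Illustrative sketch (not part of the dataset row above): a minimal, runnable use of pandas.isnull mirroring the null-masking pattern that the prompt's decoding script applies to its label vector; the toy array below is invented.

import numpy as np
import pandas as pd

# Toy label vector with a missing entry, standing in for the events column in the prompt.
y = np.array([0.0, 1.0, np.nan, 1.0])

# pd.isnull returns an elementwise boolean mask that is True for NaN/None/NaT.
mask = pd.isnull(y)
print(mask)                      # [False False  True False]

# Same idiom as the prompt: locate the null positions with np.where.
null_idx = np.where(pd.isnull(y))[0]
print(null_idx)                  # [2]

# Keep only the non-null trials/labels.
y_clean = y[~mask]
print(y_clean)                   # [0. 1. 1.]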
""" Define the SeriesGroupBy and DataFrameGroupBy classes that hold the groupby interfaces (and some implementations). These are user facing as the result of the ``df.groupby(...)`` operations, which here returns a DataFrameGroupBy object. """ from __future__ import annotations from collections import abc from functools import partial from textwrap import dedent from typing import ( Any, Callable, Hashable, Iterable, Mapping, NamedTuple, TypeVar, Union, cast, ) import warnings import numpy as np from pandas._libs import reduction as libreduction from pandas._typing import ( ArrayLike, Manager, Manager2D, SingleManager, ) from pandas.util._decorators import ( Appender, Substitution, doc, ) from pandas.core.dtypes.common import ( ensure_int64, is_bool, is_categorical_dtype, is_dict_like, is_integer_dtype, is_interval_dtype, is_scalar, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, nanops, ) from pandas.core.apply import ( GroupByApply, maybe_mangle_lambdas, reconstruct_func, validate_func_kwargs, ) from pandas.core.base import SpecificationError import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame from pandas.core.groupby import base from pandas.core.groupby.groupby import ( GroupBy, _agg_template, _apply_docs, _transform_template, warn_dropping_nuisance_columns_deprecated, ) from pandas.core.indexes.api import ( Index, MultiIndex, all_indexes_same, ) from pandas.core.series import Series from pandas.core.util.numba_ import maybe_use_numba from pandas.plotting import boxplot_frame_groupby # TODO(typing) the return value on this callable should be any *scalar*. AggScalar = Union[str, Callable[..., Any]] # TODO: validate types on ScalarResult and move to _typing # Blocked from using by https://github.com/python/mypy/issues/1484 # See note at _mangle_lambda_list ScalarResult = TypeVar("ScalarResult") class NamedAgg(NamedTuple): column: Hashable aggfunc: AggScalar def generate_property(name: str, klass: type[DataFrame | Series]): """ Create a property for a GroupBy subclass to dispatch to DataFrame/Series. Parameters ---------- name : str klass : {DataFrame, Series} Returns ------- property """ def prop(self): return self._make_wrapper(name) parent_method = getattr(klass, name) prop.__doc__ = parent_method.__doc__ or "" prop.__name__ = name return property(prop) def pin_allowlisted_properties( klass: type[DataFrame | Series], allowlist: frozenset[str] ): """ Create GroupBy member defs for DataFrame/Series names in a allowlist. Parameters ---------- klass : DataFrame or Series class class where members are defined. allowlist : frozenset[str] Set of names of klass methods to be constructed Returns ------- class decorator Notes ----- Since we don't want to override methods explicitly defined in the base class, any such name is skipped. 
""" def pinner(cls): for name in allowlist: if hasattr(cls, name): # don't override anything that was explicitly defined # in the base class continue prop = generate_property(name, klass) setattr(cls, name, prop) return cls return pinner @pin_allowlisted_properties(Series, base.series_apply_allowlist) class SeriesGroupBy(GroupBy[Series]): _apply_allowlist = base.series_apply_allowlist def _wrap_agged_manager(self, mgr: Manager) -> Series: if mgr.ndim == 1: mgr = cast(SingleManager, mgr) single = mgr else: mgr = cast(Manager2D, mgr) single = mgr.iget(0) ser = self.obj._constructor(single, name=self.obj.name) # NB: caller is responsible for setting ser.index return ser def _get_data_to_aggregate(self) -> SingleManager: ser = self._obj_with_exclusions single = ser._mgr return single def _iterate_slices(self) -> Iterable[Series]: yield self._selected_obj _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.groupby([1, 1, 2, 2]).min() 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg('min') 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) min max 1 1 2 2 3 4 The output column names can be controlled by passing the desired column names and aggregations as keyword arguments. >>> s.groupby([1, 1, 2, 2]).agg( ... minimum='min', ... maximum='max', ... ) minimum maximum 1 1 2 2 3 4 .. versionchanged:: 1.3.0 The resulting dtype will reflect the return value of the aggregating function. >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min()) 1 1.0 2 3.0 dtype: float64 """ ) @Appender( _apply_docs["template"].format( input="series", examples=_apply_docs["series_examples"] ) ) def apply(self, func, *args, **kwargs): return super().apply(func, *args, **kwargs) @doc(_agg_template, examples=_agg_examples_doc, klass="Series") def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): with self._group_selection_context(): data = self._selected_obj result = self._aggregate_with_numba( data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs ) index = self.grouper.result_index return self.obj._constructor(result.ravel(), index=index, name=data.name) relabeling = func is None columns = None if relabeling: columns, func = validate_func_kwargs(kwargs) kwargs = {} if isinstance(func, str): return getattr(self, func)(*args, **kwargs) elif isinstance(func, abc.Iterable): # Catch instances of lists / tuples # but not the class list / tuple itself. 
func = maybe_mangle_lambdas(func) ret = self._aggregate_multiple_funcs(func) if relabeling: # error: Incompatible types in assignment (expression has type # "Optional[List[str]]", variable has type "Index") ret.columns = columns # type: ignore[assignment] return ret else: cyfunc = com.get_cython_func(func) if cyfunc and not args and not kwargs: return getattr(self, cyfunc)() if self.grouper.nkeys > 1: return self._python_agg_general(func, *args, **kwargs) try: return self._python_agg_general(func, *args, **kwargs) except KeyError: # TODO: KeyError is raised in _python_agg_general, # see test_groupby.test_basic result = self._aggregate_named(func, *args, **kwargs) # result is a dict whose keys are the elements of result_index index = self.grouper.result_index return create_series_with_explicit_dtype( result, index=index, dtype_if_empty=object ) agg = aggregate def _aggregate_multiple_funcs(self, arg) -> DataFrame: if isinstance(arg, dict): # show the deprecation, but only if we # have not shown a higher level one # GH 15931 raise SpecificationError("nested renamer is not supported") elif any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] # indicated column order columns = next(zip(*arg)) else: # list of functions / function names columns = [] for f in arg: columns.append(com.get_callable_name(f) or f) arg = zip(columns, arg) results: dict[base.OutputKey, DataFrame | Series] = {} for idx, (name, func) in enumerate(arg): key = base.OutputKey(label=name, position=idx) results[key] = self.aggregate(func) if any(isinstance(x, DataFrame) for x in results.values()): from pandas import concat res_df = concat( results.values(), axis=1, keys=[key.label for key in results.keys()] ) return res_df indexed_output = {key.position: val for key, val in results.items()} output = self.obj._constructor_expanddim(indexed_output, index=None) output.columns = Index(key.label for key in results) output = self._reindex_output(output) return output def _indexed_output_to_ndframe( self, output: Mapping[base.OutputKey, ArrayLike] ) -> Series: """ Wrap the dict result of a GroupBy aggregation into a Series. """ assert len(output) == 1 values = next(iter(output.values())) result = self.obj._constructor(values) result.name = self.obj.name return result def _wrap_applied_output( self, data: Series, values: list[Any], not_indexed_same: bool = False, ) -> DataFrame | Series: """ Wrap the output of SeriesGroupBy.apply into the expected result. Parameters ---------- data : Series Input data for groupby operation. values : List[Any] Applied output for each group. not_indexed_same : bool, default False Whether the applied outputs are not indexed the same as the group axes. 
Returns ------- DataFrame or Series """ if len(values) == 0: # GH #6265 return self.obj._constructor( [], name=self.obj.name, index=self.grouper.result_index, dtype=data.dtype, ) assert values is not None if isinstance(values[0], dict): # GH #823 #24880 index = self.grouper.result_index res_df = self.obj._constructor_expanddim(values, index=index) res_df = self._reindex_output(res_df) # if self.observed is False, # keep all-NaN rows created while re-indexing res_ser = res_df.stack(dropna=self.observed) res_ser.name = self.obj.name return res_ser elif isinstance(values[0], (Series, DataFrame)): return self._concat_objects(values, not_indexed_same=not_indexed_same) else: # GH #6265 #24880 result = self.obj._constructor( data=values, index=self.grouper.result_index, name=self.obj.name ) return self._reindex_output(result) def _aggregate_named(self, func, *args, **kwargs): # Note: this is very similar to _aggregate_series_pure_python, # but that does not pin group.name result = {} initialized = False for name, group in self: object.__setattr__(group, "name", name) output = func(group, *args, **kwargs) output = libreduction.extract_result(output) if not initialized: # We only do this validation on the first iteration libreduction.check_result_array(output, group.dtype) initialized = True result[name] = output return result @Substitution(klass="Series") @Appender(_transform_template) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): return self._transform( func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) def _cython_transform( self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs ): assert axis == 0 # handled by caller obj = self._selected_obj try: result = self.grouper._cython_operation( "transform", obj._values, how, axis, **kwargs ) except NotImplementedError as err: raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err return obj._constructor(result, index=self.obj.index, name=obj.name) def _transform_general(self, func: Callable, *args, **kwargs) -> Series: """ Transform with a callable func`. """ assert callable(func) klass = type(self.obj) results = [] for name, group in self: # this setattr is needed for test_transform_lambda_with_datetimetz object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) results.append(klass(res, index=group.index)) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat concatenated = concat(results) result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) result.name = self.obj.name return result def _can_use_transform_fast(self, result) -> bool: return True def filter(self, func, dropna: bool = True, *args, **kwargs): """ Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 
1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x) -> bool: b = wrapper(x) return b and notna(b) try: indices = [ self._get_index(name) for name, group in self if true_and_notna(group) ] except (ValueError, TypeError) as err: raise TypeError("the filter must return a boolean result") from err filtered = self._apply_filter(indices, dropna) return filtered def nunique(self, dropna: bool = True) -> Series: """ Return number of unique elements in the group. Returns ------- Series Number of unique values within each group. """ ids, _, _ = self.grouper.group_info val = self.obj._values codes, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((codes, ids)) codes = codes[sorter] ids = ids[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, codes[1:] != codes[:-1]] # 1st item of each group is a new unique observation mask = codes == -1 if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype("int64", copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out result = self.obj._constructor(res, index=ri, name=self.obj.name) return self._reindex_output(result, fill_value=0) @doc(Series.describe) def describe(self, **kwargs): return super().describe(**kwargs) def value_counts( self, normalize: bool = False, sort: bool = True, ascending: bool = False, bins=None, dropna: bool = True, ): from pandas.core.reshape.merge import get_join_indexers from pandas.core.reshape.tile import cut ids, _, _ = self.grouper.group_info val = self.obj._values def apply_series_value_counts(): return self.apply( Series.value_counts, normalize=normalize, sort=sort, ascending=ascending, bins=bins, ) if bins is not None: if not np.iterable(bins): # scalar bins cannot be done at top level # in a backward compatible way return apply_series_value_counts() elif is_categorical_dtype(val.dtype): # GH38672 return apply_series_value_counts() # groupby removes null keys from groupings mask = ids != -1 ids, val = ids[mask], val[mask] if bins is None: lab, lev = algorithms.factorize(val, sort=True) llab = lambda lab, inc: lab[inc] else: # lab is a Categorical with categories an IntervalIndex lab = cut(Series(val), bins, include_lowest=True) # error: "ndarray" has no attribute "cat" lev = lab.cat.categories # type: ignore[attr-defined] # error: No overload variant of "take" of "_ArrayOrScalarCommon" matches # argument types "Any", "bool", "Union[Any, float]" lab = lev.take( # type: ignore[call-overload] # error: "ndarray" has no attribute "cat" lab.cat.codes, # type: ignore[attr-defined] allow_fill=True, # error: Item "ndarray" of "Union[ndarray, Index]" has no attribute # "_na_value" fill_value=lev._na_value, # type: ignore[union-attr] ) llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] if is_interval_dtype(lab.dtype): # TODO: should we do this inside II? 
# error: "ndarray" has no attribute "left" # error: "ndarray" has no attribute "right" sorter = np.lexsort( (lab.left, lab.right, ids) # type: ignore[attr-defined] ) else: sorter = np.lexsort((lab, ids)) ids, lab = ids[sorter], lab[sorter] # group boundaries are where group ids change idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] idx = np.r_[0, idchanges] if not len(ids): idx = idchanges # new values are where sorted labels change lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) inc = np.r_[True, lchanges] if not len(val): inc = lchanges inc[idx] = True # group boundaries are also new values out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts # num. of times each group should be repeated rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) # multi-index components codes = self.grouper.reconstructed_codes codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] # error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]"; # expected "Index" levels = [ping.group_index for ping in self.grouper.groupings] + [ lev # type: ignore[list-item] ] names = self.grouper.names + [self.obj.name] if dropna: mask = codes[-1] != -1 if mask.all(): dropna = False else: out, codes = out[mask], [level_codes[mask] for level_codes in codes] if normalize: out = out.astype("float") d = np.diff(np.r_[idx, len(ids)]) if dropna: m = ids[lab == -1] np.add.at(d, m, -1) acc = rep(d)[mask] else: acc = rep(d) out /= acc if sort and bins is None: cat = ids[inc][mask] if dropna else ids[inc] sorter = np.lexsort((out if ascending else -out, cat)) out, codes[-1] = out[sorter], codes[-1][sorter] if bins is not None: # for compat. with libgroupby.value_counts need to ensure every # bin is present at every index level, null filled with zeros diff = np.zeros(len(out), dtype="bool") for level_codes in codes[:-1]: diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] ncat, nbin = diff.sum(), len(levels[-1]) left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] right = [diff.cumsum() - 1, codes[-1]] _, idx = get_join_indexers(left, right, sort=False, how="left") out = np.where(idx != -1, out[idx], 0) if sort: sorter = np.lexsort((out if ascending else -out, left[0])) out, left[-1] = out[sorter], left[-1][sorter] # build the multi-index w/ full levels def build_codes(lev_codes: np.ndarray) -> np.ndarray: return np.repeat(lev_codes[diff], nbin) codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] codes.append(left[-1]) mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False) if
is_integer_dtype(out.dtype)
pandas.core.dtypes.common.is_integer_dtype
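Illustrative sketch (not part of the dataset row above): the row's API is the internal helper pandas.core.dtypes.common.is_integer_dtype; the example below uses its public alias pandas.api.types.is_integer_dtype, which exposes the same check. The series values are invented.

import pandas as pd
from pandas.api.types import is_integer_dtype  # public alias of the internal helper

counts = pd.Series([3, 1, 2], dtype="int64")
ratios = pd.Series([0.5, 0.17, 0.33])

print(is_integer_dtype(counts.dtype))   # True
print(is_integer_dtype(ratios.dtype))   # False (float64)

# Typical use, as in the prompt's value_counts code: only force an int64 cast
# when the result is actually integral.
out = counts.to_numpy()
if is_integer_dtype(out.dtype):
    out = out.astype("int64", copy=False)
print(out.dtype)                        # int64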
import logging import pandas as pd #from sodapy import Socrata from datetime import datetime import pymongo import expiringdict import utils # !pip install expiringdict client = pymongo.MongoClient() logger = logging.Logger(__name__) utils.setup_logger(logger, 'db.log') RESULT_CACHE_EXPIRATION = 2200 def upsert_crime(df): """ Update MongoDB database `crime` and collection `crime` with the given `DataFrame`. """ db = client.get_database("crime") collection = db.get_collection("crime") update_count = 0 if len(df) > 0: for record in df.to_dict('records'): result = collection.replace_one( filter = {'rpt_id': record['rpt_id']}, # locate the document if exists replacement = record, # latest document upsert=True) if result.matched_count > 0: update_count += 1 logger.info("rows={}, update={}, ".format(df.shape[0], update_count) + "insert={}".format(df.shape[0]-update_count)) def fetch_all_crime(): db = client.get_database("crime") collection = db.get_collection("crime") ret = list(collection.find()) logger.info(str(len(ret)) + ' documents read from the db') return ret _fetch_all_crime_as_df_cache = expiringdict.ExpiringDict(max_len=1, max_age_seconds=RESULT_CACHE_EXPIRATION) def fetch_all_crime_as_df(allow_cached=False): """Converts list of dicts returned by `fetch_all_crime` to DataFrame with ID removed Actual job is done in `_worker`. When `allow_cached`, attempt to retrieve timed cached from `_fetch_all_crime_as_df_cache`; ignore cache and call `_work` if cache expires or `allow_cached` is False. """ def _work(): data = fetch_all_crime() if len(data) == 0: return None df = pd.DataFrame.from_records(data) df.drop('_id', axis=1, inplace=True) df['arst_date'] = pd.to_datetime(df['arst_date']) df['month_string'] = df['arst_date'].apply(lambda x:str(x.year) + '-' + str(x.month)) df['month'] =
pd.to_datetime(df['month_string'])
pandas.to_datetime
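Illustrative sketch (not part of the dataset row above): a self-contained use of pandas.to_datetime following the prompt's pattern of parsing raw date strings and then re-parsing a derived "year-month" string; the dates are invented.

import pandas as pd

df = pd.DataFrame({"arst_date": ["2020-01-15", "2020-02-03", "2020-02-20"]})

# Parse raw strings into datetime64[ns]; on messier data, errors="coerce"
# would turn unparseable entries into NaT instead of raising.
df["arst_date"] = pd.to_datetime(df["arst_date"])

# Same derived column as the prompt: "<year>-<month>" strings, re-parsed to
# the first day of each month.
df["month_string"] = df["arst_date"].apply(lambda x: f"{x.year}-{x.month}")
df["month"] = pd.to_datetime(df["month_string"])
print(df.dtypes)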
""" .. module:: merge3 :synopsis: merge assemblies from different cell types jGEM version 3 merger .. moduleauthor:: <NAME> <<EMAIL>> """ # system imports import subprocess import multiprocessing import gzip import os import time import shutil from functools import reduce from operator import iadd, iand from collections import Counter from itertools import repeat import logging logging.basicConfig(level=logging.DEBUG) LOG = logging.getLogger(__name__) # 3rd party imports import pandas as PD import numpy as N import matplotlib.pyplot as P # LocalAssembler imports from collections import Counter from matplotlib.collections import BrokenBarHCollection from functools import partial, reduce from operator import iadd import bisect from scipy.optimize import nnls # library imports from jgem import utils as UT from jgem import bigwig as BW from jgem import bedtools as BT from jgem import gtfgffbed as GGB from jgem import taskqueue as TQ from jgem import assembler3 as A3 import jgem.cy.bw as cybw ############# Merge Prep ###################################################### class PrepBWSJ(object): def __init__(self, j2pres, genome, dstpre, libsizes=None, np=10): self.j2pres = j2pres self.libsizes = libsizes # scale = 1e6/libsize self.genome = genome self.dstpre = dstpre self.np = np def __call__(self): # exdf => ex.p, ex.n, ex.u # sjdf => sj.p, sj.n, sj.u # paths => sjpath.bed # divide into tasks (exdf,sjdf,paths) x chroms self.server = server = TQ.Server(name='PrepBWSJ', np=self.np) self.chroms = chroms = UT.chroms(self.genome) csizes = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size') self.exstatus = exstatus = {} self.sjstatus = sjstatus = {} self.pastatus = pastatus = {} self.sdstatus = sdstatus = {} exdone=False sjdone=False padone=False sddone=False with server: for chrom in chroms: # exdf tasks tname = 'prep_exwig_chr.{0}'.format(chrom) args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom]) task = TQ.Task(tname, prep_exwig_chr, args) server.add_task(task) # exdf tasks tname = 'prep_sjwig_chr.{0}'.format(chrom) args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom]) task = TQ.Task(tname, prep_sjwig_chr, args) server.add_task(task) # exdf tasks tname = 'prep_sjpath_chr.{0}'.format(chrom) args = (self.j2pres, self.libsizes, self.dstpre, chrom) task = TQ.Task(tname, prep_sjpath_chr, args) server.add_task(task) tname = 'prep_sjdf_chr.{0}'.format(chrom) args = (self.j2pres, self.libsizes, self.dstpre, chrom) task = TQ.Task(tname, prep_sjdf_chr, args) server.add_task(task) while server.check_error(): try: name, rslt = server.get_result(timeout=5) # block until result come in except TQ.Empty: name, rslt = None, None if name is not None: if name.startswith('prep_exwig_chr.'): chrom = name.split('.')[1] exstatus[chrom] = rslt if len(exstatus)==len(chroms): # all finished print('$$$$$$$$ putting in prep_exbw $$$$$$$$$$$') tname='prep_exbw' args = (self.dstpre, chroms, self.genome) task = TQ.Task(tname, prep_exbw, args) server.add_task(task) if name.startswith('prep_sjwig_chr.'): chrom = name.split('.')[1] sjstatus[chrom] = rslt if len(sjstatus)==len(chroms): # all finished print('$$$$$$$$ putting in prep_sjbw $$$$$$$$$$$') tname='prep_sjbw' args = (self.dstpre, chroms, self.genome) task = TQ.Task(tname, prep_sjbw, args) server.add_task(task) if name.startswith('prep_sjpath_chr.'): chrom = name.split('.')[1] pastatus[chrom] = rslt if len(pastatus)==len(chroms): # all finished print('$$$$$$$$ putting in prep_sjpath $$$$$$$$$$$') tname='prep_sjpath' args = (self.dstpre, 
chroms) task = TQ.Task(tname, prep_sjpath, args) server.add_task(task) if name.startswith('prep_sjdf_chr.'): chrom = name.split('.')[1] sdstatus[chrom] = rslt if len(sdstatus)==len(chroms): # all finished print('$$$$$$$$ putting in prep_sjdf $$$$$$$$$$$') tname='prep_sjdf' args = (self.dstpre, chroms) task = TQ.Task(tname, prep_sjdf, args) server.add_task(task) if name=='prep_exbw': print('$$$$$$$$ prep_exbw done $$$$$$$$$$$') exdone=True if name=='prep_sjbw': print('$$$$$$$$ prep_sjbw done $$$$$$$$$$$') sjdone=True if name=='prep_sjpath': print('$$$$$$$$ prep_sjpath done $$$$$$$$$$$') padone=True if name=='prep_sjdf': print('$$$$$$$$ prep_sjdf done $$$$$$$$$$$') sddone=True if exdone&sjdone&padone&sddone: break print('Exit Loop') print('Done') def prep_exwig_chr(j2pres, libsizes, dstpre, chrom, csize): ss = ['p','n','u'] s2s = {'p':['+'],'n':['-'],'u':['.+','.-','.']} a = {s:N.zeros(csize) for s in ss} wigpaths = {s:dstpre+'.ex.{0}.{1}.wig'.format(s,chrom) for s in ss} if all([os.path.exists(dstpre+'.ex.{0}.bw'.format(s)) for s in ss]): return wigpaths if all([os.path.exists(dstpre+'.ex.{0}.wig'.format(s)) for s in ss]): return wigpaths if all([os.path.exists(wigpaths[s]) for s in ss]): return wigpaths if libsizes is None: n = 1 scales = N.ones(len(j2pres)) else: n = len(j2pres) scales = [1e6/float(x) for x in libsizes] for pre,scale in zip(j2pres, scales): exdf = UT.read_pandas(pre+'.exdf.txt.gz',names=A3.EXDFCOLS) exdf = exdf[exdf['chr']==chrom] for s in ss: exsub = exdf[exdf['strand'].isin(s2s[s])] for st,ed,ecov in exsub[['st','ed','ecov']].values: a[s][st:ed] += ecov*scale sedf = UT.read_pandas(pre+'.sedf.txt.gz',names=A3.EXDFCOLS) sedf = sedf[sedf['chr']==chrom] for s in ss: sesub = sedf[sedf['strand'].isin(s2s[s])] for st,ed,ecov in sesub[['st','ed','ecov']].values: a[s][st:ed] += ecov*scale for s in ['p','n','u']: if libsizes is not None: a[s] /= float(n) # average cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w') return wigpaths def prep_sjwig_chr(j2pres, libsizes, dstpre, chrom, csize): ss = ['p','n','u'] s2s = {'p':['+'],'n':['-'],'u':['.+','.-']} a = {s:N.zeros(csize) for s in ss} wigpaths = {s:dstpre+'.sj.{0}.{1}.wig'.format(s,chrom) for s in ss} if all([os.path.exists(dstpre+'.sj.{0}.bw'.format(s)) for s in ss]): return wigpaths if all([os.path.exists(dstpre+'.sj.{0}.wig'.format(s)) for s in ss]): return wigpaths if all([os.path.exists(wigpaths[s]) for s in ss]): return wigpaths if libsizes is None: n = 1 scales = N.ones(len(j2pres)) else: n = len(j2pres) scales = [1e6/float(x) for x in libsizes] for pre,scale in zip(j2pres, scales): sjdf = UT.read_pandas(pre+'.sjdf.txt.gz',names=A3.SJDFCOLS) sjdf = sjdf[sjdf['chr']==chrom] for s in ss: sjsub = sjdf[sjdf['strand'].isin(s2s[s])] for st,ed,tcnt in sjsub[['st','ed','tcnt']].values: a[s][st:ed] += tcnt*scale for s in ['p','n','u']: if libsizes is not None: a[s] /= float(n) # average cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w') return wigpaths def prep_sjpath_chr(j2pres, libsizes, dstpre, chrom): pc2st = {} pc2ed = {} pc2tst = {} pc2ted = {} pc2strand = {} pc2tcov = {} # pc2tcov0 = {} # chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes # cols = ['st','ed','name','strand','tst','ted','tcov0','tcov'] path = dstpre+'.sjpath.{0}.bed.gz'.format(chrom) path0 = dstpre+'.sjpath.bed.gz' if os.path.exists(path0): return path if os.path.exists(path): return path cols = ['st','ed','name','strand','tst','ted','tcov'] if libsizes is None: n = 1 scales = N.ones(len(j2pres)) else: n = len(j2pres) scales = 
[1e6/float(x) for x in libsizes] for pre,scale in zip(j2pres, scales): paths = UT.read_pandas(pre+'.paths.txt.gz', names=A3.PATHCOLS) paths = paths[paths['chr']==chrom] for st,ed,name,s,tst,ted,tcov in paths[cols].values: pc = ','.join(name.split(',')[1:-1]) # trim 53exons => intron chain if pc=='': continue # ignore no junction path pc2st[pc] = min(st, pc2st.get(pc,st)) pc2ed[pc] = max(ed, pc2ed.get(pc,ed)) pc2tst[pc] = tst pc2ted[pc] = ted pc2strand[pc] = s pc2tcov[pc] = pc2tcov.get(pc,0)+scale*tcov #pc2tcov0[pc] = pc2tcov0.get(pc,0)+scale*tcov0 df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'tst':pc2tst,'ted':pc2ted, 'strand':pc2strand,'tcov':pc2tcov}) df['chr'] = chrom df.index.name = 'name' df.reset_index(inplace=True) # create bed12: parse name => #exons, esizes, estarts df['pc'] = df['name'].copy() idxp = df['strand'].isin(['+','.+']) if libsizes is not None: df['tcov'] = df['tcov']/float(n) df.loc[idxp,'name'] = ['{0},{1},{2}'.format(s,p,e) for s,p,e in df[idxp][['st','pc','ed']].values] df.loc[~idxp,'name'] = ['{2},{1},{0}'.format(s,p,e) for s,p,e in df[~idxp][['st','pc','ed']].values] df = df.groupby('pc').first() # get rid of unstranded duplicates cmax = 9+N.log2(N.mean(scales)) bed = A3.path2bed12(df, cmax) # reset sc1 to tcov (from log2(tcov+2)*100) bed['sc1'] = bed['tcov'] GGB.write_bed(bed, path, ncols=12) return path def prep_sjdf_chr(j2pres, libsizes, dstpre, chrom): pc2st = {} pc2ed = {} pc2strand = {} pc2tcnt = {} pc2ucnt = {} # chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes # cols = ['st','ed','name','strand','tst','ted','tcov0','tcov'] path = dstpre+'.sjdf.{0}.txt.gz'.format(chrom) path0 = dstpre+'.sjdf.txt.gz' if os.path.exists(path0): return path if os.path.exists(path): return path cols = ['st','ed','name','strand','st','ed','tcnt','ucnt'] # cols = A3.SJDFCOLS if libsizes is None: n = 1 scales = N.ones(len(j2pres)) else: n = len(j2pres) scales = [1e6/float(x) for x in libsizes] for pre,scale in zip(j2pres, scales): paths = UT.read_pandas(pre+'.sjdf.txt.gz', names=A3.SJDFCOLS) paths = paths[paths['chr']==chrom] for st,ed,pc,s,st,ed,tcnt,ucnt in paths[cols].values: pc2st[pc] = st pc2ed[pc] = ed pc2strand[pc] = s pc2tcnt[pc] = pc2tcnt.get(pc,0)+scale*tcnt pc2ucnt[pc] = pc2ucnt.get(pc,0)+scale*ucnt df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'st':pc2st,'ed':pc2ed, 'strand':pc2strand,'tcnt':pc2tcnt,'ucnt':pc2ucnt}) df['chr'] = chrom df['kind'] = 'j' if libsizes is not None: df['tcnt'] = df['tcnt']/float(n) df['ucnt'] = df['ucnt']/float(n) df.index.name = 'name' df.reset_index(inplace=True) UT.write_pandas(df[A3.SJDFCOLS], path, '') return path def prep_exbw(dstpre, chroms, genome): return _prep_bw(dstpre, chroms, genome, 'ex') def prep_sjbw(dstpre, chroms, genome): return _prep_bw(dstpre, chroms, genome, 'sj') def _prep_bw(dstpre, chroms, genome, w): # concatenate ss = ['p','n','u'] files = [] bwpaths = {s: dstpre+'.{1}.{0}.bw'.format(s,w) for s in ss} if all([os.path.exists(bwpaths[s]) for s in ss]): return bwpaths for s in ss: dstwig = dstpre+'.{1}.{0}.wig'.format(s,w) with open(dstwig, 'wb') as dst: for c in chroms: srcpath = dstpre+'.{2}.{0}.{1}.wig'.format(s,c,w) with open(srcpath,'rb') as src: shutil.copyfileobj(src,dst) files.append(srcpath) files.append(dstwig) print('converting wig to bigwig {0}'.format(dstwig)) BT.wig2bw(dstwig, UT.chromsizes(genome), bwpaths[s]) # clean up for f in files: os.unlink(f) return bwpaths def prep_sjpath(dstpre, chroms): dstpath = dstpre+'.sjpath.bed.gz' if os.path.exists(dstpath): return dstpath files = [] with 
open(dstpath, 'wb') as dst: for c in chroms: srcpath = dstpre+'.sjpath.{0}.bed.gz'.format(c) with open(srcpath,'rb') as src: shutil.copyfileobj(src,dst) files.append(srcpath) # for f in files: # keep separate chr files # os.unlink(f) return dstpath def prep_sjdf(dstpre, chroms): dstpath = dstpre+'.sjdf.txt.gz' if os.path.exists(dstpath): return dstpath files = [] with open(dstpath, 'wb') as dst: for c in chroms: srcpath = dstpre+'.sjdf.{0}.txt.gz'.format(c) with open(srcpath,'rb') as src: shutil.copyfileobj(src,dst) files.append(srcpath) # for f in files: # keep separate chr files # os.unlink(f) return dstpath ############# SJ Filter ####################################################### SJFILTERPARAMS = dict( th_detected=1, th_maxcnt=1, th_maxoverhang=15, th_minedgeexon=15, th_sjratio=1e-3, filter_unstranded=False,# there are substantial number of high cov unstranded ) class SJFilter(object): def __init__(self, bwsjpre, statspath, genome, np=10, **kw): self.bwsjpre = bwsjpre self.statspath = statspath self.genome = genome self.np = np self.params = SJFILTERPARAMS.copy() self.params.update(kw) def __call__(self): chroms = UT.chroms(self.genome) csizedic = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size') args = [] for c in chroms: csize = csizedic[c] args.append((self.bwsjpre, self.statspath, c, csize, self.params)) rslts = UT.process_mp(filter_sjpath, args, np=self.np, doreduce=False) dstpath = self.bwsjpre+'.filtered.sjpath.bed.gz' with open(dstpath,'wb') as dst: for c in chroms: srcpath = self.bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(c) with open(srcpath, 'rb') as src: shutil.copyfileobj(src, dst) rslts = UT.process_mp(filter_sjdf, args, np=self.np, doreduce=False) dstpath = self.bwsjpre+'.filtered.sjdf.txt.gz' with open(dstpath,'wb') as dst: for c in chroms: srcpath = self.bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(c) with open(srcpath, 'rb') as src: shutil.copyfileobj(src, dst) # make sj.bw sjfiltered2bw(self.bwsjpre, self.genome, self.np) for s in ['p','n','u']: src = self.bwsjpre + '.ex.{0}.bw'.format(s) dst = self.bwsjpre + '.filtered.ex.{0}.bw'.format(s) cmd = ['ln','-s', src, dst] subprocess.call(cmd) def locus2pc(l): chrom,sted,strand = l.split(':') st,ed = sted.split('-') st = str(int(st)-1) if strand in ['+','.']: return '|'.join([st,ed]) return '|'.join([ed,st]) def filter_sjpath(bwsjpre, statspath, chrom, csize, params): # read in junction stats stats = UT.read_pandas(statspath) if 'chr' not in stats: stats['chr'] = [x.split(':')[0] for x in stats['locus']] if '#detected' in stats: stats.rename(columns={'#detected':'detected'}, inplace=True) stats = stats[stats['chr']==chrom].copy() if 'pc' not in stats: stats['pc'] = [locus2pc(x) for x in stats['locus']] flds = ['detected','maxcnt','maxoverhang'] dics = {f: UT.df2dict(stats, 'pc', f) for f in flds} # read sjpath fpath_chr = bwsjpre+'.sjpath.{0}.bed.gz'.format(chrom) dstpath = bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(chrom) if os.path.exists(fpath_chr): sj = GGB.read_bed(fpath_chr) else: fpath = bwsjpre+'.sjpath.bed.gz' sj = GGB.read_bed(fpath) sj = sj[sj['chr']==chrom].copy() name0 = sj.iloc[0]['name'] if len(name0.split('|'))<len(name0.split(',')): # exons attached? 
sj['name'] = [','.join(x.split(',')[1:-1]) for x in sj['name']] # filter unstranded if params['filter_unstranded']: sj = sj[sj['strand'].isin(['+','-'])].copy() # filter with stats for f in flds: sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']] sj = sj[sj[f]>params['th_'+f]].copy() # filter # edge exon size sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']] sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']] eth = params['th_minedgeexon'] sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy() # calculate sjratio, sjratio if params['filter_unstranded']: sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False) else: sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True) with sjexbw: sa = sjexbw.bws['sj']['a'].get(chrom,0,csize) ea = sjexbw.bws['ex']['a'].get(chrom,0,csize) a = sa+ea # sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values] sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values] sj = sj[sj['sjratio']>params['th_sjratio']] GGB.write_bed(sj, dstpath, ncols=12) def filter_sjdf(bwsjpre, statspath, chrom, csize, params): # read in junction stats stats = UT.read_pandas(statspath) if 'chr' not in stats: stats['chr'] = [x.split(':')[0] for x in stats['locus']] if '#detected' in stats: stats.rename(columns={'#detected':'detected'}, inplace=True) stats = stats[stats['chr']==chrom].copy() if 'pc' not in stats: stats['pc'] = [locus2pc(x) for x in stats['locus']] flds = ['detected','maxcnt','maxoverhang'] dics = {f: UT.df2dict(stats, 'pc', f) for f in flds} # read sjdf fpath_chr = bwsjpre+'.sjdf.{0}.txt.gz'.format(chrom) dstpath = bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom) if os.path.exists(fpath_chr): sj = UT.read_pandas(fpath_chr, names=A3.SJDFCOLS) else: fpath = bwsjpre+'.sjdf.txt.gz' sj = UT.read_pandas(fpath, names=A3.SJDFCOLS) sj = sj[sj['chr']==chrom].copy() # filter unstranded if params['filter_unstranded']: sj = sj[sj['strand'].isin(['+','-'])].copy() # filter with stats for f in flds: # sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']] sj[f] = [dics[f].get(y,0) for y in sj['name']] sj = sj[sj[f]>params['th_'+f]].copy() # filter # edge exon size # sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']] # sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']] # eth = params['th_minedgeexon'] # sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy() # calculate sjratio, sjratio if params['filter_unstranded']: sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False) else: sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True) with sjexbw: sa = sjexbw.bws['sj']['a'].get(chrom,0,csize) ea = sjexbw.bws['ex']['a'].get(chrom,0,csize) a = sa+ea # sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values] sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values] sj = sj[sj['sjratio']>params['th_sjratio']] UT.write_pandas(sj[A3.SJDFCOLS], dstpath, '') def sjfiltered2wig(bwpre, chrom, chromsize): a = {'+':N.zeros(chromsize, dtype=N.float64), '-':N.zeros(chromsize, dtype=N.float64), '.':N.zeros(chromsize, dtype=N.float64)} path = bwpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom) sjchr = UT.read_pandas(path, names=A3.SJDFCOLS) for st,ed,v,strand in sjchr[['st','ed','tcnt','strand']].values: a[strand[0]][st:ed] += v for strand in a: wig = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand) cybw.array2wiggle_chr64(a[strand], chrom, wig) return path def sjfiltered2bw(bwpre, genome, np=12): chroms = 
UT.chroms(genome) chromdf = UT.chromdf(genome).sort_values('size',ascending=False) chroms = [x for x in chromdf['chr'] if x in chroms] chromdic = UT.df2dict(chromdf, 'chr', 'size') args = [(bwpre, c, chromdic[c]) for c in chroms] rslts = UT.process_mp(sjfiltered2wig, args, np=np, doreduce=False) S2N = {'+':'p','-':'n','.':'u'} rmfiles = [] for strand in ['+','-','.']: s = S2N[strand] wigpath = bwpre+'.filtered.sj.{0}.wig'.format(s) with open(wigpath, 'w') as dst: for chrom in chroms: f = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand) with open(f,'r') as src: shutil.copyfileobj(src, dst) rmfiles.append(f) bwpath = bwpre+'.filtered.sj.{0}.bw'.format(s) BT.wig2bw(wigpath, UT.chromsizes(genome), bwpath) rmfiles.append(wigpath) for f in rmfiles: os.unlink(f) ############# Cov Estimator ###################################################### class LocalEstimator(A3.LocalAssembler): def __init__(self, modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom=False): self.modelpre = modelpre self.tcovth = tcovth self.usegeom = usegeom A3.LocalAssembler.__init__(self, bwpre, chrom, st, ed, dstpre) bed12 = GGB.read_bed(modelpre+'.paths.withse.bed.gz') assert(all(bed12['tst']<bed12['ted'])) idx = (bed12['chr']==chrom)&(bed12['tst']>=st)&(bed12['ted']<=ed) self.paths = bed12[idx].copy() eids = set() sids = set() for n in self.paths['name']: eids.update(n.split('|')) sids.update(n.split(',')[1:-1]) tgt1 = bwpre+'.filtered.{0}.bed.gz'.format(chrom) tgt2 = bwpre+'.{0}.bed.gz'.format(chrom) tgt3 = bwpre+'.sjpath.bed.gz' if os.path.exists(tgt1): sj = GGB.read_bed(tgt1) elif os.path.exists(tgt2): sj = GGB.read_bed(tgt2) else: sj = GGB.read_bed(tgt3) idx0 = (sj['chr']==chrom)&(sj['tst']>=st)&(sj['ted']<=ed) self.sjpaths0 = sj[idx0].copy() # load exdf, sjdf sjdf = UT.read_pandas(modelpre+'.sjdf.txt.gz', names=A3.SJDFCOLS) sjdf['tst'] = sjdf['st'] # for sjpath compatibility sjdf['ted'] = sjdf['ed'] sjdf['sc1'] = sjdf['ucnt'] sjdf['sc2'] = sjdf['tcnt'] sjdf = sjdf[(sjdf['chr']==chrom)&(sjdf['st']>=st)&(sjdf['ed']<=ed)] sjdf = sjdf[sjdf['name'].isin(sids)] self.sjdf = sjdf.groupby(['chr','st','ed','strand']).first().reset_index() exdf = UT.read_pandas(modelpre+'.exdf.txt.gz', names=A3.EXDFCOLS) exdf = exdf[(exdf['chr']==chrom)&(exdf['st']>=st)&(exdf['ed']<=ed)] exdf = exdf[exdf['name'].isin(eids)] if os.path.exists(modelpre+'.sedf.txt.gz'): sedf = UT.read_pandas(modelpre+'.sedf.txt.gz', names=A3.EXDFCOLS) sedf = sedf[(sedf['chr']==chrom)&(sedf['st']>=st)&(sedf['ed']<=ed)] sedf = sedf[sedf['name'].isin(eids)] exdf = PD.concat([exdf,sedf],ignore_index=True) self.exdf = exdf.groupby(['chr','st','ed','strand','kind']).first().reset_index() A3.set_ad_pos(self.sjdf, 'sj') A3.set_ad_pos(self.exdf, 'ex') # filled self.filled = {} sjs = self.sjdf exs = self.exdf[self.exdf['kind']=='i'].copy() exs['ost'] = exs['st']-self.st exs['oed'] = exs['ed']-self.st for s in ['+','-']: sja = self.arrs['sj'][s] sj = sjs[sjs['strand'].isin(A3.STRS[s])] ex = exs[exs['strand'].isin(A3.STRS[s])] self.filled[s] = A3.fill_gap(sja, sj, ex, s, self.st) # fix_i53completematch(self.exdf, self.paths) # extend 5'3' exons completely matched internal exons def process(self): self.calculate_ecovs() self.calculate_scovs() self.estimate_abundance() self.write() return def calculate_scovs(self): sj = self.sjdf sj0 = self.sjpaths0 sj0mat = sj0[['sc1','sc2','name']].values tmp = [[(sc1,sc2) for sc1,sc2,p in sj0mat if y in p] for y in sj['name']] sj['ucnt'] = [N.sum([x[0] for x in y]) for y in tmp] sj['tcnt'] = [N.sum([x[1] for x in y]) for y in 
tmp] self.sjdfi = sj.set_index('name') def calculate_branchp(self, jids, eids): sj0 = self.sjdfi sj = sj0.ix[jids].reset_index() ex0 = self.exdfi ex = ex0.ix[eids].reset_index() dsump = sj.groupby('dpos')['tcnt'].sum().astype(float) tmp = dsump.ix[sj['dpos'].values] jdp = sj['tcnt'].values/tmp.values idx = N.array(tmp==0, dtype=bool) jdp[idx] = 0. j2p = dict(zip(sj['name'].values, jdp)) # exon groupby acceptor asump = ex.groupby('apos')['ecov'].sum().astype(float) tmp = asump.ix[ex['apos'].values] eap = ex['ecov'].values/(tmp.values) idx = N.array(tmp==0, dtype=bool) eap[idx] = 0. e2ap = dict(zip(ex['name'].values, eap)) dsump = ex.groupby('dpos')['ecov'].sum().astype(float) tmp = dsump.ix[ex['dpos'].values] edp = ex['ecov'].values/(tmp.values) idx = N.array(tmp==0, dtype=bool) edp[idx] = 0. e2dp = dict(zip(ex['name'].values, edp)) return j2p, e2ap, e2dp def tcov_by_nnls(self, s, e, strand): o = int(self.st) p = self.paths idx = (p['tst']>=s)&(p['ted']<=e)&(p['strand'].isin(A3.STRS[strand])) ps = p[idx] if len(ps)==0: return None pg = ps.groupby(['tst','ted']).first().reset_index()[['chr','tst','ted','strand','name']].sort_values(['tst','ted']) pg['strand'] = strand ne = len(pg) exa = self.arrs['ex'][strand] # sja = self.arrs['sj'][strand] sja = self.filled[strand] def cov0(s,e): # return N.sum(sja[s-o:e-o]+exa[s-o:e-o])/(e-s) return N.mean(sja[s-o:e-o]) # def cov1s(s): # s0 = max(0, int(s)-o-10) # s1 = max(s0+1,int(s)-o) # return N.mean(exa[s0:s1]) # def cov1e(e): # return N.mean(exa[int(e)-o:int(e)-o+10]) e_ed2cov = self.eed2cov[strand] e_st2cov = self.est2cov[strand] def cov1s(s): return e_ed2cov.get(s,0) def cov1e(e): return e_st2cov.get(e,0) def cov2s(s): # donor # s0 = max(0, s-o-1) return max(0, sja[int(s)-o]-sja[int(s)-o-1]) def cov2e(e): # acceptor # e0 = max(0, e-o-1) return max(0, sja[int(e)-o-1]-sja[int(e)-o]) # cov0 if ne>1: pg.rename(columns={'tst':'st','ted':'ed'}, inplace=True) pg['eid'] = N.arange(len(pg)) ci = UT.chopintervals(pg, idcol='eid') ci['cov'] = [cov0(s,e) for s,e in ci[['st','ed']].values] ci['name1'] = ci['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')]) nc = len(ci) mat = N.zeros((nc,ne)) for i,n1 in enumerate(ci['name1'].values):# fill in rows N.put(mat[i], N.array(n1), 1) try: ecov,err = nnls(mat, ci['cov'].values) pg['tcov0a'] = ecov except e: # too much iteration? LOG.warning('!!!!!! 
Exception in NNLS (tcov_by_nnls) @{0}:{1}-{2}, setting to zero !!!!!!!!!'.format(self.chrom, s, e)) pg['tcov0a'] = 0 # raise e pg.rename(columns={'st':'tst','ed':'ted'}, inplace=True) else: # this includes single exons s,e = pg.iloc[0][['tst','ted']] pg['tcov0a'] = cov0(s,e) # cov1, cov2 if ne>1: sts = sorted(set(pg['tst'].values)) eds = sorted(set(pg['ted'].values)) nst,ned = len(sts),len(eds) mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float) c = N.array([cov1s(x) for x in sts]+[cov1e(x) for x in eds]) # enforce flux conservation: scale up 5' stsum = N.sum(c[:nst]) edsum = N.sum(c[nst:]) if stsum<1e-9 or edsum<1e-9: pg['tcov0b'] = 0 else: c0 = c.copy() if strand in ['+','.+']: c[:nst] = (edsum/stsum)*c[:nst] else: c[nst:] = (stsum/edsum)*c[nst:] try: ecov,err = nnls(mat, c) except e: print('s:{0},e:{1},strand:{2}'.format(s,e,strand)) print('stsum:', stsum) print('edsum:', edsum) print('nnls error tcov0b', mat, c, c0) print('sts:',sts) print('eds:',eds) print('pg:',pg) pg['tcov0c'] = 0 raise e pg['tcov0b'] = ecov mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float) c = N.array([cov2s(x) for x in sts]+[cov2e(x) for x in eds]) # enforce flux conservation: scale up 5' stsum = N.sum(c[:nst]) edsum = N.sum(c[nst:]) if stsum<1e-9 or edsum<1e-9: pg['tcov0c'] = 0 else: if strand in ['+','.+']: c[:nst] = (edsum/stsum)*c[:nst] else: c[nst:] = (stsum/edsum)*c[nst:] try: ecov,err = nnls(mat, c) except e: print('s:{0},e:{1},strand:{2}'.format(s,e,strand)) print('nnls error tcov0c', mat, c) pg['tcov0c'] = 0 raise e pg['tcov0c'] = ecov else: s,e = pg.iloc[0][['tst','ted']] pg['tcov0b'] = (cov1s(s)+cov1e(e))/2. pg['tcov0c'] = (cov2s(s)+cov2e(e))/2. if not self.usegeom: # pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].mean(axis=1) # pg['tcov0'] = (2*pg['tcov0a']+pg['tcov0b']+pg['tcov0c'])/4. # weighted pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].median(axis=1) else: pg['tcov0'] = N.power(pg['tcov0a']*pg['tcov0b']*pg['tcov0c'], 1/3.) 
# geometric mean pg.loc[pg['tcov0']<0,'tcov0'] = 0 # shouldn't really happen keys = [tuple(x) for x in p[idx][['tst','ted']].values] for f in ['tcov0','tcov0a','tcov0b','tcov0c']: p.loc[idx, f] = pg.set_index(['tst','ted']).ix[keys][f].values return pg[['chr','tst','ted','strand','tcov0']] def tcov_by_branchp(self, tst, ted, strand, tcov0): p = self.paths idx = (p['strand'].isin(A3.STRS[strand]))&(p['tst']==tst)&(p['ted']==ted) if N.sum(idx)==0: return # if N.sum(idx)>1: # calculate branchp within this group jids = set() eids = set() for n in p[idx]['name']: jids.update(n.split(',')[1:-1]) eids.update(n.split('|')) j2p, e2ap, e2dp = self.calculate_branchp(jids, eids) def _prob(y): epath0 = y.split('|') e5 = epath0[0] # use donor p epath = epath0[1:] # use acceptor p jpath = y.split(',')[1:-1] return e2dp[e5]*N.prod([e2ap[x] for x in epath])*N.prod([j2p[x] for x in jpath]) p.loc[idx,'tcov'] = [tcov0*_prob(y) for y in p[idx]['name']] # else: # p.loc[idx,'tcov'] = tcov0 def estimate_abundance(self): # 1) 5-3 group by NNLS # 2) within 5-3 group by tree branch prob paths = self.paths idxme = paths['name'].str.contains('\|') mepaths = paths[idxme].copy() sepaths = paths[~idxme].copy() self.paths = mepaths for s in ['+','-']: ps = mepaths[mepaths['strand'].isin(A3.STRS[s])] if len(ps)==0: continue # for chrom,st,ed in UT.union_contiguous(ps[['chr','st','ed']],returndf=False): poscols = ['chr','tst','ted'] for chrom,st,ed in UT.union_contiguous(ps[poscols],pos_cols=poscols,returndf=False): pg = self.tcov_by_nnls(st,ed,s) if pg is not None: for chrom,tst,ted,strand,tcov0 in pg.values: self.tcov_by_branchp(tst,ted,strand,tcov0) e2c = UT.df2dict(self.exdf, 'name', 'ecov') sepaths['tcov'] = [e2c[x] for x in sepaths['name']] for f in ['tcov0','tcov0b']: sepaths[f] = sepaths['tcov'] sepaths['tcov0a'] = 0. sepaths['tcov0c'] = 0. paths =
PD.concat([mepaths, sepaths], ignore_index=True)
pandas.concat
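Illustrative sketch (not part of the dataset row above): row-wise pandas.concat with ignore_index=True, the same call shape the prompt uses to merge its multi-exon and single-exon path tables; the two frames below are invented stand-ins.

import pandas as pd

mepaths = pd.DataFrame({"name": ["a|b", "b|c"], "tcov": [1.5, 0.2]})
sepaths = pd.DataFrame({"name": ["d"], "tcov": [3.0]})

# Stack rows; ignore_index=True discards the original row labels and builds a
# fresh 0..n-1 RangeIndex on the result.
paths = pd.concat([mepaths, sepaths], ignore_index=True)
print(paths)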
import pandas as pd import numpy as np import scipy as sp import matplotlib.pyplot as plt import seaborn as sns import os def load_systems(file_name, exclude_reps=0, delim=','): """ Reads scenario data from a .csv file (assumes comma delimited). Assumes that each column represents a scenario. Returns a numpy array. Each row is a scenario, each col a replication. Assumes all scenarios have the same number of replications. @file_name = name of file containing csv data @delim = delimiter of file. Default = ',' for CSV. Notes: should this be in this module? """ return np.genfromtxt(file_name, delimiter=delim, skip_footer=exclude_reps) def load_model_file(filepath): return [filepath + "/" + f for f in os.listdir(filepath) if os.path.isfile(os.path.join(filepath, f))] def get_best_subset(dfs, labels, subset): """ Returns the best subset and best systems from a collection of performance measures across multiple Keyword arguments: dfs -- list of numpy.ndarrys labels -- labels/names of numpy.ndarrys in @dfs subset -- list of indexes to return """ df_list = [] for i in range(len(dfs)): df_sub = dfs[i][subset].mean() df_sub.rename(labels[i], inplace=True) df_list.append(df_sub) subset_kpi = pd.concat(df_list, axis=1) best_system_index = subset_kpi.sort_values(by=labels).index[0] return best_system_index, subset_kpi def best_subset_table(df_kpi, indexes, doe_file_name): """ """ df_doe = pd.read_csv(doe_file_name, index_col='System') df_doe.index -= 1 df_kpi = df_kpi[df_kpi.index.isin(indexes)] temp = df_doe[df_doe.index.isin(indexes)] df_subset_table =
pd.concat([temp, df_kpi], axis=1)
pandas.concat
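Illustrative sketch (not part of the dataset row above): column-wise pandas.concat (axis=1), which aligns on the row index; that alignment is why the prompt filters both frames to the same system indexes before joining the design table to the KPI summary. The data below are invented.

import pandas as pd

design = pd.DataFrame({"factor": [10, 20]}, index=[0, 1])
kpis = pd.DataFrame({"mean_wait": [4.2, 3.1]}, index=[0, 1])

# Columns from both frames, matched row-by-row on the shared index.
table = pd.concat([design, kpis], axis=1)
print(table)
#    factor  mean_wait
# 0      10        4.2
# 1      20        3.1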
import os import datetime import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.layers import Dense, LSTM, Flatten DATA_PATH = 'data/' MODEL_PATH = 'model/' EPOCHS = 2 BATCH_SIZE = 60 class trainer: def __init__(self, type): try: if int(type) == 1: type = 'normal' elif int(type) == 2: type = 'attack' else: type = 'normal' print('Type was set to normal default.') except ValueError: raise ('Invalid type [ Select number 1 to normal or 2 to attack ]') self.type = type def calc_bytes(self, data): return len(data) / 1024 / 1024 def salveModelTo(self): now = datetime.datetime.now() self.timeLog = str(now.day) + "-" + str(now.month) + "-" + str(now.year) + "_" + str(now.hour) + \ ":" + str(now.minute) + ".nebulosa" self.fileName = self.type + "-" + self.timeLog return MODEL_PATH + self.fileName def LSTM_MODEL(self, inputlstm): model = Sequential() model.add(LSTM( 50, input_shape=inputlstm, return_sequences=True, activation='relu')) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) return model def compile_model(self, model): model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model def list_files(self): files = [] for r, d, f in os.walk(DATA_PATH + self.type + '/'): for file in f: files.append(file) return files def sort_list(self, X): data = [] i_a = [] i_b = [] a = [n for n in X if n[1][0] == 1] b = [n for n in X if n[1][0] == 0] step = 1 while len(data) < len(X): if step == 1: if len(i_a) > 0: i = i_a[(len(i_a) - 1)] else: i = 0 data.append(a[i]) i_a.append((i + 1)) step = 0 else: if len(i_b) > 0: i = i_b[(len(i_b) - 1)] else: i = 0 data.append(b[i]) i_b.append((i + 1)) step = 1 return data def decode_protocol(self, protocol): if protocol == 1: return "TCP" elif protocol == 2: return "UDP" else: return "Others" def get_data(self, list): data = None msg = 'SELECT YOUR DATASET\n\n' j = 0 for file in list: msg += str(j) + ' -> ' + file + '\n' j += 1 print(msg) while (data is None): s = input('dataset> ') try: with open(DATA_PATH + self.type + '/' + str(list[int(s)])) as rf: data = rf.read() rf.close() except IndexError: print('Index invalid, select a number valid.') return data, list[int(s)] def compile(self): self.list_files = self.list_files() if len(self.list_files) == 0: print("Path \"" + DATA_PATH + self.type + "\" is empty") exit() self.data = self.get_data(self.list_files) print('Size of {:.2f}MB of datas.'.format(self.calc_bytes(self.data[0]))) self.data = pd.read_csv(DATA_PATH + self.type + '/' + self.data[1]).values.tolist() data_n_out = [x[2] for x in self.data if x[2] == 0] data_n_input = [x[2] for x in self.data if x[2] == 1] data_n_proto = [self.decode_protocol(x[1]) for x in self.data] data_n_src = [x[3] for x in self.data] data_n_dst = [x[4] for x in self.data] data_n_payload = [x[5] for x in self.data] print('=========== STATUS ===========') print("I/O: ") print('Out=', len(data_n_out), '| Input=', len(data_n_input)) print('\nPROTOCOLS: ') print(pd.DataFrame({'protocol': data_n_proto})['protocol'].value_counts()) print('\nSource Ports: ') print(
pd.DataFrame({'src': data_n_src})
pandas.DataFrame
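Illustrative sketch (not part of the dataset row above): building a one-column pandas.DataFrame from a plain Python list and summarising it with value_counts, as the prompt does for its protocol and port lists; the port numbers are invented.

import pandas as pd

data_n_src = [80, 443, 80, 22, 443, 80]

# Dict-of-lists constructor: key -> column name, list -> column values.
src_counts = pd.DataFrame({"src": data_n_src})["src"].value_counts()
print(src_counts)   # 80 appears 3 times, 443 twice, 22 once (sorted descending)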
import os from sys import argv from shutil import copyfile import time import datetime as dt import werkzeug werkzeug.cached_property = werkzeug.utils.cached_property from robobrowser import RoboBrowser import numpy as np import pandas as pd import scipy.stats as sps import matplotlib.pyplot as plt from matplotlib import dates as mdates from matplotlib import ticker from matplotlib.ticker import FuncFormatter import schedule DIR = os.path.dirname(os.path.realpath(__file__)) HDIs = [90, 50, 10] df = pd.read_csv("SummaryInfo.csv", index_col=0) theta_omega = df.loc["theta_omega"]["Median"] theta_sigma = df.loc["theta_sigma"]["Median"] def millions(x, pos): 'The two args are the value and tick position' return f'{x*1e-6:.0f}' million_formatter = FuncFormatter(millions) def percentage(x, pos): 'The two args are the value and tick position' return f'{x:.0f}%' percentage_formatter = FuncFormatter(percentage) def HDI_of_grid(probMassVec, credMass=0.95): sortedProbMass = np.sort(probMassVec, axis=None)[::-1] HDIheightIdx = np.min(np.where(np.cumsum(sortedProbMass) >= credMass)) HDIheight = sortedProbMass[HDIheightIdx] HDImass = np.sum(probMassVec[probMassVec >= HDIheight]) idx = np.where(probMassVec >= HDIheight)[0] return {'indexes':idx, 'mass':HDImass, 'height':HDIheight} def HDI_of_grid_from_df(pmf, p): # If we pass a DataFrame, just call this recursively on the columns if(isinstance(pmf, pd.DataFrame)): return pd.DataFrame([HDI_of_grid_from_df(pmf[col], p=p) for col in pmf], index=pmf.columns) res = HDI_of_grid(pmf, p) #print(res["indexes"]) lo_idx = res["indexes"][0] hi_idx = res["indexes"][-1] lo = pmf.index[lo_idx] hi = pmf.index[hi_idx] return pd.Series([lo, hi], index=[f'Low_{p*100:.0f}', f'High_{p*100:.0f}']) def gamma_params(x): omega = x * theta_omega sigma = x * theta_sigma rate = (omega + np.sqrt(omega**2 + 4*(sigma**2))) / (2*(sigma**2)) shape = 1 + omega*rate scale = 1 / rate return (shape, scale) def gamma_priors(x, sigma=353333): omega = x rate = (omega + np.sqrt(omega**2 + 4*(sigma**2))) / (2*(sigma**2)) shape = 1 + omega*rate scale = 1 / rate return (shape, scale) def plot(results): fig, ax = plt.subplots(1,2,figsize=(14,5)) ax[0].plot( results.index, results["ML"], label='Most Likely', c='k', ) ax[1].plot( results.index, results["ML"]/it_pop*100, label='Most Likely', c='k', ) for i, hdi in enumerate(HDIs): ax[0].fill_between(results.index, results[f'Low_{hdi}'], results[f'High_{hdi}'], color='k', alpha=i*.2+.1, zorder=i+1, lw=0, label=f'HDI {hdi}%') ax[1].fill_between(results.index, results[f'Low_{hdi}']/it_pop*100, results[f'High_{hdi}']/it_pop*100, color='k', alpha=i*.2+.1, zorder=i+1, lw=0, label=f'HDI {hdi}%') ax[0].legend(loc="upper left") ax[1].legend(loc="upper left") for yy in np.arange(1e6, 10e6+1, 1e6): ax[0].axhline(yy, c="r", ls=":", alpha=.75, zorder=10) for yy in np.arange(2, 20+1, 2): ax[1].axhline(yy, c="r", ls=":", alpha=.75, zorder=10) ax[0].set_ylim(0, results[f'High_{HDIs[0]}'][-1]) ax[1].set_ylim(0, results[f'High_{HDIs[0]}'][-1]/it_pop*100) ax[0].set_title("Downloads totali stimati dell'app Immuni") ax[0].set_xlim(results.index[0], results.index[-1]) ax[0].xaxis.set_major_locator(mdates.MonthLocator()) ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%b')) ax[0].xaxis.set_minor_locator(mdates.DayLocator()) ax[0].yaxis.set_major_formatter(million_formatter) ax[0].set_ylabel("Milioni di downloads") ax[1].set_title("Percentuale di downloads stimati su popolazione") ax[1].set_xlim(results.index[0], results.index[-1]) 
ax[1].xaxis.set_major_locator(mdates.MonthLocator()) ax[1].xaxis.set_major_formatter(mdates.DateFormatter('%b')) ax[1].xaxis.set_minor_locator(mdates.DayLocator()) ax[1].yaxis.set_major_formatter(percentage_formatter) ax[1].set_ylabel("Percentuali") fig.set_facecolor('w') plt.savefig("immuni.png", bbox_inches='tight') plt.close(fig="all") def bayes(reviews, column="reviews"): D_rows = sps.gamma.pdf(reviews[column].values[:,None], a=shape, scale=scale) D_rows = D_rows.transpose() likelihoods = pd.DataFrame( data=D_rows, columns=reviews.index, index=D_range[1:] ) posteriors = pd.DataFrame( index=D_range[1:], columns=reviews.index, data={ reviews.index[0]: prior0, } ) for previous_day, current_day in zip(reviews.index[:-1], reviews.index[1:]): current_prior = priors @ posteriors[previous_day] numerator = likelihoods[current_day] * current_prior denominator = np.sum(numerator) posteriors[current_day] = numerator/denominator return posteriors def update(from_browser=True): old =
pd.read_csv("immuni-reviews.csv", index_col=["date"], parse_dates=["date"])
pandas.read_csv
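A minimal, self-contained sketch of the read_csv call completed above, using an in-memory CSV in place of the real "immuni-reviews.csv" (the column names and values here are assumptions):

import io
import pandas as pd

csv_text = "date,reviews\n2020-06-15,1200\n2020-06-16,950\n"
reviews = pd.read_csv(io.StringIO(csv_text), index_col=["date"], parse_dates=["date"])
print(reviews.index)  # DatetimeIndex, so the date axis can drive the mdates formatting used above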
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from keras.layers import Lambda, Input, Dense, Flatten, Reshape, Conv2DTranspose, Conv2D from keras.models import Model from keras.datasets import mnist from keras.losses import mse, binary_crossentropy from keras.utils import plot_model from keras import backend as K from keras.optimizers import Adam import numpy as np import matplotlib.pyplot as plt import argparse import os import pretty_midi from sklearn.model_selection import train_test_split import pandas as pd from sklearn.utils import shuffle class CVae: index = 0 def __init__(self): self.epochs = 10 self.batch_size = 32 self.intermediate_dim = 128 self.latent_dim = 2 self.random_state = 42 self.dataset_size = 500 #10000 self.list_files_name= [] self.file_shuffle=[] self.test_size=0.25 self.res = 64 # min 8 self.random_state = 42 self.filters = 16 self.kernel_size = 3 self.range_of_notes_to_extract = 16 self.number_of_data_to_extract = self.res * 2 #self.plot = plt.plot([], []) path_midi_file_to_initialize_model = "ressources/file_to_load_model/example_midi_file.mid" #path_midi_file_to_initialize_model = "/home/kyrillos/CODE/VAEMIDI/MuGen-master/ressources/file_to_load_model/example_midi_file.mid" data_to_initialize_model = self.load_data(path_midi_file_to_initialize_model, 0, 2) self.original_dim = data_to_initialize_model[0].shape[1] self.vae, self.encoder, self.decoder = self.compile_model(data_to_initialize_model) #weights = "src/Convolutional_VAE/good_only.h5" #weights = "src/Convolutional_VAE/all_dataset.h5" #print("LOADING WEIGHTS") #self.vae.load_weights(weights) #REMEMBER NO WEIGHTS data_to_plot = self.load_all_data("datasets/quantized_rythm_dataset_v2_temperature/100",1,0) self.create_plot(data_to_plot) def create_plot(self, data_to_plot): data_to_plot_x,data_to_plot_y = data_to_plot # display a 2D plot of the digit classes in the latent space z_mean, _, _ = self.encoder.predict(data_to_plot_x,batch_size=self.batch_size) plt.figure(figsize=(12, 10)) plt.scatter(z_mean[:, 0], z_mean[:, 1], c=data_to_plot_y) plt.colorbar() plt.xlabel("z[0]") plt.ylabel("z[1]") #for i, txt in enumerate(self.list_files_name): # pour toute la dataset [ :int(dataset_size *2* test_size)] #plt.annotate(txt, (z_mean[i, 0], z_mean[i, 1])) #plt.show(block=False) def add_to_plot(self, z_mean, data_to_plot_y): plt.scatter(z_mean[:, 0], z_mean[:, 1], c=[self.index]) #plt.colorbar() def sampling(self,args): z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] # by default, random_normal has mean = 0 and std = 1.0 epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon def get_coord(self,data_to_plot,batch_size=128, show_annotation=False): data_to_plot_x,data_to_plot_y = data_to_plot # display a 2D plot of the digit classes in the latent space z_mean, _, _ = self.encoder.predict(data_to_plot_x,batch_size=batch_size) self.add_to_plot(z_mean,data_to_plot_y) #print(len(z_mean)) if show_annotation: plt.figure(figsize=(12, 10)) plt.scatter(z_mean[:, 0], z_mean[:, 1], c=data_to_plot_y) plt.colorbar() plt.xlabel("z[0]") plt.ylabel("z[1]") print("DANS LANOATIONS") for i, txt in enumerate(self.list_files_name): #pour toute la dataset [ :int(dataset_size *2* test_size)] plt.annotate(txt,(z_mean[i,0], z_mean[i,1])) #plt.show(block=False) return z_mean def compile_model(self, data_to_plot_x): # network parameters input_shape = (self.number_of_data_to_extract,) # Convolutional VAE # 
ENCODER input_shape = (self.number_of_data_to_extract, self.range_of_notes_to_extract, 1) # datasize inputs = Input(shape=input_shape, name='encoder_input') x = inputs for i in range(2): self.filters *= 2 x = Conv2D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', strides=2, padding='same')(x) # shape info needed to build decoder model shape = K.int_shape(x) # generate latent vector Q(z|X) x = Flatten()(x) x = Dense(16, activation='relu')(x) z_mean = Dense(self.latent_dim, name='z_mean')(x) z_log_var = Dense(self.latent_dim, name='z_log_var')(x) # use reparameterization trick to push the sampling out as input # note that "output_shape" isn't necessary with the TensorFlow backend z = Lambda(self.sampling, output_shape=(self.latent_dim,), name='z')([z_mean, z_log_var]) # instantiate encoder model encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') # encoder.summary() # DECODER latent_inputs = Input(shape=(self.latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) # use Conv2DTranspose to reverse the conv layers from the encoder for i in range(2): x = Conv2DTranspose(filters=self.filters, kernel_size=self.kernel_size, activation='relu', strides=2, padding='same')(x) self.filters //= 2 outputs = Conv2DTranspose(filters=1, kernel_size=self.kernel_size, activation='sigmoid', padding='same', name='decoder_output')(x) # instantiate decoder model decoder = Model(latent_inputs, outputs, name='decoder') # decoder.summary() # Building the VAE outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') # LOSS use_mse = True if use_mse: reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs)) else: reconstruction_loss = binary_crossentropy(K.flatten(inputs), K.flatten(outputs)) reconstruction_loss *= self.range_of_notes_to_extract * self.number_of_data_to_extract kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var) kl_loss = K.sum(kl_loss, axis=-1) kl_loss *= -0.5 vae_loss = K.mean(reconstruction_loss + kl_loss) vae.add_loss(vae_loss) # Compile the VAE vae.compile(optimizer='rmsprop') # vae.summary() return vae, encoder, decoder def load_all_data(self,path, class_label, index_filename): print("LOADING PREVIOUS PLOT ") features = [] path, dirs, files = next(os.walk(path)) num_size = len(dirs) current_folder = 0 num_files = 0 for subdir, dirs, files in os.walk(path): for file in files: if num_files < self.dataset_size: if file != ".DS_Store": # print(os.path.join(subdir, file)) # try: midi_data = pretty_midi.PrettyMIDI(subdir + "/" + file) for instrument in midi_data.instruments: instrument.is_drum = False if len(midi_data.instruments) > 0: data = midi_data.get_piano_roll(fs=self.res)[35:51, 0:self.number_of_data_to_extract].astype( dtype=bool) data = data.flatten() if data.size >= 16 * self.number_of_data_to_extract: features.append([data, class_label]) self.list_files_name.insert(index_filename + num_files, file) num_files += 1 # except: # print("An exception occurred") current_folder += 1 print("Done ", num_files, " from ", current_folder, " folders on ", num_size) # Convert into a Panda dataframe featuresdf = pd.DataFrame(features, columns=['feature', 'class_label']) #print('Finished feature extraction from ', len(featuresdf), ' files') # Convert features & labels into numpy arrays listed_feature = featuresdf.feature.tolist() X = np.array(featuresdf.feature.tolist()) y = np.array(featuresdf.class_label.tolist()) print(X.shape, 
y.shape) X_shuffle = shuffle(X, random_state=self.random_state) y_shuffle = shuffle(y, random_state=self.random_state) file_shuffle = shuffle(self.list_files_name, random_state=self.random_state) data_to_plot_x = np.reshape(X, [-1, self.number_of_data_to_extract, self.range_of_notes_to_extract, 1]) data_to_plot_y = y return data_to_plot_x, data_to_plot_y def load_data(self ,path, class_label, index_filename ): features = [] midi_data = pretty_midi.PrettyMIDI(path) for instrument in midi_data.instruments: instrument.is_drum = False if len(midi_data.instruments) > 0: data = midi_data.get_piano_roll(fs=self.res)[35:51, 0:self.number_of_data_to_extract].astype( dtype=bool) data = data.flatten() if data.size >= 16 * self.number_of_data_to_extract: features.append([data, class_label]) # except: # print("An exception occurred") # Convert into a Panda dataframe featuresdf =
pd.DataFrame(features, columns=['feature', 'class_label'])
pandas.DataFrame
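A small sketch of the feature-collection pattern used in load_data / load_all_data above, with invented boolean arrays standing in for real piano-roll features:

import numpy as np
import pandas as pd

features = [[np.zeros(8, dtype=bool), 0], [np.ones(8, dtype=bool), 1]]
featuresdf = pd.DataFrame(features, columns=['feature', 'class_label'])
X = np.array(featuresdf.feature.tolist())        # (2, 8) feature matrix
y = np.array(featuresdf.class_label.tolist())    # (2,) label vector
print(X.shape, y.shape)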
import pandas as pd import numpy as np import os from scipy.spatial import distance import networkx as nx import math import scipy.sparse as sp from glob import glob import argparse import time parser = argparse.ArgumentParser(description='Main Entrance of MP_MIM_RESEPT') parser.add_argument('--sampleName', type=str, default='151507') parser.add_argument('--MP-k-num', type=int, default=90, help='number of k_num in KNN graph of message passing (default: 90)') parser.add_argument('--MP-l-num', type=int, default=15, help='number of layer_num in message passing (default: 15)') args = parser.parse_args() ####KNN # knn_graph_edgelist def calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k): edgeListWeighted = [] for i in np.arange(featureMatrix.shape[0]): tmp = featureMatrix[i, :].reshape(1, -1) distMat = distance.cdist(tmp, featureMatrix, distanceType) res = distMat.argsort()[:k + 1] tmpdist = distMat[0, res[0][1:k + 1]] boundary = np.mean(tmpdist) + np.std(tmpdist) for j in np.arange(1, k + 1): if distMat[0, res[0][j]] <= boundary and i != res[0][j] : edgeListWeighted.append((i, res[0][j], 1)) return edgeListWeighted # generate_adj_nx_matirx def generate_adj_nx_weighted_adj(featureMatrix, distanceType, k): edgeList = calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k) nodes = range(0,featureMatrix.shape[0]) Gtmp = nx.Graph() Gtmp.add_nodes_from(nodes) Gtmp.add_weighted_edges_from(edgeList) adj = nx.adjacency_matrix(Gtmp) adj_knn_by_feature = np.array(adj.todense()) return adj_knn_by_feature # generate_self_loop_adj def preprocess_graph_self_loop(adj): adj = sp.coo_matrix(adj) adj_ = adj + sp.eye(adj.shape[0]) adj_ = adj_.A return adj_ ####MP # attention_ave def gat_forward_att_ave(adj, Wh): attention_ave = adj attention_ave_par = attention_ave.sum(axis=1, keepdims=True) attention_ave_final = attention_ave/attention_ave_par h_prime = np.dot(attention_ave_final, Wh) return h_prime # attention_dis def softmax(X): X_exp = np.exp(X) partition = X_exp.sum(axis=1, keepdims=True) return X_exp/partition def _prepare_euclidean_attentional_mechanism_input(Wh): distMat = distance.cdist(Wh, Wh, 'euclidean') return distMat def gat_forward_euclidean(adj, Wh): e = _prepare_euclidean_attentional_mechanism_input(Wh) zero_vec = -9e15*np.ones_like(e) attention = np.where(adj > 0, e, zero_vec) attention = softmax(attention) h_prime = np.dot(attention, Wh) return h_prime # layer_loop_att_ave def forward_basic_gcn_multi_layer(adj, Wh, layer_num): hidden = Wh for num in range(layer_num): h = gat_forward_att_ave(adj , hidden) hidden = h #print(num) return hidden # layer_loop_att_euc def forward_dis_gcn_multi_layer(adj, Wh, layer_num): hidden = Wh for num in range(layer_num): h = gat_forward_euclidean(adj , hidden) hidden = h #print(num) return hidden ####MI_GC # MI def Moran_I(multi_hop_weight_mat, feature, MI_type='normal'): if MI_type == 'normal': w = multi_hop_weight_mat y = feature n = len(y) z = y - y.mean() z2ss = (z * z).sum() s0 = np.sum(w) zl = np.dot(w , z) inum = (z * zl).sum() MI = n / s0 * inum / z2ss if MI_type == 'row_normalizaiton': WR_temp = multi_hop_weight_mat WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1])) each_row_sum_list=[] for i in range(WR_temp.shape[0]): each_row_sum_list.append(np.sum(WR_temp[i,:])) for i in range(WR_temp.shape[0]): for j in range(WR_temp.shape[1]): if WR_temp[i,j] != 0: WR[i,j] = WR_temp[i,j]/each_row_sum_list[i] w = WR y = feature n = len(y) z = y - y.mean() z2ss = (z * z).sum() s0 = np.sum(w) zl = np.dot(w , z) inum = (z * zl).sum() MI = n / 
s0 * inum / z2ss return MI # GC def GC_related(multi_hop_weight_mat, feature, GC_type='normal'): if GC_type == 'normal': w = multi_hop_weight_mat y = np.asarray(feature).flatten() n = len(y) s0 = np.sum(w) yd = y - y.mean() yss = sum(yd * yd) den = yss * s0 * 2.0 _focal_ix, _neighbor_ix = w.nonzero() _weights = csr_matrix(w).data num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum() a = (n - 1) * num GC = a / den if GC > 1: GC_related = GC - 1 if GC < 1: GC_related = 1 - GC if GC == 1: GC_related = 0 if GC_type == 'row_normalizaiton': WR_temp = multi_hop_weight_mat WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1])) each_row_sum_list=[] for i in range(WR_temp.shape[0]): each_row_sum_list.append(np.sum(WR_temp[i,:])) for i in range(WR_temp.shape[0]): for j in range(WR_temp.shape[1]): if WR_temp[i,j] != 0: WR[i,j] = WR_temp[i,j]/each_row_sum_list[i] w = WR y = np.asarray(feature).flatten() n = len(y) s0 = np.sum(w) yd = y - y.mean() yss = sum(yd * yd) den = yss * s0 * 2.0 _focal_ix, _neighbor_ix = w.nonzero() _weights = csr_matrix(w).data num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum() a = (n - 1) * num GC = a / den if GC > 1: GC_related = GC - 1 if GC < 1: GC_related = 1 - GC if GC == 1: GC_related = 0 return GC_related # spatial_adj_knn def calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k): edgeListWeighted = [] for i in np.arange(featureMatrix.shape[0]): tmp = featureMatrix[i, :].reshape(1, -1) distMat = distance.cdist(tmp, featureMatrix, distanceType) res = distMat.argsort()[:k + 1] for j in np.arange(1, k + 1): edgeListWeighted.append((i, res[0][j], 1)) return edgeListWeighted # generate_adj_nx_matirx def generate_spatial_adj_nx_weighted_based_on_coordinate(featureMatrix, distanceType, k): edgeList = calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k) nodes = range(0,featureMatrix.shape[0]) Gtmp = nx.Graph() Gtmp.add_nodes_from(nodes) Gtmp.add_weighted_edges_from(edgeList) adj = nx.adjacency_matrix(Gtmp) adj_knn_by_coordinate = np.array(adj.todense()) return adj_knn_by_coordinate # spatial_adj_distance def MI_spatial_adj_matrix(coordinateMatrix, hop_num=1, distanceType='cityblock'): distMat = distance.cdist(coordinateMatrix, coordinateMatrix, distanceType) multi_hop_weight_mat = np.zeros((distMat.shape[0] , distMat.shape[1])) if distanceType == 'euclidean': if hop_num == 1: for i in range(distMat.shape[0]): for j in range(distMat.shape[1]): if distMat[i][j] <= math.sqrt(2) and distMat[i][j] > 0: multi_hop_weight_mat[i][j] = 1 return multi_hop_weight_mat if __name__ == '__main__': ########RESEPT ####time_computing start_time = time.time() print("MP_MIM_RESEPT. 
Start Time: %s seconds" % (start_time)) ####parameter_set_initial PEalphaList = ['0.1','0.2','0.3', '0.5', '1.0', '1.2', '1.5','2.0'] zdimList = ['3','10', '16','32', '64', '128', '256'] sample = args.sampleName k_num_distance_att = args.MP_k_num layer_num_distance_att = args.MP_l_num ####sample_list sample_list = [ '151507','151508', '151509', '151510', '151669', '151670', '151671', '151672', '151673', '151674', '151675', '151676','18-64','2-5', '2-8', 'T4857'] letter_list = [ 'a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l','m', 'n', 'o', 'p'] count_init = sample_list.index(sample) count = 56*count_init letter = letter_list[count_init] embedding_MIrow_max_list = [] embedding_name_list = [] ####current_os meta_folder_path = os.path.abspath('./meta_data_folder/metaData_brain_16_coords') embedding_folder_path = os.path.abspath('./RESEPT_embedding_folder') embedding_in_RESEPT_folder = "RESEPT_MP_embedding_"+sample+"/" if not os.path.exists(embedding_in_RESEPT_folder): os.makedirs(embedding_in_RESEPT_folder) ####MP_parameter_set k_num_distance_att_list = [10,20,30,40,50,60,70,80,90] layer_num_distance_att_list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ####loop_part for i in range(len(PEalphaList)): for j in range((len(zdimList))): ####read_embedding count = count + 1 embedding_root_path = '/'+sample+'_embedding_raw/'+letter+'_'+str(count)+'_outputdir-3S-'+sample+'_raw_EM1_resolution0.3_euclidean_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'/'+sample+'_raw_6_euclidean_NA_dummy_add_'+str(PEalphaList[i])+'_intersect_160_GridEx19_embedding.csv' embedding_df =
pd.read_csv(embedding_folder_path+embedding_root_path,index_col=0)
pandas.read_csv
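To make the message-passing step above concrete, here is a toy run of the row-normalised averaging that gat_forward_att_ave performs; the adjacency matrix and embeddings are invented:

import numpy as np

adj = np.array([[1, 1, 0],
                [1, 1, 1],
                [0, 1, 1]], dtype=float)   # KNN adjacency with self-loops
Wh = np.array([[0.0], [1.0], [2.0]])       # node embeddings (one dimension for clarity)

attention = adj / adj.sum(axis=1, keepdims=True)
h_prime = attention @ Wh                   # each node becomes the mean of its neighbourhood
print(h_prime.ravel())                     # [0.5 1.  1.5]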
import datetime as dt
import pandas as pd
from fiscalyear import *

# gather FY start/end dates for previous quarter
fq = FiscalQuarter.current().prev_fiscal_quarter
start_date = fq.start.strftime('%Y-%m-%d')
end_date = fq.end.strftime('%Y-%m-%d')
start = dt.datetime.strptime(start_date, '%Y-%m-%d')
end = dt.datetime.strptime(end_date, '%Y-%m-%d')

# build an array for days between dates
date_array = (start + dt.timedelta(days=x) for x in range(0, (end - start).days))

# get a unique list of year-months for url build
months = []
for date_object in date_array:
    months.append(date_object.strftime("%Y-%m"))
months = sorted(set(months))

df = pd.DataFrame(columns=['locationID', 'region', 'sponsor', 'met', 'wave'])
for month in months:
    url = 'https://www.ndbc.noaa.gov/ioosstats/rpts/%s_ioos_regional.csv' % month.replace("-", "_")
    print('Loading %s' % url)
    df1 = pd.read_csv(url, dtype={'met': float, 'wave': float})
    df1['time (UTC)'] = pd.to_datetime(month)
    df = pd.concat([df, df1])

df["time (UTC)"] = pd.to_datetime(df["time (UTC)"])
# Remove time-zone info for easier plotting, it is all UTC.
df["time (UTC)"] = df["time (UTC)"].dt.tz_localize(None)
groups = df.groupby(
pd.Grouper(key="time (UTC)", freq="M")
pandas.Grouper
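A self-contained sketch of the monthly groupby that the completion above sets up; the tiny frame below is invented:

import pandas as pd

df = pd.DataFrame({
    "time (UTC)": pd.to_datetime(["2023-01-05", "2023-01-20", "2023-02-03"]),
    "met": [1.0, 2.0, 3.0],
})
monthly = df.groupby(pd.Grouper(key="time (UTC)", freq="M"))["met"].mean()
print(monthly)  # one mean per calendar month: Jan 1.5, Feb 3.0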
import click from collections import OrderedDict import numpy as np import pandas as pd import xarray as xr import sys import math from pathlib import Path import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.lines as mlines import matplotlib as mpl import matplotlib.cm as cm from matplotlib.collections import PatchCollection from tools import AnyObject, AnyObjectHandler def majority(x, axis=0): return np.argmax(np.bincount( x.astype(np.int))) # modify linewidth of hatches matplotlib.rcParams['hatch.linewidth'] = 0.5 # previous pdf hatch linewidth matplotlib.rcParams['axes.labelsize'] = 10 matplotlib.rcParams['font.size']= 10 # NEW CHILE BIOMIZATION D_cols2 = OrderedDict([('TeBE', "#006400"), ('TeBEX', "#8f8f00"), ('TeBS', "#90ee90"), ('TeSh', "#cdcd00"), ('TeNE', "#6b8e23"), ('BSh', "#ffaeb9"), ('BBS', "#314d5a"), ('BBE', "#45a081"), ('Grass', "#d2b48c")]) INCLUDE_GRASS = True # map individual pfts to pft groups with symbol D_pfts = OrderedDict() D_pfts['TeBE'] = ('TeBE_itm', 'TeBE_tm') D_pfts['TeBS'] = ('TeBS_itm', 'TeBS_tm') D_pfts['TeBEX'] = ('TeBE_itscl',) D_pfts['TeSh'] = ('TeE_s','TeR_s') D_pfts['TeNE'] = ('TeNE',) D_pfts['BSh'] = ('BE_s',) # 'BS_s') D_pfts['BBS'] = ('BBS_itm',) D_pfts['BBE'] = ('BBE_itm',) if INCLUDE_GRASS: D_pfts['Grass'] = ('C3G',) def shift_legend(leg, ax, x_offset, y_offset): """Shift the legend from the current position """ # Get the bounding box of the original legend bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes) # Change to location of the legend. bb.x0 += x_offset bb.x1 += x_offset bb.y0 -= y_offset bb.y1 -= y_offset leg.set_bbox_to_anchor(bb, transform = ax.transAxes) # PLOT TYPE bg_color = 'white' fg_color = 'white' if bg_color == 'black' else 'black' # read sites location #sites = pd.read_csv('ES_SiteLocations_checked.csv', sep=';') siteName = ['Nahuelbuta', 'La Campana', 'St. 
Gracia', 'Pan de Azucar'] siteNameShort = ['N','LC','SG','PdA'] VARS = ['temp', 'PRECT', 'FSDS'] Dvar = {'temp': "$\mathregular{T_{avg}\ [\degree C]}$", 'prec': "$\mathregular{Precip.\ [mm]}$", 'runoff': "$\mathregular{Runoff\ [mm]}$", 'fpc': "FPC [%]", 'vcf': "Fractional Cover [%] (VCF)", 'co2': 'CO$_2$ [ppm]', 'fire': "Fire RI [yr]", 'erosion': '$\mathregular{Erosion\ [mm\ y^{-1}]}$'} def clean(s): return s.replace('.','').replace(' ', '_') def smoothTriangle(data, degree, dropVals=False): """performs moving triangle smoothing with a variable degree.""" """note that if dropVals is False, output length will be identical to input length, but with copies of data at the flanking regions""" triangle=np.array(range(degree)+[degree]+range(degree)[::-1])+1 smoothed=[] for i in range(degree,len(data)-degree*2): point=data[i:i+len(triangle)]*triangle smoothed.append(sum(point)/sum(triangle)) if dropVals: return smoothed smoothed=[smoothed[0]]*(degree+degree/2)+smoothed while len(smoothed)<len(data):smoothed.append(smoothed[-1]) return smoothed def calc_delta(times): """ create time-slices for: 22000, 18000, 14000, 10000, 6000, 2000, 1990 BP """ btime = 22000 # BP if type(times) == list: idx = [(t+btime)*12 for t in times] else: idx = times+btime*12 return idx def custom_stackplot(axis, x, ys, colors, hatches, **kwargs): """Custom stackplot (with individual hatches)""" prev_d = None cd1 = np.array([0] * len(ys[0])) for i, d2 in enumerate(ys): cd2 = cd1 + np.array(d2) axis.fill_between(x, cd1, cd2, facecolor=colors[i], edgecolor='black', hatch = hatches[i], **kwargs) ## < removes facecolor cd1 = cd2 def read_co2(fname): df = pd.read_table(fname, delim_whitespace=True, header=None) df.columns = ['time', 'CO2'] df['time'] = df.time - 1950 # convert to BP values return df def extract_data(ds, var, lfid=None, lfidvar='landform__ID'): """return mean value for ids""" ds = xr.open_dataset(ds, decode_times=False) ids = [lfid] if lfid else np.unique(ds[lfidvar].values) return {i: ds[var].where(ds[lfidvar]==i).mean().values for i in ids} def extract_area(ds, lfid=None, lfidvar='landform__ID'): """return area value for ids""" ds = xr.open_dataset(ds, decode_times=False) ids = [lfid] if lfid else np.unique(ds[lfidvar].values) tot = ds[lfidvar].count().values return {i: ds[lfidvar].where(ds[lfidvar]==i).count().values / tot for i in ids} def main(data, data_lpj, outname, components, lfid): #def main(fdataname, fbiomename, flandlab, p, maxlfid=False): # refactor this later DO_MAXLFID = False ADD_BIOME = False INCLUDE_GRASS = True DROP_LABELS = False if lfid == -1: DO_MAXLFID = True if components == 'ALL': ADD_BIOME = True ncs = sorted(Path(data).glob('*.nc')) data = [extract_data(nc, 'soil__depth') for nc in ncs] area = [extract_area(nc) for nc in ncs] print(
pd.DataFrame(data)
pandas.DataFrame
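A minimal sketch of the DataFrame built in main() above from the per-file {landform_id: mean_value} dicts returned by extract_data; the values are invented:

import pandas as pd

data = [{1: 0.42, 2: 0.38},   # one dict per NetCDF file
        {1: 0.45, 2: 0.31}]
df = pd.DataFrame(data)       # rows = files, columns = landform IDs
print(df)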
import pandas as pd
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go

file_path = os.path.join('C:/Users/<NAME>/Documents/Python Project/PollGraph', 'polldata_2002_to_2017.csv')
df = pd.read_csv(file_path, encoding='latin1', index_col='Team')  # Deal with San Jose accent mark & set teams as index
file_path2 = os.path.join('C:/Users/Kyle Teegarden/Documents/Python Project/PollGraph', 'team_data.csv')
team_data =
pd.read_csv(file_path2, encoding='latin1', index_col='School')
pandas.read_csv
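A hedged, self-contained sketch of the latin-1 read with a team-name index, using simulated bytes instead of the real polldata/team_data files (columns and teams are assumptions):

import io
import pandas as pd

raw = "Team,Conference\nOhio State,Big Ten\nSan José State,Mountain West\n".encode("latin1")
team_data = pd.read_csv(io.BytesIO(raw), encoding="latin1", index_col="Team")
print(team_data.loc["San José State", "Conference"])  # index_col makes team-name lookups direct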
#--------------------------------------------------------------------------------------------------------------------------
import os, sys, pandas
from git import Repo
#--------------------------------------------------------------------------------------------------------------------------

if __name__ == '__main__':
    if len(sys.argv) != 5:
        print('Usage: python script.py <data_dir> <project_name> <min_date> <max_date>')
        sys.exit()

    data_dir = sys.argv[1] + '/'
    project_name = sys.argv[2]
    min_date = sys.argv[3]
    max_date = sys.argv[4]

    lines_deleted_data_filename = data_dir + 'lines_deleted_in_bf_shas/' + project_name + '.buggylines'
    csvdata =
pandas.read_csv(lines_deleted_data_filename, index_col=False)
pandas.read_csv
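A short sketch of read_csv with index_col=False, as in the completion above: it keeps every column as data instead of letting pandas infer an index column. The CSV content and column names below are invented:

import io
import pandas as pd

raw = "bf_sha,file,lines_deleted\nabc123,foo.py,10\n"
csvdata = pd.read_csv(io.StringIO(raw), index_col=False)
print(csvdata.columns.tolist())  # ['bf_sha', 'file', 'lines_deleted'], with a default RangeIndex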
import pandas as pd


def auto_adjust_columns_width(writer, sheet, df):
    worksheet = writer.sheets[sheet]  # pull worksheet object
    for idx, col in enumerate(df):  # loop through all columns
        series = df[col]
        max_len = max((
            series.astype(str).map(len).max(),  # len of largest item
            len(str(series.name))  # len of column name/header
        )) + 1  # adding a little extra space
        worksheet.set_column(idx, idx, max_len)  # set column width


# Change DOI to get newer version of DrugMechDB
DOI = 3708278
file_location = "https://zenodo.org/record/{}/files/".format(DOI) + \
    "indication_MOA_paths.xlsx?download=1"

# Get the latest iteration of DrugMech DB
print('Downloading: ', file_location)
dmdb =
pd.read_excel(file_location, None)
pandas.read_excel
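Passing None as the second argument of read_excel (the completion above) returns a dict with one DataFrame per sheet. A self-contained sketch, assuming openpyxl is installed; the workbook and sheet names below are invented:

import io
import pandas as pd

buf = io.BytesIO()
with pd.ExcelWriter(buf, engine="openpyxl") as writer:   # build a two-sheet workbook in memory
    pd.DataFrame({"a": [1]}).to_excel(writer, sheet_name="paths", index=False)
    pd.DataFrame({"b": [2]}).to_excel(writer, sheet_name="metadata", index=False)
buf.seek(0)

dmdb = pd.read_excel(buf, None)    # sheet_name=None -> {sheet_name: DataFrame}
print(list(dmdb.keys()))           # ['paths', 'metadata']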
import pandas as pd def convert_year(in_string): '''Returns input as integer if possible, else None''' try: return int(in_string) except: return None def get_country(in_string): '''Return the country element from the location.''' nulls = ['', 'n/a', 'x', 'far away...', 'universe', 'n/a - on the road'] usa = ['usa', 'us', 'united states', 'united state', 'u.s.a.'] filipines = ['philippines', 'phillipines'] catalonia = ['catalonia', 'catalunya'] italy = ['italy', 'italia'] try: # the [1:] index because all the split results will contain # a space in the beginning country = in_string.rsplit(',', 1)[1][1:] except: country = None if country in nulls: country = None elif country in usa: country = 'usa' elif country in filipines: country = 'philippines' elif country in catalonia: country = 'catalonia' elif country in italy: country = 'italy' if country is not None: country = country.strip('"') return country def get_province(in_string): '''Return the province/state/area element from the location''' try: return in_string.rsplit(',', 2)[1][1:] except: return None def get_clean_data(path='./data/'): ''' Returns 3 cleaned datasets. Enter the path if the csv files is not under \data\ in your system :return: DataFrame - pandas dataframe of books DataFrame - pandas dataframe of users DataFrame - pandas dataframe of ratings ''' # skip some lines. Only like 5 of them. Errors likely because there # are semicolons in the title and pandas recognizes it as another column df_books = pd.read_csv( path + "BX-Books.csv", sep=';', encoding="ISO-8859-1", error_bad_lines=False ) df_users = pd.read_csv(path + "BX-Users.csv", sep=';', encoding="ISO-8859-1") df_ratings = pd.read_csv( path + "BX-Book-Ratings.csv", sep=';', encoding="ISO-8859-1" ) df_books.columns = [ 'isbn', 'title', 'author', 'pub_year', 'publisher', 'url_s', 'url_m', 'url_l' ] df_ratings.columns = ['user', 'isbn', 'rating'] df_users.columns = ['user', 'location', 'age'] df_books.pub_year = ( df_books.pub_year.apply(convert_year) ) # Drop the 3 bad rows df_books = df_books[~df_books.pub_year.isna()] # pub_year 0 most certainly means unknown value or null # anything > 2018 don't make sense either df_books.pub_year[ (df_books.pub_year > 2018) | (df_books.pub_year == 0) ] = None # Age 0 doesnt make sense and is most likely unknown or unrecorded value # Age > 122 doesnt make sense either as 122 is the recorded oldest person # on earth. (Prolly a lot of those over 100 are errors too but we cant # tell) df_users.age[(df_users.age == 0) | (df_users.age > 122)] = None df_users["country"] = df_users.location.apply(get_country) df_users["province"] = df_users.location.apply(get_province) return df_books, df_users, df_ratings ################################################################################ def get_merged_data_frame(user_argv=-1, isbn_argv=-1, path='./data/'): ''' Returns a merged dataframe of users, books, ratings. :param user_argv: integer, threshold to filter users fewer than this number of books rated :param isbn_argv: integer, threshold to filter books that has fewer than this number of ratings. :return: dataframe ''' df_books, df_users, df_ratings = get_clean_data(path=path) # merge ratings table with users table by user_ID df_merges = pd.merge(df_ratings, df_users, on='user') # based on the previous df_merges merge with books table by isbn df_merges =
pd.merge(df_merges, df_books, on='isbn')
pandas.merge
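A tiny end-to-end sketch of the two merges performed in get_merged_data_frame above, with three invented mini-frames in place of the Book-Crossing tables:

import pandas as pd

df_ratings = pd.DataFrame({"user": [1, 2], "isbn": ["x", "y"], "rating": [8, 5]})
df_users = pd.DataFrame({"user": [1, 2], "age": [30, 41]})
df_books = pd.DataFrame({"isbn": ["x", "y"], "title": ["A", "B"]})

df_merges = pd.merge(df_ratings, df_users, on='user')   # ratings + user info
df_merges = pd.merge(df_merges, df_books, on='isbn')    # + book info
print(df_merges.columns.tolist())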
# Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from collections import OrderedDict from typing import NamedTuple, Any, List, Dict, Union, Callable import numpy as np import pandas as pd from ...core import OutputType, Entity, Base from ...operands import OperandStage from ...utils import tokenize, is_build_mode, enter_mode, recursive_tile from ...serialize import BoolField, AnyField, DataTypeField, Int32Field from ..core import SERIES_TYPE from ..utils import parse_index, build_df, build_empty_df, build_series, \ build_empty_series, validate_axis from ..operands import DataFrameOperandMixin, DataFrameOperand, DATAFRAME_TYPE class DataFrameReductionOperand(DataFrameOperand): _axis = AnyField('axis') _skipna = BoolField('skipna') _level = AnyField('level') _numeric_only = BoolField('numeric_only') _bool_only = BoolField('bool_only') _min_count = Int32Field('min_count') _use_inf_as_na = BoolField('use_inf_as_na') _dtype = DataTypeField('dtype') _combine_size = Int32Field('combine_size') def __init__(self, axis=None, skipna=None, level=None, numeric_only=None, bool_only=None, min_count=None, stage=None, dtype=None, combine_size=None, gpu=None, sparse=None, output_types=None, use_inf_as_na=None, **kw): super().__init__(_axis=axis, _skipna=skipna, _level=level, _numeric_only=numeric_only, _bool_only=bool_only, _min_count=min_count, _stage=stage, _dtype=dtype, _combine_size=combine_size, _gpu=gpu, _sparse=sparse, _output_types=output_types, _use_inf_as_na=use_inf_as_na, **kw) @property def axis(self): return self._axis @property def skipna(self): return self._skipna @property def level(self): return self._level @property def numeric_only(self): return self._numeric_only @property def bool_only(self): return self._bool_only @property def min_count(self): return self._min_count @property def dtype(self): return self._dtype @property def combine_size(self): return self._combine_size @property def use_inf_as_na(self): return self._use_inf_as_na class DataFrameCumReductionOperand(DataFrameOperand): _axis = AnyField('axis') _skipna = BoolField('skipna') _use_inf_as_na = BoolField('use_inf_as_na') _dtype = DataTypeField('dtype') def __init__(self, axis=None, skipna=None, dtype=None, gpu=None, sparse=None, output_types=None, use_inf_as_na=None, stage=None, **kw): super().__init__(_axis=axis, _skipna=skipna, _dtype=dtype, _gpu=gpu, _sparse=sparse, _output_types=output_types, _stage=stage, _use_inf_as_na=use_inf_as_na, **kw) @property def axis(self): return self._axis @property def skipna(self): return self._skipna @property def dtype(self): return self._dtype @property def use_inf_as_na(self): return self._use_inf_as_na def _default_agg_fun(value, func_name=None, **kw): if value.ndim == 1: kw.pop('bool_only', None) kw.pop('numeric_only', None) return getattr(value, func_name)(**kw) else: return getattr(value, func_name)(**kw) class DataFrameReductionMixin(DataFrameOperandMixin): @classmethod def _make_agg_object(cls, op): func_name = getattr(op, 
'_func_name') kw = dict(skipna=op.skipna, numeric_only=op.numeric_only, bool_only=op.bool_only) kw = {k: v for k, v in kw.items() if v is not None} fun = functools.partial(_default_agg_fun, func_name=func_name, **kw) fun.__name__ = func_name return fun @classmethod def tile(cls, op): in_df = op.inputs[0] out_df = op.outputs[0] if isinstance(out_df, SERIES_TYPE): output_type = OutputType.series dtypes = pd.Series([out_df.dtype], index=[out_df.name]) index = out_df.index_value.to_pandas() elif out_df.ndim == 1: output_type = OutputType.tensor dtypes, index = out_df.dtype, None else: output_type = OutputType.scalar dtypes, index = out_df.dtype, None out_df = recursive_tile(in_df.agg( cls._make_agg_object(op), axis=op.axis or 0, numeric_only=op.numeric_only, bool_only=op.bool_only, combine_size=op.combine_size, _output_type=output_type, _dtypes=dtypes, _index=index )) return [out_df] def _call_dataframe(self, df): axis = getattr(self, 'axis', None) or 0 level = getattr(self, 'level', None) skipna = getattr(self, 'skipna', None) numeric_only = getattr(self, 'numeric_only', None) bool_only = getattr(self, 'bool_only', None) self._axis = axis = validate_axis(axis, df) # TODO: enable specify level if we support groupby if level is not None: raise NotImplementedError('Not support specify level now') empty_df = build_df(df) func_name = getattr(self, '_func_name') if func_name == 'count': reduced_df = getattr(empty_df, func_name)(axis=axis, level=level, numeric_only=numeric_only) elif func_name == 'nunique': reduced_df = getattr(empty_df, func_name)(axis=axis) elif func_name in ('all', 'any'): reduced_df = getattr(empty_df, func_name)(axis=axis, level=level, bool_only=bool_only) elif func_name == 'size': reduced_df = pd.Series(np.zeros(df.shape[1 - axis]), index=empty_df.columns if axis == 0 else None) elif func_name == 'custom_reduction': reduced_df = getattr(self, 'custom_reduction').__call_agg__(empty_df) else: reduced_df = getattr(empty_df, func_name)(axis=axis, level=level, skipna=skipna, numeric_only=numeric_only) reduced_shape = (df.shape[0],) if axis == 1 else reduced_df.shape return self.new_series([df], shape=reduced_shape, dtype=reduced_df.dtype, index_value=parse_index(reduced_df.index, store_data=axis == 0)) def _call_series(self, series): level = getattr(self, 'level', None) axis = getattr(self, 'axis', None) skipna = getattr(self, 'skipna', None) numeric_only = getattr(self, 'numeric_only', None) bool_only = getattr(self, 'bool_only', None) if axis == 'index': axis = 0 self._axis = axis # TODO: enable specify level if we support groupby if level is not None: raise NotImplementedError('Not support specified level now') empty_series = build_series(series) func_name = getattr(self, '_func_name') if func_name == 'count': reduced_series = empty_series.count(level=level) elif func_name == 'nunique': reduced_series = empty_series.nunique() elif func_name in ('all', 'any'): reduced_series = getattr(empty_series, func_name)(axis=axis, level=level, bool_only=bool_only) elif func_name == 'size': reduced_series = empty_series.size elif func_name == 'custom_reduction': reduced_series = getattr(self, 'custom_reduction').__call_agg__(empty_series) else: reduced_series = getattr(empty_series, func_name)(axis=axis, level=level, skipna=skipna, numeric_only=numeric_only) return self.new_scalar([series], dtype=np.array(reduced_series).dtype) def __call__(self, a): if isinstance(a, DATAFRAME_TYPE): return self._call_dataframe(a) else: return self._call_series(a) class 
DataFrameCumReductionMixin(DataFrameOperandMixin): @classmethod def _tile_one_chunk(cls, op): df = op.outputs[0] params = df.params.copy() chk = op.inputs[0].chunks[0] chunk_params = {k: v for k, v in chk.params.items() if k in df.params} chunk_params['shape'] = df.shape chunk_params['index'] = chk.index new_chunk_op = op.copy().reset_key() chunk = new_chunk_op.new_chunk(op.inputs[0].chunks, kws=[chunk_params]) new_op = op.copy() nsplits = tuple((s,) for s in chunk.shape) params['chunks'] = [chunk] params['nsplits'] = nsplits return new_op.new_tileables(op.inputs, kws=[params]) @classmethod def _build_combine(cls, op, input_chunks, summary_chunks, idx): c = input_chunks[idx] to_concat_chunks = [c] for j in range(idx): to_concat_chunks.append(summary_chunks[j]) new_chunk_op = op.copy().reset_key() new_chunk_op._stage = OperandStage.combine return new_chunk_op.new_chunk(to_concat_chunks, **c.params) @classmethod def _tile_dataframe(cls, op): in_df = op.inputs[0] df = op.outputs[0] n_rows, n_cols = in_df.chunk_shape # map to get individual results and summaries src_chunks = np.empty(in_df.chunk_shape, dtype=np.object) summary_chunks = np.empty(in_df.chunk_shape, dtype=np.object) for c in in_df.chunks: new_chunk_op = op.copy().reset_key() new_chunk_op._stage = OperandStage.map if op.axis == 1: summary_shape = (c.shape[0], 1) else: summary_shape = (1, c.shape[1]) src_chunks[c.index] = c summary_chunks[c.index] = new_chunk_op.new_chunk([c], shape=summary_shape, dtypes=df.dtypes) # combine summaries into results output_chunk_array = np.empty(in_df.chunk_shape, dtype=np.object) if op.axis == 1: for row in range(n_rows): row_src = src_chunks[row, :] row_summaries = summary_chunks[row, :] for col in range(n_cols): output_chunk_array[row, col] = cls._build_combine(op, row_src, row_summaries, col) else: for col in range(n_cols): col_src = src_chunks[:, col] col_summaries = summary_chunks[:, col] for row in range(n_rows): output_chunk_array[row, col] = cls._build_combine(op, col_src, col_summaries, row) output_chunks = list(output_chunk_array.reshape((n_rows * n_cols,))) new_op = op.copy().reset_key() return new_op.new_tileables(op.inputs, shape=in_df.shape, nsplits=in_df.nsplits, chunks=output_chunks, dtypes=df.dtypes, index_value=df.index_value, columns_value=df.columns_value) @classmethod def _tile_series(cls, op): in_series = op.inputs[0] series = op.outputs[0] # map to get individual results and summaries summary_chunks = np.empty(in_series.chunk_shape, dtype=np.object) for c in in_series.chunks: new_chunk_op = op.copy().reset_key() new_chunk_op._stage = OperandStage.map summary_chunks[c.index] = new_chunk_op.new_chunk([c], shape=(1,), dtype=series.dtype) # combine summaries into results output_chunks = [ cls._build_combine(op, in_series.chunks, summary_chunks, i) for i in range(len(in_series.chunks)) ] new_op = op.copy().reset_key() return new_op.new_tileables(op.inputs, shape=in_series.shape, nsplits=in_series.nsplits, chunks=output_chunks, dtype=series.dtype, index_value=series.index_value, name=series.name) @classmethod def tile(cls, op): in_df = op.inputs[0] if len(in_df.chunks) == 1: return cls._tile_one_chunk(op) if isinstance(in_df, DATAFRAME_TYPE): return cls._tile_dataframe(op) else: return cls._tile_series(op) @staticmethod def _get_last_slice(op, df, start): if op.output_types[0] == OutputType.series: return df.iloc[start:] else: if op.axis == 1: return df.iloc[:, start:] else: return df.iloc[start:, :] @classmethod def _execute_map(cls, ctx, op): in_data = ctx[op.inputs[0].key] kwargs 
= dict() if op.axis is not None: kwargs['axis'] = op.axis if op.skipna is not None: kwargs['skipna'] = op.skipna partial = getattr(in_data, getattr(cls, '_func_name'))(**kwargs) if op.skipna: partial.fillna(method='ffill', axis=op.axis, inplace=True) ctx[op.outputs[0].key] = cls._get_last_slice(op, partial, -1) @classmethod def _execute_combine(cls, ctx, op): kwargs = dict() if op.axis is not None: kwargs['axis'] = op.axis if op.skipna is not None: kwargs['skipna'] = op.skipna if len(op.inputs) > 1: ref_datas = [ctx[inp.key] for inp in op.inputs[1:]] concat_df = getattr(pd.concat(ref_datas, axis=op.axis), getattr(cls, '_func_name'))(**kwargs) if op.skipna: concat_df.fillna(method='ffill', axis=op.axis, inplace=True) in_data = ctx[op.inputs[0].key] concat_df = pd.concat([cls._get_last_slice(op, concat_df, -1), in_data], axis=op.axis) result = getattr(concat_df, getattr(cls, '_func_name'))(**kwargs) ctx[op.outputs[0].key] = cls._get_last_slice(op, result, 1) else: ctx[op.outputs[0].key] = getattr(ctx[op.inputs[0].key], getattr(cls, '_func_name'))(**kwargs) @classmethod def execute(cls, ctx, op): try:
pd.set_option('mode.use_inf_as_na', op.use_inf_as_na)
pandas.set_option
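A rough sketch of what the option toggled in execute() above does: with 'mode.use_inf_as_na' enabled, infinities are treated as missing values by skipna reductions. The option is deprecated in recent pandas releases, so treat this purely as an illustration:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.inf, 3.0])
with pd.option_context('mode.use_inf_as_na', True):
    print(s.sum())   # 4.0 -- the inf is masked like a NaN
print(s.sum())       # inf once the option is back at its default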
from flowsa.common import WITHDRAWN_KEYWORD from flowsa.flowbyfunctions import assign_fips_location_system from flowsa.location import US_FIPS import math import pandas as pd import io from flowsa.settings import log from string import digits YEARS_COVERED = { "asbestos": "2014-2018", "barite": "2014-2018", "bauxite": "2013-2017", "beryllium": "2014-2018", "boron": "2014-2018", "chromium": "2014-2018", "clay": "2015-2016", "cobalt": "2013-2017", "copper": "2011-2015", "diatomite": "2014-2018", "feldspar": "2013-2017", "fluorspar": "2013-2017", "fluorspar_inports": ["2016", "2017"], "gallium": "2014-2018", "garnet": "2014-2018", "gold": "2013-2017", "graphite": "2013-2017", "gypsum": "2014-2018", "iodine": "2014-2018", "ironore": "2014-2018", "kyanite": "2014-2018", "lead": "2012-2018", "lime": "2014-2018", "lithium": "2013-2017", "magnesium": "2013-2017", "manganese": "2012-2016", "manufacturedabrasive": "2017-2018", "mica": "2014-2018", "molybdenum": "2014-2018", "nickel": "2012-2016", "niobium": "2014-2018", "peat": "2014-2018", "perlite": "2013-2017", "phosphate": "2014-2018", "platinum": "2014-2018", "potash": "2014-2018", "pumice": "2014-2018", "rhenium": "2014-2018", "salt": "2013-2017", "sandgravelconstruction": "2013-2017", "sandgravelindustrial": "2014-2018", "silver": "2012-2016", "sodaash": "2010-2017", "sodaash_t4": ["2016", "2017"], "stonecrushed": "2013-2017", "stonedimension": "2013-2017", "strontium": "2014-2018", "talc": "2013-2017", "titanium": "2013-2017", "tungsten": "2013-2017", "vermiculite": "2014-2018", "zeolites": "2014-2018", "zinc": "2013-2017", "zirconium": "2013-2017", } def usgs_myb_year(years, current_year_str): """ Sets the column for the string based on the year. Checks that the year you picked is in the last file. :param years: string, with hypthon :param current_year_str: string, year of interest :return: string, year """ years_array = years.split("-") lower_year = int(years_array[0]) upper_year = int(years_array[1]) current_year = int(current_year_str) if lower_year <= current_year <= upper_year: column_val = current_year - lower_year + 1 return "year_" + str(column_val) else: log.info("Your year is out of scope. Pick a year between %s and %s", lower_year, upper_year) def usgs_myb_name(USGS_Source): """ Takes the USGS source name and parses it so it can be used in other parts of Flow by activity. :param USGS_Source: string, usgs source name :return: """ source_split = USGS_Source.split("_") name_cc = str(source_split[2]) name = "" for char in name_cc: if char.isupper(): name = name + " " + char else: name = name + char name = name.lower() name = name.strip() return name def usgs_myb_static_variables(): """ Populates the data values for Flow by activity that are the same for all of USGS_MYB Files :return: """ data = {} data["Class"] = "Geological" data['FlowType'] = "ELEMENTARY_FLOWS" data["Location"] = US_FIPS data["Compartment"] = "ground" data["Context"] = None data["ActivityConsumedBy"] = None return data def usgs_myb_remove_digits(value_string): """ Eliminates numbers in a string :param value_string: :return: """ remove_digits = str.maketrans('', '', digits) return_string = value_string.translate(remove_digits) return return_string def usgs_myb_url_helper(*, build_url, **_): """ This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for data imports that requires parts of the url text string to be replaced with info specific to the data year. 
This function does not parse the data, only modifies the urls from which data is obtained. :param build_url: string, base url :param config: dictionary, items in FBA method yaml :param args: dictionary, arguments specified when running flowbyactivity.py flowbyactivity.py ('year' and 'source') :return: list, urls to call, concat, parse, format into Flow-By-Activity format """ return [build_url] def usgs_asbestos_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 12: for x in range(12, len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data. columns) == 12: df_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_asbestos_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] product = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption:": product = "imports" elif df.iloc[index]["Production"].strip() == \ "Exports and reexports:": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system(dataframe, str(year)) return dataframe def usgs_barite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel( io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. 
columns) == 11: df_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_barite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['barite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption:3": product = "imports" elif df.iloc[index]["Production"].strip() == \ "Crude, sold or used by producers:": product = "production" elif df.iloc[index]["Production"].strip() == "Exports:2": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['barite'], year) if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_bauxite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one. 
columns) == 11: df_data_one.columns = ["Production", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_bauxite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Total"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Production": prod = "production" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption, as shipped:": prod = "import" elif df.iloc[index]["Production"].strip() == \ "Exports, as shipped:": prod = "export" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" flow_amount = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = flow_amount data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_beryllium_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T4') df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex() df_data_1 = df_data_1.reset_index() del df_data_1["index"] df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex() df_data_2 = df_data_2.reset_index() del df_data_2["index"] if len(df_data_2.columns) > 11: for x in range(11, len(df_data_2.columns)): col_name = "Unnamed: " + str(x) del df_data_2[col_name] if len(df_data_1. columns) == 11: df_data_1.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if len(df_data_2. 
columns) == 11: df_data_2.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year)) for col in df_data_1.columns: if col not in col_to_use: del df_data_1[col] for col in df_data_2.columns: if col not in col_to_use: del df_data_2[col] frames = [df_data_1, df_data_2] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_beryllium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["United States6", "Mine shipments1", "Imports for consumption, beryl2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year) for df in df_list: for index, row in df.iterrows(): prod = "production" if df.iloc[index]["Production"].strip() == \ "Imports for consumption, beryl2": prod = "imports" if df.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["Description"] = name data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_boron_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex() df_data_two = df_data_two.reset_index() del df_data_two["index"] df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex() df_data_three = df_data_three.reset_index() del df_data_three["index"] if len(df_data_one. 
columns) == 11: df_data_one.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] df_data_two.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] df_data_three.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] del df_data_two[col] del df_data_three[col] frames = [df_data_one, df_data_two, df_data_three] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_boron_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["B2O3 content", "Quantity"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['boron'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "B2O3 content" or \ df.iloc[index]["Production"].strip() == "Quantity": product = "production" if df.iloc[index]["Production"].strip() == "Colemanite:4": des = "Colemanite" elif df.iloc[index]["Production"].strip() == "Ulexite:4": des = "Ulexite" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" if des == name: data['FlowName'] = name + " " + product else: data['FlowName'] = name + " " + product + " " + des data["Description"] = des data["ActivityProducedBy"] = name if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_chromium_call(*, resp, year, **_): """" Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. columns) == 12: df_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] elif len(df_data. 
columns) == 13: df_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5", "space_6"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_chromium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Secondary2", "Total"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['chromium'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Imports:": product = "imports" elif df.iloc[index]["Production"].strip() == "Secondary2": product = "production" elif df.iloc[index]["Production"].strip() == "Exports:": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['chromium'], year) if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_clay_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T3') df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex() df_data_ball = df_data_ball.reset_index() del df_data_ball["index"] df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T4 ') df_data_bentonite = pd.DataFrame( df_raw_data_bentonite.loc[28:28]).reindex() df_data_bentonite = df_data_bentonite.reset_index() del df_data_bentonite["index"] df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T5 ') df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex() df_data_common = df_data_common.reset_index() del df_data_common["index"] df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T6 ') df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex() df_data_fire = df_data_fire.reset_index() del df_data_fire["index"] df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T7 ') df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex() df_data_fuller = df_data_fuller.reset_index() del df_data_fuller["index"] df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T8 ') df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex() df_data_kaolin = df_data_kaolin.reset_index() del df_data_kaolin["index"] df_raw_data_export = 
pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T13')
    df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
    df_data_export = df_data_export.reset_index()
    del df_data_export["index"]

    df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
                                                sheet_name='T14')
    df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
    df_data_import = df_data_import.reset_index()
    del df_data_import["index"]

    df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
                            "value_1", "space_3", "year_2", "space_4",
                            "value_2"]
    df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
                                 "value_1", "space_3", "year_2", "space_4",
                                 "value_2"]
    df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
                              "value_1", "space_3", "year_2", "space_4",
                              "value_2"]
    df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
                            "value_1", "space_3", "year_2", "space_4",
                            "value_2"]
    df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
                              "value_1", "space_3", "year_2", "space_4",
                              "value_2"]
    df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
                              "value_1", "space_3", "year_2", "space_4",
                              "value_2"]
    df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
                              "value_1", "space_3", "year_2", "space_4",
                              "value_2", "space_5", "extra"]
    df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
                              "value_1", "space_3", "year_2", "space_4",
                              "value_2", "space_5", "extra"]

    df_data_ball["type"] = "Ball clay"
    df_data_bentonite["type"] = "Bentonite"
    df_data_common["type"] = "Common clay"
    df_data_fire["type"] = "Fire clay"
    df_data_fuller["type"] = "Fuller’s earth"
    df_data_kaolin["type"] = "Kaolin"
    df_data_export["type"] = "export"
    df_data_import["type"] = "import"

    col_to_use = ["Production", "type"]
    col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
    for col in df_data_import.columns:
        if col not in col_to_use:
            del df_data_import[col]
            del df_data_export[col]
    for col in df_data_ball.columns:
        if col not in col_to_use:
            del df_data_ball[col]
            del df_data_bentonite[col]
            del df_data_common[col]
            del df_data_fire[col]
            del df_data_fuller[col]
            del df_data_kaolin[col]

    frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
              df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
    df_data = pd.concat(frames)
    df_data = df_data.reset_index()
    del df_data["index"]
    return df_data


def usgs_clay_parse(*, df_list, source, year, **_):
    """
    Combine, parse, and format the provided dataframes
    :param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
    :return: df, parsed and partially formatted to flowbyactivity
        specifications
    """
    data = {}
    row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
                  "Fuller’s earth", "Total", "Grand total",
                  "Artificially activated clay and earth",
                  "Clays, not elsewhere classified"]
    dataframe = pd.DataFrame()
    for df in df_list:
        for index, row in df.iterrows():
            if df.iloc[index]["type"].strip() == "import":
                product = "imports"
            elif df.iloc[index]["type"].strip() == "export":
                product = "exports"
            else:
                product = "production"
            if str(df.iloc[index]["Production"]).strip() in row_to_use:
                data = usgs_myb_static_variables()
                data["SourceName"] = source
                data["Year"] = str(year)
                data["Unit"] = "Metric Tons"
                if product == "production":
                    data['FlowName'] = \
                        df.iloc[index]["type"].strip() + " " + product
                    data["Description"] = df.iloc[index]["type"].strip()
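                    # Production rows are labeled by clay type (Ball clay,
                    # Bentonite, etc.); import/export rows fall through to
                    # the else branch below and are labeled by their
                    # "Production" text instead.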
data["ActivityProducedBy"] = df.iloc[index]["type"].strip() else: data['FlowName'] = \ df.iloc[index]["Production"].strip() + " " + product data["Description"] = df.iloc[index]["Production"].strip() data["ActivityProducedBy"] = \ df.iloc[index]["Production"].strip() col_name = usgs_myb_year(YEARS_COVERED['clay'], year) if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)" or \ str(df.iloc[index][col_name]) == "(2)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_cobalt_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T8') df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex() df_data_1 = df_data_1.reset_index() del df_data_1["index"] df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex() df_data_2 = df_data_2.reset_index() del df_data_2["index"] if len(df_data_2.columns) > 11: for x in range(11, len(df_data_2.columns)): col_name = "Unnamed: " + str(x) del df_data_2[col_name] if len(df_data_1. columns) == 12: df_data_1.columns = ["Production", "space_6", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if len(df_data_2. columns) == 11: df_data_2.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year)) for col in df_data_1.columns: if col not in col_to_use: del df_data_1[col] for col in df_data_2.columns: if col not in col_to_use: del df_data_2[col] frames = [df_data_1, df_data_2] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_cobalt_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["United Statese, 16, 17", "Mine productione", "Imports for consumption", "Exports"] dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): prod = "production" if df.iloc[index]["Production"].strip() == \ "United Statese, 16, 17": prod = "production" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption": prod = "imports" elif df.iloc[index]["Production"].strip() == "Exports": prod = "exports" if df.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " 
+ prod data["FlowAmount"] = str(df.iloc[index][col_name]) remove_rows = ["(18)", "(2)"] if data["FlowAmount"] not in remove_rows: dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_copper_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex() df_data_1 = df_data_1.reset_index() del df_data_1["index"] df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex() df_data_2 = df_data_2.reset_index() del df_data_2["index"] if len(df_data_1. columns) == 12: df_data_1.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] df_data_2.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Unit"] col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year)) for col in df_data_1.columns: if col not in col_to_use: del df_data_1[col] for col in df_data_2.columns: if col not in col_to_use: del df_data_2[col] frames = [df_data_1, df_data_2] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_copper_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) if product == "Total": prod = "production" elif product == "Exports, refined": prod = "exports" elif product == "Imports, refined": prod = "imports" data["ActivityProducedBy"] = "Copper; Mine" data['FlowName'] = name + " " + prod data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['copper'], year) data["Description"] = "Copper; Mine" data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_diatomite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one.columns) == 10: df_data_one.columns = ["Production", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] 
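    # Only one table is read for diatomite; wrapping it in a list and
    # concatenating below keeps this call function structured like the
    # multi-table *_call functions elsewhere in this module.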
frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_diatomite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Exports2", "Imports for consumption2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports2": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption2": prod = "imports" elif df.iloc[index]["Production"].strip() == "Quantity": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand metric tons" col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year) data["FlowAmount"] = str(df.iloc[index][col_name]) data["Description"] = name data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_feldspar_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex() df_data_two = df_data_two.reset_index() del df_data_two["index"] df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_two. 
columns) == 13: df_data_two.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] df_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year)) for col in df_data_two.columns: if col not in col_to_use: del df_data_two[col] del df_data_one[col] frames = [df_data_two, df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_feldspar_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity3"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports, feldspar:4": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:4": prod = "imports" elif df.iloc[index]["Production"].strip() == \ "Production, feldspar:e, 2": prod = "production" elif df.iloc[index]["Production"].strip() == "Nepheline syenite:": prod = "production" des = "Nepheline syenite" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year) data["FlowAmount"] = str(df.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name if name == des: data['FlowName'] = name + " " + prod else: data['FlowName'] = name + " " + prod + " " + des dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_fluorspar_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') if year in YEARS_COVERED['fluorspar_inports']: df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T2') df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T7') df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T8') df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if year in YEARS_COVERED['fluorspar_inports']: df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex() df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex() df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex() if len(df_data_two.columns) == 13: df_data_two.columns = ["Production", "space_1", "not_1", "space_2", "not_2", "space_3", "not_3", "space_4", "not_4", "space_5", "year_4", "space_6", "year_5"] if len(df_data_three.columns) == 9: df_data_three.columns = ["Production", "space_1", "year_4", "space_2", "not_1", "space_3", "year_5", 
"space_4", "not_2"] df_data_four.columns = ["Production", "space_1", "year_4", "space_2", "not_1", "space_3", "year_5", "space_4", "not_2"] if len(df_data_one. columns) == 13: df_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] if year in YEARS_COVERED['fluorspar_inports']: for col in df_data_two.columns: if col not in col_to_use: del df_data_two[col] for col in df_data_three.columns: if col not in col_to_use: del df_data_three[col] for col in df_data_four.columns: if col not in col_to_use: del df_data_four[col] df_data_one["type"] = "data_one" if year in YEARS_COVERED['fluorspar_inports']: # aluminum fluoride # cryolite df_data_two["type"] = "data_two" df_data_three["type"] = "Aluminum Fluoride" df_data_four["type"] = "Cryolite" frames = [df_data_one, df_data_two, df_data_three, df_data_four] else: frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_fluorspar_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid", "Metallurgical", "Production"] prod = "" name = usgs_myb_name(source) dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports:3": prod = "exports" des = name elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:3": prod = "imports" des = name elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:": prod = "production" des = "Fluorosilicic acid:" if str(df.iloc[index]["type"]).strip() == "data_two": prod = "imports" des = df.iloc[index]["Production"].strip() elif str(df.iloc[index]["type"]).strip() == \ "Aluminum Fluoride" or \ str(df.iloc[index]["type"]).strip() == "Cryolite": prod = "imports" des = df.iloc[index]["type"].strip() if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_gallium_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 11: for x in range(11, 
len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data.columns) == 11: df_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_gallium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production, primary crude", "Metal"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['gallium'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption:": product = "imports" elif df.iloc[index]["Production"].strip() == \ "Production, primary crude": product = "production" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Kilograms" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['gallium'], year) if str(df.iloc[index][col_name]).strip() == "--": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_garnet_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex() df_data_two = df_data_two.reset_index() del df_data_two["index"] df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one.columns) > 13: for x in range(13, len(df_data_one.columns)): col_name = "Unnamed: " + str(x) del df_data_one[col_name] del df_data_two[col_name] if len(df_data_two. 
columns) == 13: df_data_two.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] df_data_one.columns = ["Production", "space_1", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year)) for col in df_data_two.columns: if col not in col_to_use: del df_data_two[col] del df_data_one[col] frames = [df_data_two, df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_garnet_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports:2": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption: 3": prod = "imports" elif df.iloc[index]["Production"].strip() == "Crude production:": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['garnet'], year) data["FlowAmount"] = str(df.iloc[index][col_name]) data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_gold_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) == 13: df_data.columns = ["Production", "Space", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_gold_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Exports, refined bullion", "Imports for consumption, refined bullion"] dataframe = pd.DataFrame() product = "production" name = usgs_myb_name(source) des = name for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Quantity": product = "production" elif df.iloc[index]["Production"].strip() == \ "Exports, refined bullion": product = "exports" elif 
df.iloc[index]["Production"].strip() == \ "Imports for consumption, refined bullion": product = "imports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "kilograms" data['FlowName'] = name + " " + product data["Description"] = des data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['gold'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_graphite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. columns) == 13: df_data.columns = ["Production", "space_1", "Unit", "space_6", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_graphite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantiy", "Quantity"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['graphite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption:": product = "imports" elif df.iloc[index]["Production"].strip() == "Exports:": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['graphite'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "nan": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_gypsum_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one.columns) > 11: for x in range(11, len(df_data_one.columns)): col_name = "Unnamed: " + str(x) del 
df_data_one[col_name] if len(df_data_one.columns) == 11: df_data_one.columns = ["Production", "space_1", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_gypsum_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Imports for consumption"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption": prod = "imports" elif df.iloc[index]["Production"].strip() == "Quantity": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_iodine_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. columns) == 11: df_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] elif len(df_data. 
columns) == 13: df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5", "space_6"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_iodine_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Quantity, for consumption", "Exports2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['iodine'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Imports:2": product = "imports" elif df.iloc[index]["Production"].strip() == "Production": product = "production" elif df.iloc[index]["Production"].strip() == "Exports2": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['iodine'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_iron_ore_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. 
columns) == 12: df_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Units"] col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_iron_ore_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["Gross weight", "Quantity"] dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Production:": product = "production" elif df.iloc[index]["Production"].strip() == "Exports:": product = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:": product = "imports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data['FlowName'] = "Iron Ore " + product data["Description"] = "Iron Ore" data["ActivityProducedBy"] = "Iron Ore" col_name = usgs_myb_year(YEARS_COVERED['ironore'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_kyanite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one. 
columns) == 12: df_data_one.columns = ["Production", "unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_kyanite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Quantity2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Exports of kyanite concentrate:3": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption, all kyanite minerals:3": prod = "imports" elif df.iloc[index]["Production"].strip() == "Production:": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_lead_url_helper(*, year, **_): """ This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for data imports that requires parts of the url text string to be replaced with info specific to the data year. This function does not parse the data, only modifies the urls from which data is obtained. :param build_url: string, base url :return: list, urls to call, concat, parse, format into Flow-By-Activity format """ if int(year) < 2013: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'palladium/production/atoms/files/myb1-2016-lead.xls') elif int(year) < 2014: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'palladium/production/atoms/files/myb1-2017-lead.xls') else: build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/' 'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx') url = build_url return [url] def usgs_lead_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 12: for x in range(12, len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data. 
columns) == 12: df_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production", "Units"] if int(year) == 2013: modified_sy = "2013-2018" col_to_use.append(usgs_myb_year(modified_sy, year)) elif int(year) > 2013: modified_sy = "2014-2018" col_to_use.append(usgs_myb_year(modified_sy, year)) else: col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_lead_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} name = usgs_myb_name(source) des = name row_to_use = ["Primary lead, refined content, " "domestic ores and base bullion", "Secondary lead, lead content", "Lead ore and concentrates", "Lead in base bullion"] import_export = ["Exports, lead content:", "Imports for consumption, lead content:"] dataframe = pd.DataFrame() product = "production" for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() in import_export: if df.iloc[index]["Production"].strip() == \ "Exports, lead content:": product = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption, lead content:": product = "imports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["ActivityProducedBy"] = df.iloc[index]["Production"] if int(year) == 2013: modified_sy = "2013-2018" col_name = usgs_myb_year(modified_sy, year) elif int(year) > 2013: modified_sy = "2014-2018" col_name = usgs_myb_year(modified_sy, year) else: col_name = usgs_myb_year(YEARS_COVERED['lead'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_lime_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex() df_data_1 = df_data_1.reset_index() del df_data_1["index"] df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32]).reindex() df_data_2 = df_data_2.reset_index() del df_data_2["index"] if len(df_data_1.columns) > 12: for x in range(12, len(df_data_1.columns)): col_name = "Unnamed: " + str(x) del df_data_1[col_name] del df_data_2[col_name] if len(df_data_1. 
columns) == 12: df_data_1.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] df_data_2.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['lime'], year)) for col in df_data_1.columns: if col not in col_to_use: del df_data_1[col] for col in df_data_2.columns: if col not in col_to_use: del df_data_2[col] frames = [df_data_1, df_data_2] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_lime_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Total", "Quantity"] import_export = ["Exports:7", "Imports for consumption:7"] name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: prod = "production" for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports:7": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:7": prod = "imports" if df.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['lime'], year) data["Description"] = des data["ActivityProducedBy"] = name if product.strip() == "Total": data['FlowName'] = name + " " + prod elif product.strip() == "Quantity": data['FlowName'] = name + " " + prod data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_lithium_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one.columns) > 11: for x in range(11, len(df_data_one.columns)): col_name = "Unnamed: " + str(x) del df_data_one[col_name] if len(df_data_one. 
columns) == 11: df_data_one.columns = ["Production", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['lithium'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_lithium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Exports3", "Imports3", "Production"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['lithium'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports3": prod = "exports" elif df.iloc[index]["Production"].strip() == "Imports3": prod = "imports" elif df.iloc[index]["Production"].strip() == "Production": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_magnesium_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[7:15]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. 
columns) == 12: df_data.columns = ["Production", "Units", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['magnesium'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_magnesium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Secondary", "Primary", "Exports", "Imports for consumption"] dataframe = pd.DataFrame() name = usgs_myb_name(source) des = name for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports": product = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption": product = "imports" elif df.iloc[index]["Production"].strip() == "Secondary" or \ df.iloc[index]["Production"].strip() == "Primary": product = "production" + " " + \ df.iloc[index]["Production"].strip() if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) elif str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_manganese_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[7:9]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 12: for x in range(12, len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data. 
columns) == 12: df_data.columns = ["Production", "Unit", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['manganese'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_manganese_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Exports", "Imports for consumption"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['manganese'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption": product = "imports" elif df.iloc[index]["Production"].strip() == "Production": product = "production" elif df.iloc[index]["Production"].strip() == "Exports": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['manganese'], year) if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_ma_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param args: dictionary, arguments specified when running flowbyactivity.py ('year' and 'source') :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T2') df_data = pd.DataFrame(df_raw_data.loc[6:7]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 9: for x in range(9, len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data. columns) == 9: df_data.columns = ["Product", "space_1", "quality_year_1", "space_2", "value_year_1", "space_3", "quality_year_2", "space_4", "value_year_2"] elif len(df_data. 
columns) == 9: df_data.columns = ["Product", "space_1", "quality_year_1", "space_2", "value_year_1", "space_3", "quality_year_2", "space_4", "value_year_2"] col_to_use = ["Product"] col_to_use.append("quality_" + usgs_myb_year(YEARS_COVERED['manufacturedabrasive'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_ma_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param args: dictionary, used to run flowbyactivity.py ('year' and 'source') :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Silicon carbide"] name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: for index, row in df.iterrows(): remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Product"].strip().translate(remove_digits) if product in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data['FlowName'] = "Silicon carbide" data["ActivityProducedBy"] = "Silicon carbide" data["Unit"] = "Metric Tons" col_name = ("quality_" + usgs_myb_year( YEARS_COVERED['manufacturedabrasive'], year)) col_name_array = col_name.split("_") data["Description"] = product + " " + col_name_array[0] data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_mica_call(*, resp, source, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[4:6]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] name = usgs_myb_name(source) des = name if len(df_data_one. 
columns) == 12: df_data_one.columns = ["Production", "Unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['mica'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_mica_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['mica'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Production, sold or used by producers:": prod = "production" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_molybdenum_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[7:11]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data. 
columns) == 11: df_data.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['molybdenum'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_molybdenum_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Imports for consumption", "Exports"] dataframe = pd.DataFrame() name = usgs_myb_name(source) des = name for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports": product = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption": product = "imports" elif df.iloc[index]["Production"].strip() == "Production": product = "production" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = des data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year) if str(df.iloc[index][col_name]) == "--": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_nickel_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T10') df_data_1 = pd.DataFrame(df_raw_data.loc[36:36]).reindex() df_data_1 = df_data_1.reset_index() del df_data_1["index"] df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_2 = pd.DataFrame(df_raw_data_two.loc[11:16]).reindex() df_data_2 = df_data_2.reset_index() del df_data_2["index"] if len(df_data_1.columns) > 11: for x in range(11, len(df_data_1.columns)): col_name = "Unnamed: " + str(x) del df_data_1[col_name] if len(df_data_1. 
columns) == 11: df_data_1.columns = ["Production", "space_1", "year_1", "space_2", "year_2", "space_3", "year_3", "space_4", "year_4", "space_5", "year_5"] if len(df_data_2.columns) == 12: df_data_2.columns = ["Production", "space_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['nickel'], year)) for col in df_data_1.columns: if col not in col_to_use: del df_data_1[col] for col in df_data_2.columns: if col not in col_to_use: del df_data_2[col] frames = [df_data_1, df_data_2] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_nickel_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Ores and concentrates3", "United States, sulfide ore, concentrate"] import_export = ["Exports:", "Imports for consumption:"] name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() for df in df_list: prod = "production" for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Exports:": prod = "exports" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:": prod = "imports" if df.iloc[index]["Production"].strip() in row_to_use: remove_digits = str.maketrans('', '', digits) product = df.iloc[index][ "Production"].strip().translate(remove_digits) data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" col_name = usgs_myb_year(YEARS_COVERED['nickel'], year) if product.strip() == \ "United States, sulfide ore, concentrate": data["Description"] = \ "United States, sulfide ore, concentrate Nickel" data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod elif product.strip() == "Ores and concentrates": data["Description"] = "Ores and concentrates Nickel" data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(4)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_niobium_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data = pd.DataFrame(df_raw_data.loc[4:19]).reindex() df_data = df_data.reset_index() del df_data["index"] if len(df_data.columns) > 13: for x in range(13, len(df_data.columns)): col_name = "Unnamed: " + str(x) del df_data[col_name] if len(df_data. 
columns) == 13: df_data.columns = ["Production", "space_1", "Unit_1", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['niobium'], year)) for col in df_data.columns: if col not in col_to_use: del df_data[col] return df_data def usgs_niobium_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Total imports, Nb content", "Total exports, Nb content"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['niobium'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == \ "Imports for consumption:": product = "imports" elif df.iloc[index]["Production"].strip() == "Exports:": product = "exports" if df.iloc[index]["Production"].strip() in row_to_use: data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Metric Tons" data['FlowName'] = name + " " + product data["Description"] = name data["ActivityProducedBy"] = name col_name = usgs_myb_year(YEARS_COVERED['niobium'], year) if str(df.iloc[index][col_name]) == "--" or \ str(df.iloc[index][col_name]) == "(3)": data["FlowAmount"] = str(0) else: data["FlowAmount"] = str(df.iloc[index][col_name]) dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_peat_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param url: string, url :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ """Calls the excel sheet for nickel and removes extra columns""" df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[7:18]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] if len(df_data_one.columns) > 12: for x in range(12, len(df_data_one.columns)): col_name = "Unnamed: " + str(x) del df_data_one[col_name] if len(df_data_one.columns) == 12: df_data_one.columns = ["Production", "Unit", "space_2", "year_1", "space_3", "year_2", "space_4", "year_3", "space_5", "year_4", "space_6", "year_5"] col_to_use = ["Production"] col_to_use.append(usgs_myb_year(YEARS_COVERED['peat'], year)) for col in df_data_one.columns: if col not in col_to_use: del df_data_one[col] frames = [df_data_one] df_data = pd.concat(frames) df_data = df_data.reset_index() del df_data["index"] return df_data def usgs_peat_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Production", "Exports", "Imports for consumption"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['peat'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Production": prod = "production" elif df.iloc[index]["Production"].strip() == \ "Imports for 
consumption": prod = "import" elif df.iloc[index]["Production"].strip() == "Exports": prod = "export" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe def usgs_perlite_call(*, resp, year, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param year: year :return: pandas dataframe of original source data """ df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content), sheet_name='T1') df_data_one = pd.DataFrame(df_raw_data_one.loc[6:6]).reindex() df_data_one = df_data_one.reset_index() del df_data_one["index"] df_data_two =
pd.DataFrame(df_raw_data_one.loc[20:25])
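# Hedged sketch, not part of the USGS source above: every usgs_*_call reads one Excel tab,
# renames positional columns, and keeps only "Production" plus the requested year column;
# every usgs_*_parse then matches row labels and grows a DataFrame with
# dataframe.append(...), which was removed in pandas 2.0.  The toy frame and the
# keep_year_column helper below are hypothetical; they only illustrate the shared pattern
# and the collect-then-concat replacement for append.
import pandas as pd

def keep_year_column(df_data, year_col):
    """Drop every column except the row label and the requested year column."""
    keep = ["Production", year_col]
    return df_data[[c for c in df_data.columns if c in keep]]

toy = pd.DataFrame({
    "Production": ["Production", "Exports", "Imports for consumption", "Unit note"],
    "year_1": [100, 20, 35, None],
    "year_2": ["110", "(3)", "--", None],
})

rows = []
for _, row in keep_year_column(toy, "year_2").iterrows():
    label = str(row["Production"]).strip()
    if label not in ("Production", "Exports", "Imports for consumption"):
        continue
    amount = str(row["year_2"])
    rows.append({
        "FlowName": "mineral " + label.lower(),
        "FlowAmount": "0" if amount in ("--", "(3)") else amount,  # withheld/zero markers
        "Unit": "Metric Tons",
    })

flows = pd.DataFrame(rows)   # one build at the end instead of repeated .append(...) calls
print(flows)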
## FROM KAGGLE KERNEL: import os import random import gc import tqdm import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # import matplotlib.pyplot as plt # import seaborn as sns # # from sklearn import preprocessing # from sklearn.model_selection import KFold # import lightgbm as lgb # import xgboost as xgb # import catboost as cb # %% {"_kg_hide-input": true} # Copy from https://www.kaggle.com/gemartin/load-data-reduce-memory-usage by @gemartin # Modified to support timestamp type # Modified to add option to use float16 or not. feather format does not support float16. from pandas.api.types import is_datetime64_any_dtype as is_datetime def reduce_mem_usage(df, use_float16=False): """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """ start_mem = df.memory_usage().sum() / 1024 ** 2 print('Memory usage of dataframe is {:.2f} MB'.format(start_mem)) for col in df.columns: if is_datetime(df[col]): # skip datetime type continue col_type = df[col].dtype if col_type != object: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) else: df[col] = df[col].astype('category') end_mem = df.memory_usage().sum() / 1024 ** 2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df def import_data(file): """create a dataframe and optimize its memory usage""" df = pd.read_csv(file, parse_dates=True, keep_date_col=True) df = reduce_mem_usage(df) return df #%% # %% from pathlib import Path import zipfile DATA_PATH = '~/ashrae/data/raw' DATA_PATH = Path(DATA_PATH) DATA_PATH = DATA_PATH.expanduser() assert DATA_PATH.exists(), DATA_PATH DATA_FEATHER_PATH ='~/ashrae/data/feather' DATA_FEATHER_PATH = Path(DATA_FEATHER_PATH) DATA_FEATHER_PATH = DATA_FEATHER_PATH.expanduser() assert DATA_FEATHER_PATH.exists() # zipfile.ZipFile(DATA_PATH).infolist() #%% ZIPPED = False # %%time if ZIPPED: with zipfile.ZipFile(DATA_PATH) as zf: with zf.open('train.csv') as zcsv: train_df = pd.read_csv(zcsv) with zf.open('test.csv') as zcsv: test_df = pd.read_csv(zcsv) with zf.open('weather_train.csv') as zcsv: weather_train_df = pd.read_csv(zcsv) with zf.open('weather_test.csv') as zcsv: weather_test_df = pd.read_csv(zcsv) with zf.open('building_metadata.csv') as zcsv: building_meta_df = pd.read_csv(zcsv) with zf.open('sample_submission.csv') as zcsv: sample_submission = pd.read_csv(zcsv) #%% train_df = pd.read_csv(DATA_PATH / 'train.zip') test_df = pd.read_csv(DATA_PATH / 'test.zip') weather_train_df = pd.read_csv(DATA_PATH / 'weather_train.zip') weather_test_df = pd.read_csv(DATA_PATH / 'weather_test.zip') building_meta_df = pd.read_csv(DATA_PATH / 'building_metadata.zip') sample_submission = 
pd.read_csv(DATA_PATH / 'sample_submission.zip') # %% # # %%time # # Read data... # root = '../input/ashrae-energy-prediction' # train_df = pd.read_csv(os.path.join(root, 'train.csv')) # weather_train_df = pd.read_csv(os.path.join(root, 'weather_train.csv')) # test_df = pd.read_csv(os.path.join(root, 'test.csv')) # weather_test_df = pd.read_csv(os.path.join(root, 'weather_test.csv')) # building_meta_df = pd.read_csv(os.path.join(root, 'building_metadata.csv')) # sample_submission = pd.read_csv(os.path.join(root, 'sample_submission.csv')) # %% train_df['timestamp'] = pd.to_datetime(train_df['timestamp']) test_df['timestamp'] = pd.to_datetime(test_df['timestamp']) weather_train_df['timestamp'] = pd.to_datetime(weather_train_df['timestamp']) weather_test_df['timestamp'] = pd.to_datetime(weather_test_df['timestamp']) # %% {"_kg_hide-input": true, "_kg_hide-output": true} # # categorize primary_use column to reduce memory on merge... # primary_use_dict = {key: value for value, key in enumerate(primary_use_list)} # print('primary_use_dict: ', primary_use_dict) # building_meta_df['primary_use'] = building_meta_df['primary_use'].map(primary_use_dict) # gc.collect() # %% {"_kg_hide-input": true, "_kg_hide-output": true} reduce_mem_usage(train_df) reduce_mem_usage(test_df) reduce_mem_usage(building_meta_df) reduce_mem_usage(weather_train_df) reduce_mem_usage(weather_test_df) # %% [markdown] # # Save data in feather format # %% # %%time train_df.to_feather('train.feather') test_df.to_feather('test.feather') weather_train_df.to_feather('weather_train.feather') weather_test_df.to_feather('weather_test.feather') building_meta_df.to_feather('building_metadata.feather') sample_submission.to_feather('sample_submission.feather') # %% [markdown] # # Read data in feather format # # You can see "+ Add data" button on top-right of notebook, press this button and add output of this kernel, then you can use above saved feather data frame for fast loading! # # Let's see how fast it is. # %% # %%time train_df = pd.read_feather('train.feather') weather_train_df = pd.read_feather('weather_train.feather') test_df = pd.read_feather('test.feather') weather_test_df = pd.read_feather('weather_test.feather') building_meta_df =
pd.read_feather('building_metadata.feather')
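# Hedged sketch, not part of the kernel above: reduce_mem_usage picks the narrowest
# integer/float dtype that still holds each column's min/max and turns object columns into
# categories.  pandas can express the numeric half of that idea directly with
# pd.to_numeric(downcast=...), which makes a handy cross-check.  The toy frame below is
# made up; the column names only echo the ASHRAE tables.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "building_id": np.arange(1000, dtype=np.int64),      # values fit easily in int16
    "meter_reading": np.random.rand(1000) * 100.0,       # float64 -> float32
    "primary_use": ["Education", "Office"] * 500,        # object -> category
})

before_mb = toy.memory_usage(deep=True).sum() / 1024 ** 2

toy["building_id"] = pd.to_numeric(toy["building_id"], downcast="integer")
toy["meter_reading"] = pd.to_numeric(toy["meter_reading"], downcast="float")
toy["primary_use"] = toy["primary_use"].astype("category")

after_mb = toy.memory_usage(deep=True).sum() / 1024 ** 2
print("Memory usage: {:.3f} MB -> {:.3f} MB".format(before_mb, after_mb))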
import os import pickle import sys from pathlib import Path from typing import Union import matplotlib.pyplot as plt import numpy as np import pandas as pd from Bio import pairwise2 from scipy import interp from scipy.stats import linregress from sklearn.metrics import roc_curve, auc, precision_recall_curve import thoipapy import thoipapy.validation.bocurve from thoipapy.utils import make_sure_path_exists def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets): """ Parameters ---------- s df_set logging namedict predictors THOIPA_predictor_name Returns ------- """ logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK") ROC_AUC_df = pd.DataFrame() PR_AUC_df =
pd.DataFrame()
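# Hedged sketch, not part of thoipapy: collect_indiv_validation_data above prepares
# ROC_AUC_df and PR_AUC_df, i.e. one AUC value per (protein, predictor) cell.  A
# self-contained illustration with synthetic interface labels and scores; the predictor
# names are placeholders taken from the log message, the protein name is made up.
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc, precision_recall_curve

labels = np.tile([0, 1], 15)                       # synthetic 0/1 interface residues
rng = np.random.default_rng(0)
predictors = ["THOIPA", "PREDDIMER", "TMDOCK"]

roc_auc_row, pr_auc_row = {}, {}
for predictor in predictors:
    scores = labels * 0.5 + rng.random(labels.size)      # noisy synthetic scores
    fpr, tpr, _ = roc_curve(labels, scores)
    precision, recall, _ = precision_recall_curve(labels, scores)
    roc_auc_row[predictor] = auc(fpr, tpr)
    pr_auc_row[predictor] = auc(recall, precision)       # trapezoidal area under PR curve

ROC_AUC_df = pd.DataFrame([roc_auc_row], index=["example_protein"])
PR_AUC_df = pd.DataFrame([pr_auc_row], index=["example_protein"])
print(ROC_AUC_df.round(3))
print(PR_AUC_df.round(3))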
""" DB Recovery """ import pandas as pd from flask import current_app as app from app import celery from app.db_tools import ( nat_data_coll, nat_trends_coll, nat_series_coll, reg_data_coll, reg_trends_coll, reg_series_coll, reg_bdown_coll, prov_data_coll, prov_trends_coll, prov_series_coll, prov_bdown_coll, vax_admins_coll, vax_admins_summary_coll, pop_coll ) from app.db_tools.etl import ( preprocess_national_df, preprocess_regional_df, preprocess_provincial_df, build_national_trends, build_regional_trends, build_provincial_trends, build_regional_breakdown, build_provincial_breakdowns, build_national_series, build_regional_series, build_provincial_series, COLUMNS_TO_DROP, preprocess_vax_admins_df, preprocess_vax_admins_summary_df ) from settings.urls import ( URL_NATIONAL, URL_REGIONAL, URL_PROVINCIAL, URL_VAX_ADMINS_DATA, URL_VAX_ADMINS_SUMMARY_DATA, URL_VAX_POP_DATA ) from settings.vars import DATE_KEY, VAX_DATE_KEY class CollectionCreator: """Collection Creator""" @staticmethod @celery.task def create_national_collection(): """Drop and recreate national data collection""" df = pd.read_csv(URL_NATIONAL, parse_dates=[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_national_augmented = preprocess_national_df(df) national_records = df_national_augmented.to_dict(orient='records') try: app.logger.info("Creating national collection") nat_data_coll.drop() nat_data_coll.insert_many(national_records, ordered=True) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_national_trends_collection(): """Drop and recreate national trends data collection""" df = pd.read_csv(URL_NATIONAL, parse_dates=[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_national_augmented = preprocess_national_df(df) national_trends = build_national_trends(df_national_augmented) try: app.logger.info("Creating national trends collection") nat_trends_coll.drop() nat_trends_coll.insert_many(national_trends) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_national_series_collection(): """Drop and recreate national series data collection""" df = pd.read_csv(URL_NATIONAL, parse_dates=[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_national_augmented = preprocess_national_df(df) national_series = build_national_series(df_national_augmented) try: app.logger.info("Creating national series collection") nat_series_coll.drop() nat_series_coll.insert_one(national_series) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_regional_collection(): """Drop and recreate regional data collection""" df = pd.read_csv(URL_REGIONAL, parse_dates=[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_regional_augmented = preprocess_regional_df(df) regional_records = df_regional_augmented.to_dict(orient='records') try: app.logger.info("Creating regional collection") reg_data_coll.drop() reg_data_coll.insert_many(regional_records, ordered=True) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_regional_breakdown_collection(): """Drop and recreate regional breakdown data collection""" df = pd.read_csv( URL_REGIONAL, parse_dates=[DATE_KEY], low_memory=False) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_regional_augmented = preprocess_regional_df(df) regional_breakdown = build_regional_breakdown(df_regional_augmented) try: app.logger.info("Creating regional breakdown collection") reg_bdown_coll.drop() reg_bdown_coll.insert_one(regional_breakdown) except Exception as e: 
app.logger.error(e) @staticmethod @celery.task def create_regional_series_collection(): """Drop and recreate regional series data collection""" df = pd.read_csv( URL_REGIONAL, parse_dates=[DATE_KEY], low_memory=False) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_regional_augmented = preprocess_regional_df(df) regional_series = build_regional_series(df_regional_augmented) try: app.logger.info("Creating regional series collection") reg_series_coll.drop() reg_series_coll.insert_many(regional_series) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_regional_trends_collection(): """Drop and recreate regional trends data collection""" df = pd.read_csv( URL_REGIONAL, parse_dates=[DATE_KEY], low_memory=False) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_regional_augmented = preprocess_regional_df(df) regional_trends = build_regional_trends(df_regional_augmented) try: app.logger.info("Creating regional trends collection") reg_trends_coll.drop() reg_trends_coll.insert_many(regional_trends) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_provincial_collection(): """Drop and recreate provincial collection""" df = pd.read_csv(URL_PROVINCIAL) df = df.rename(columns=lambda x: x.strip()) df[DATE_KEY] = pd.to_datetime(df[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_provincial_augmented = preprocess_provincial_df(df) provincial_records = df_provincial_augmented.to_dict(orient='records') try: app.logger.info("Creating provincial") prov_data_coll.drop() prov_data_coll.insert_many(provincial_records, ordered=True) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_provincial_breakdown_collection(): """Drop and create provincial breakdown collection""" df = pd.read_csv(URL_PROVINCIAL) df = df.rename(columns=lambda x: x.strip()) df[DATE_KEY] = pd.to_datetime(df[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_provincial_augmented = preprocess_provincial_df(df) provincial_breakdowns = build_provincial_breakdowns( df_provincial_augmented) try: app.logger.info("Creating provincial breakdowns collection") prov_bdown_coll.drop() prov_bdown_coll.insert_many(provincial_breakdowns) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_provincial_series_collection(): """Drop and recreate provincial series data collection""" df = pd.read_csv(URL_PROVINCIAL) df = df.rename(columns=lambda x: x.strip()) df[DATE_KEY] = pd.to_datetime(df[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_provincial_augmented = preprocess_provincial_df(df) provincial_series = build_provincial_series( df_provincial_augmented) try: app.logger.info("Creating provincial series collection") prov_series_coll.drop() prov_series_coll.insert_many(provincial_series) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_provincial_trends_collection(): """Create provincial trends data collection""" df = pd.read_csv(URL_PROVINCIAL) df = df.rename(columns=lambda x: x.strip()) df[DATE_KEY] = pd.to_datetime(df[DATE_KEY]) df.drop(columns=COLUMNS_TO_DROP, inplace=True) df_provincial_augmented = preprocess_provincial_df(df) provincial_trends = build_provincial_trends(df_provincial_augmented) try: app.logger.info("Creating provincial trends collection") prov_trends_coll.drop() prov_trends_coll.insert_many(provincial_trends) except Exception as e: app.logger.error(e) @staticmethod @celery.task def create_vax_admins_collection(): """Create vaccine administrations 
colleciton""" df = pd.read_csv( URL_VAX_ADMINS_DATA, parse_dates=[VAX_DATE_KEY], low_memory=False) df = preprocess_vax_admins_df(df) records = df.to_dict(orient='records') try: app.logger.info("Creating vax admins collection") vax_admins_coll.drop() vax_admins_coll.insert_many(records, ordered=True) except Exception as e: app.logger.error(f"While creating vax admins collection: {e}") @staticmethod @celery.task def create_vax_admins_summary_collection(): """Create vaccine administrations summary colleciton""" df = pd.read_csv( URL_VAX_ADMINS_SUMMARY_DATA, parse_dates=[VAX_DATE_KEY]) df = preprocess_vax_admins_summary_df(df) records = df.to_dict(orient='records') try: app.logger.info("Creating vax admins summary collection") vax_admins_summary_coll.drop() vax_admins_summary_coll.insert_many(records, ordered=True) except Exception as e: app.logger.error( f"While creating vax admins summary collection: {e}") @staticmethod @celery.task def create_vax_pop_collection(): """Create OD population collection""" try: pop_df =
pd.read_csv(URL_VAX_POP_DATA)
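# Hedged sketch, not part of the app above: each create_*_collection task repeats the same
# pandas-to-MongoDB handoff -- read the CSV with parsed dates, drop unused columns, then
# hand to_dict(orient="records") to insert_many.  A stand-alone version of the pandas
# half, with an inline toy CSV in place of URL_NATIONAL; "data" and "note" merely stand in
# for DATE_KEY and COLUMNS_TO_DROP.
import io
import pandas as pd

toy_csv = io.StringIO(
    "data,totale_casi,note\n"
    "2020-03-01T18:00:00,1694,\n"
    "2020-03-02T18:00:00,2036,some note\n"
)

df = pd.read_csv(toy_csv, parse_dates=["data"])
df = df.drop(columns=["note"])
records = df.to_dict(orient="records")       # list of dicts, as insert_many expects
print(records[0]["data"], records[0]["totale_casi"])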
import pandas as pd from datetime import datetime import haversine as hs from haversine import Unit class clientclass: """The class clientclass is used to secure all the information required by each client for doing a trip. In this class, methods were added so that the client object could perform some functionalities. :param dftrip: Dataframe with the trips :type dftrip: Pandas Dataframe :param vm_df: Dataframe with all the user defined VMs :type vm_df: Pandas Dataframe :param df_LTE: Dataframe with all the LTE stations :type df_LTE: Pandas Dataframe :param cone: Cone aperture :type cone: Integer :param dfmigrations: Dataframe with the source and suggested destination for migration :type dfmigrations: Pandas Dataframe :param vm: Dataframe with the current virtual machine running :type vm: Pandas Dataframe :param lte_st: Dataframe with the current LTE station that the client is connected with :type lte_st: Pandas Dataframe :param lte_df: Dataframe with all the LTE stations :type lte_df: Pandas Dataframe :param latencies: List with the latency that the user has to the source and to the target :type latencies: List :param distancies: List with the distance that the user has to the source and to the target :type distancies: List :param liststats: List with all the statistics from the migrations that occured for that client on that trip :type liststats: List :param mig_id_inc: The number of migrations that occured in that trip for that client :type mig_id_inc: Integer :param triptime: The time that the whole trip took :type triptime: Integer :param tripdistance: The distance of the whole trip :type tripdistance: Integer :param mig_under: Defines if the client is under a migration or not :type mig_under: Integer :param timeout: The current time of timeout that the client is serving :type timeout: Integer :param station_heading: The heading the station has relative to the client :type station_heading: Integer :param cone: The cone aperture :type cone: Integer :param cone_min: The minimum value for the cone aperture :type cone_min: Integer :param cone_max: The maximum value for the cone aperture :type cone_max: Integer """ def __init__(self,dftrip, vm_df, df_LTE, cone): self.dftrip = dftrip self.dfmigrations = pd.DataFrame({ 'TripID': pd.Series([], dtype='str'), 'Latitude': pd.Series([], dtype='float'), 'Longitude':
pd.Series([], dtype='float')
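# Hedged sketch, not part of the class above: dfmigrations starts out as an empty frame
# with explicit dtypes and gains one row per migration event, and distances come from the
# haversine package imported at the top of the file.  The coordinates and trip id below
# are made up, and the haversine()/Unit usage assumes the package API as imported there.
import pandas as pd
import haversine as hs
from haversine import Unit

dfmigrations = pd.DataFrame({
    "TripID": pd.Series([], dtype="str"),
    "Latitude": pd.Series([], dtype="float"),
    "Longitude": pd.Series([], dtype="float"),
})

client_pos = (40.6405, -8.6538)     # hypothetical client position (lat, lon)
station_pos = (40.6443, -8.6455)    # hypothetical LTE station position
dist_m = hs.haversine(client_pos, station_pos, unit=Unit.METERS)

new_row = pd.DataFrame([{"TripID": "trip_001",
                         "Latitude": client_pos[0],
                         "Longitude": client_pos[1]}])
dfmigrations = pd.concat([dfmigrations, new_row], ignore_index=True)
print("{:.1f} m to station, {} migration row(s) recorded".format(dist_m, len(dfmigrations)))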
import copy import datetime as dt import itertools import os from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd import statsmodels.graphics.tsaplots as tpl import statsmodels.tsa.arima.model as arm import statsmodels.tsa.stattools as stls import tqdm from bokeh.io import export_png, export_svgs from bokeh.models import DatetimeTickFormatter, Range1d from bokeh.plotting import figure, output_file, show from pytz import timezone from scipy.stats import boxcox from sklearn.base import BaseEstimator, TransformerMixin from sklearn.compose import ColumnTransformer from sklearn.impute import KNNImputer from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import StandardScaler from statsmodels.tsa.stattools import acf, pacf from statsmodels.tsa.tsatools import detrend import mise.data as data from mise.constants import SEOUL_STATIONS, SEOULTZ HOURLY_DATA_PATH = "/input/python/input_seoul_imputed_hourly_pandas.csv" DAILY_DATA_PATH = "/input/python/input_seoul_imputed_daily_pandas.csv" def stats_arima(station_name="종로구"): print("Data loading start...", flush=True) _df_h = data.load_imputed([1], filepath=HOURLY_DATA_PATH) df_h = _df_h.query('stationCode == "' + str(SEOUL_STATIONS[station_name]) + '"') if ( station_name == "종로구" and not Path("/input/python/input_jongno_imputed_hourly_pandas.csv").is_file() ): # load imputed result df_h.to_csv("/input/python/input_jongno_imputed_hourly_pandas.csv") print("Data loading complete", flush=True) targets = ["PM10", "PM25"] # p (1, 0, 0) ~ (3, 0, 0), (4, 0, 0) ~ (6, 0, 0), (7, 0, 0) ~ (9, 0, 0), # p (1, 0, 1) ~ (3, 0, 1), (4, 0, 1) ~ (6, 0, 1), (7, 0, 1) ~ (9, 0, 1), # p (1, 0, 2) ~ (3, 0, 2), (4, 0, 2) ~ (6, 0, 2), (7, 0, 2) ~ (9, 0, 2), orders = [(2, 0, 0), (3, 0, 0)] sample_size = 48 output_size = 24 train_fdate = dt.datetime(2008, 1, 4, 0).astimezone(SEOULTZ) train_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ) test_fdate = dt.datetime(2019, 1, 1, 0).astimezone(SEOULTZ) test_tdate = dt.datetime(2020, 10, 31, 23).astimezone(SEOULTZ) # consective dates between train and test assert train_tdate + dt.timedelta(hours=1) == test_fdate for target in targets: for order in orders: output_dir = Path( "/mnt/data/ARIMA_" + str(order) + "/" + station_name + "/" + target + "/" ) png_dir = output_dir / Path("png/") svg_dir = output_dir / Path("svg/") data_dir = output_dir / Path("csv/") Path.mkdir(data_dir, parents=True, exist_ok=True) Path.mkdir(png_dir, parents=True, exist_ok=True) Path.mkdir(svg_dir, parents=True, exist_ok=True) # norm_values, norm_maxlog = boxcox(df_h[target]) # norm_target = "norm_" + target train_set = data.UnivariateRNNMeanSeasonalityDataset( station_name=station_name, target=target, filepath=HOURLY_DATA_PATH, features=[ "SO2", "CO", "O3", "NO2", "PM10", "PM25", "temp", "u", "v", "pres", "humid", "prep", "snow", ], features_1=[ "SO2", "CO", "O3", "NO2", "PM10", "PM25", "temp", "v", "pres", "humid", "prep", "snow", ], features_2=["u"], fdate=train_fdate, tdate=train_tdate, sample_size=sample_size, output_size=output_size, train_valid_ratio=0.8, ) train_set.preprocess() # set fdate=test_fdate, test_set = data.UnivariateRNNMeanSeasonalityDataset( station_name=station_name, target=target, filepath=HOURLY_DATA_PATH, features=[ "SO2", "CO", "O3", "NO2", "PM10", "PM25", "temp", "u", "v", "pres", "humid", "prep", "snow", ], features_1=[ "SO2", "CO", "O3", "NO2", "PM10", "PM25", "temp", "v", "pres", "humid", "prep", "snow", ], features_2=["u"], fdate=test_fdate, 
tdate=test_tdate, sample_size=sample_size, output_size=output_size, scaler_X=train_set.scaler_X, scaler_Y=train_set.scaler_Y, ) test_set.transform() df_train = train_set.ys.loc[train_fdate:train_tdate, :].copy() df_test = test_set.ys.loc[test_fdate:test_tdate, :].copy() df_test_org = test_set.ys_raw.loc[test_fdate:test_tdate, :].copy() print("ARIMA " + str(order) + " of " + target + "...", flush=True) def run_arima(order): df_obs = mw_df(df_test_org, target, output_size, test_fdate, test_tdate) dates = df_obs.index df_sim = sim_arima( df_train, df_test, dates, target, order, test_set.scaler_Y, sample_size, output_size, data_dir, ) assert df_obs.shape == df_sim.shape # join df plot_arima( df_sim, df_obs, target, order, data_dir, png_dir, svg_dir, test_fdate, test_tdate, station_name, output_size, ) # save to csv csv_fname = "df_test_obs.csv" df_obs.to_csv(data_dir / csv_fname) csv_fname = "df_test_sim.csv" df_sim.to_csv(data_dir / csv_fname) print("ARIMA " + str(order) + " ...") run_arima(order) def mw_df(df_org, target, output_size, fdate, tdate): """ moving window """ cols = [str(i) for i in range(output_size)] df_obs = pd.DataFrame(columns=cols) df = df_org.loc[fdate:tdate, :] cols = [str(t) for t in range(output_size)] df_obs = pd.DataFrame(columns=cols) for _, (index, _) in enumerate(df.iterrows()): # skip prediction before fdate if index < fdate: continue # findex ~ tindex = output_size findex = index tindex = index + dt.timedelta(hours=(output_size - 1)) if tindex > tdate - dt.timedelta(hours=output_size): break _df = df.loc[findex:tindex, :] df_obs.loc[findex] = _df.to_numpy().reshape(-1) df_obs.index.name = "date" return df_obs def sim_arima( df_train, df_test, dates, target, order, scaler, sample_size, output_size, data_dir ): # columns are offset to datetime cols = [str(i) for i in range(output_size)] df_sim = pd.DataFrame(columns=cols) model_dir = data_dir / "model" Path.mkdir(model_dir, parents=True, exist_ok=True) # initial endog # train data -> initial endog values = np.zeros((len(dates), output_size), dtype=df_train[target].dtype) model = arm.ARIMA(endog=df_train.to_numpy(), order=order) model_fit = model.fit() # TODO : Need Optimization, too slow! 
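# The loop below is a rolling-origin (walk-forward) evaluation: each step forecasts the
# next `output_size` hours from the current state, then advances the sample with
# ARIMAResults.append, which keeps the already-fitted state-space parameters (the "same
# state-space params" noted in the inline comment) instead of re-estimating them --
# re-fitting at every hour is what the TODO above flags as too slow.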
assert df_test.index[0] == dates[0] for i, (index, _) in tqdm.tqdm( enumerate(df_test.iterrows()), total=len(dates) - 1 ): if i >= len(dates): print(model_fit.summary()) break # out-of-sample forecast ys = model_fit.forecast(steps=output_size) # same state-space params, but different input (endog) model_fit = model_fit.append([df_test.loc[index, :].to_numpy()]) if i % 24 == 0: summary = model_fit.summary() filename = str(model_dir / ("model_" + index.strftime("%Y%m%d%H") + ".csv")) with open(filename, "w") as f: f.write(summary.as_csv()) # inverse_transform _dates = pd.date_range( index, index + dt.timedelta(hours=(output_size - 1)), freq="1H" ) value = scaler.named_transformers_["num"].inverse_transform( pd.DataFrame(data=ys, index=_dates, columns=[target]) ) values[i, :] = value.squeeze() df_sim = pd.DataFrame(data=values, index=dates, columns=cols) df_sim.index.name = "date" return df_sim def plot_arima( df_sim, df_obs, target, order, data_dir, png_dir, svg_dir, _test_fdate, _test_tdate, station_name, output_size, ): # dir_prefix = Path("/mnt/data/ARIMA/" + station_name + "/" + target + "/") times = list(range(0, output_size + 1)) corrs = [1.0] test_fdate = _test_fdate test_tdate = _test_tdate - dt.timedelta(hours=output_size) _obs = df_obs[ (df_obs.index.get_level_values(level="date") >= test_fdate) & (df_obs.index.get_level_values(level="date") <= test_tdate) ] # simulation result might have exceed our observation _sim = df_sim[ (df_sim.index.get_level_values(level="date") >= test_fdate) & (df_sim.index.get_level_values(level="date") <= test_tdate) ] for t in range(output_size): # zero-padded directory name Path.mkdir(data_dir / Path(str(t).zfill(2)), parents=True, exist_ok=True) Path.mkdir(svg_dir / Path(str(t).zfill(2)), parents=True, exist_ok=True) Path.mkdir(png_dir / Path(str(t).zfill(2)), parents=True, exist_ok=True) # get column per each time obs = _obs[str(t)].to_numpy() sim = _sim[str(t)].to_numpy() scatter_fname = "scatter_" + str(t).zfill(2) + "h" plot_scatter( obs, sim, data_dir / Path(str(t).zfill(2)), png_dir / Path(str(t).zfill(2)), svg_dir / Path(str(t).zfill(2)), scatter_fname, ) # plot line line_fname = "line_" + str(t).zfill(2) + "h" plot_dates = plot_line( obs, sim, test_fdate, test_tdate, target, data_dir / Path(str(t).zfill(2)), png_dir / Path(str(t).zfill(2)), svg_dir / Path(str(t).zfill(2)), line_fname, ) csv_fname = "data_" + str(t).zfill(2) + "h.csv" df_obs_sim = pd.DataFrame({"obs": obs, "sim": sim}, index=plot_dates) df_obs_sim.to_csv(data_dir / Path(str(t).zfill(2)) / csv_fname) # np.corrcoef -> [[1.0, corr], [corr, 1]] corrs.append(np.corrcoef(obs, sim)[0, 1]) # plot corr for all times corr_fname = "corr_time" plot_corr(times, corrs, data_dir, png_dir, svg_dir, corr_fname) def plot_scatter(obs, sim, data_dir, png_dir, svg_dir, output_name): png_path = png_dir / (output_name + ".png") svg_path = svg_dir / (output_name + ".svg") df_scatter = pd.DataFrame({"obs": obs, "sim": sim}) df_scatter.to_csv(data_dir / (output_name + ".csv")) p = figure(title="Model/OBS") p.toolbar.logo = None p.toolbar_location = None p.xaxis.axis_label = "OBS" p.yaxis.axis_label = "Model" maxval = np.nanmax([np.nanmax(obs), np.nanmax(sim)]) p.xaxis.bounds = (0.0, maxval) p.yaxis.bounds = (0.0, maxval) p.x_range = Range1d(0.0, maxval) p.y_range = Range1d(0.0, maxval) p.scatter(obs, sim) export_png(p, filename=png_path) p.output_backend = "svg" export_svgs(p, filename=str(svg_path)) def plot_line( obs, sim, test_fdate, test_tdate, target, data_dir, png_dir, svg_dir, output_name ): 
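# plot_line: writes the obs/sim pair to CSV, draws a bokeh line chart of both series over
# hourly dates starting at test_fdate, exports it to PNG and SVG, and returns the date
# array so the caller can reuse it as the index of the per-horizon CSV.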
png_path = png_dir / (output_name + ".png") svg_path = svg_dir / (output_name + ".svg") dates = np.array([test_fdate + dt.timedelta(hours=i) for i in range(len(obs))]) df_line = pd.DataFrame({"dates": dates, "obs": obs, "sim": sim}) df_line.to_csv(data_dir / (output_name + ".csv")) p = figure(title="OBS & Model") p.toolbar.logo = None p.toolbar_location = None p.xaxis.axis_label = "dates" p.xaxis.formatter = DatetimeTickFormatter() p.yaxis.axis_label = target p.line(x=dates, y=obs, line_color="dodgerblue", legend_label="obs") p.line(x=dates, y=sim, line_color="lightcoral", legend_label="sim") export_png(p, filename=png_path) p.output_backend = "svg" export_svgs(p, filename=str(svg_path)) return dates def plot_corr(times, corrs, data_dir, png_dir, svg_dir, output_name): png_path = png_dir / (output_name + ".png") svg_path = svg_dir / (output_name + ".svg") df_corr = pd.DataFrame({"lags": times, "corr": corrs}) df_corr.to_csv(data_dir / (output_name + ".csv")) p = figure(title="Correlation of OBS & Model") p.toolbar.logo = None p.toolbar_location = None p.xaxis.axis_label = "lags" p.yaxis.axis_label = "corr" p.yaxis.bounds = (0.0, 1.0) p.y_range = Range1d(0.0, 1.0) p.line(x=times, y=corrs) export_png(p, filename=png_path) p.output_backend = "svg" export_svgs(p, filename=str(svg_path)) def plot_acf(x, nlags, _acf, _pacf, data_dir, png_dir, svg_dir): lags_acf = range(len(_acf)) lags_pacf = range(len(_pacf)) png_path = png_dir / ("acf.png") svg_path = svg_dir / ("acf.svg") plt.figure() fig = tpl.plot_acf(x, lags=nlags) fig.savefig(png_path) fig.savefig(svg_path) csv_path = data_dir / ("acf.csv") df_acf =
pd.DataFrame({"lags": lags_acf, "acf": _acf})
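# Hedged sketch, not part of the original script: mw_df above turns a single observed
# series into a matrix whose row at time t holds the next `output_size` hourly values,
# which is what each ARIMA forecast is compared against.  The same construction on a toy
# series with made-up numbers:
import datetime as dt
import numpy as np
import pandas as pd

output_size = 4
index = pd.date_range("2019-01-01 00:00", periods=12, freq="1H")
series = pd.Series(np.arange(12, dtype=float), index=index, name="PM10")

cols = [str(t) for t in range(output_size)]
rows = {}
for ts in series.index:
    window = series.loc[ts: ts + dt.timedelta(hours=output_size - 1)]
    if len(window) < output_size:        # horizon would run past the available data
        break
    rows[ts] = window.to_numpy()

df_obs = pd.DataFrame.from_dict(rows, orient="index", columns=cols)
df_obs.index.name = "date"
print(df_obs.head(3))                    # the row at 00:00 holds the values for hours 00-03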
from itertools import chain from collections import defaultdict from functools import partial import warnings import numpy as np import pandas as pd from scipy.signal import fftconvolve from scipy.interpolate import interp1d from cooler.tools import partition import cooler import bioframe from .lib import assign_supports, numutils where = np.flatnonzero concat = chain.from_iterable def _contact_areas(distbins, scaffold_length): distbins = distbins.astype(float) scaffold_length = float(scaffold_length) outer_areas = np.maximum(scaffold_length - distbins[:-1], 0) ** 2 inner_areas = np.maximum(scaffold_length - distbins[1:], 0) ** 2 return 0.5 * (outer_areas - inner_areas) def contact_areas(distbins, region1, region2): if region1 == region2: start, end = region1 areas = _contact_areas(distbins, end - start) else: start1, end1 = region1 start2, end2 = region2 if start2 <= start1: start1, start2 = start2, start1 end1, end2 = end2, end1 areas = ( _contact_areas(distbins, end2 - start1) - _contact_areas(distbins, start2 - start1) - _contact_areas(distbins, end2 - end1) ) if end1 < start2: areas += _contact_areas(distbins, start2 - end1) return areas def compute_scaling(df, region1, region2=None, dmin=int(1e1), dmax=int(1e7), n_bins=50): import dask.array as da if region2 is None: region2 = region1 distbins = numutils.logbins(dmin, dmax, N=n_bins) areas = contact_areas(distbins, region1, region2) df = df[ (df["pos1"] >= region1[0]) & (df["pos1"] < region1[1]) & (df["pos2"] >= region2[0]) & (df["pos2"] < region2[1]) ] dists = (df["pos2"] - df["pos1"]).values if isinstance(dists, da.Array): obs, _ = da.histogram(dists[(dists >= dmin) & (dists < dmax)], bins=distbins) else: obs, _ = np.histogram(dists[(dists >= dmin) & (dists < dmax)], bins=distbins) return distbins, obs, areas def lattice_pdist_frequencies(n, points): """ Distribution of pairwise 1D distances among a collection of distinct integers ranging from 0 to n-1. Parameters ---------- n : int Size of the lattice on which the integer points reside. points : sequence of int Arbitrary integers between 0 and n-1, inclusive, in any order but with no duplicates. Returns ------- h : 1D array of length n h[d] counts the number of integer pairs that are exactly d units apart Notes ----- This is done using a convolution via FFT. Thanks to <NAME>; see `<http://stackoverflow.com/questions/42423823/distribution-of-pairwise-distances-between-many-integers>`_. """ if len(np.unique(points)) != len(points): raise ValueError("Integers must be distinct.") x = np.zeros(n) x[points] = 1 return np.round(fftconvolve(x, x[::-1], mode="full")).astype(int)[-n:] def count_bad_pixels_per_diag(n, bad_bins): """ Efficiently count the number of bad pixels on each upper diagonal of a matrix assuming a sequence of bad bins forms a "grid" of invalid pixels. Each bad bin bifurcates into two a row and column of bad pixels, so an upper bound on number of bad pixels per diagonal is 2*k, where k is the number of bad bins. For a given diagonal, we need to subtract from this upper estimate the contribution from rows/columns reaching "out-of-bounds" and the contribution of the intersection points of bad rows with bad columns that get double counted. 
:: o : bad bin * : bad pixel x : intersection bad pixel $ : out of bounds bad pixel $ $ $ *--------------------------+ * * * * | * * * * | ** * * | o****x*****x***********|$ * * * | * * * | * * * | o******x***********|$ * * | * * | * * | * * | * * | ** | o***********|$ * | * | Parameters ---------- n : int total number of bins bad_bins : 1D array of int sorted array of bad bin indexes Returns ------- dcount : 1D array of length n dcount[d] == number of bad pixels on diagonal d """ k = len(bad_bins) dcount = np.zeros(n, dtype=int) # Store all intersection pixels in a separate array # ~O(n log n) with fft ixn = lattice_pdist_frequencies(n, bad_bins) dcount[0] = ixn[0] # Keep track of out-of-bounds pixels by squeezing left and right bounds # ~O(n) pl = 0 pr = k for diag in range(1, n): if pl < k: while (bad_bins[pl] - diag) < 0: pl += 1 if pl == k: break if pr > 0: while (bad_bins[pr - 1] + diag) >= n: pr -= 1 if pr == 0: break dcount[diag] = 2 * k - ixn[diag] - pl - (k - pr) return dcount def count_all_pixels_per_diag(n): """ Total number of pixels on each upper diagonal of a square matrix. Parameters ---------- n : int total number of bins (dimension of square matrix) Returns ------- dcount : 1D array of length n dcount[d] == total number of pixels on diagonal d """ return np.arange(n, 0, -1) def count_all_pixels_per_block(x, y): """ Calculate total number of pixels in a rectangular block Parameters ---------- x : int block width in pixels y : int block height in pixels Returns ------- number_of_pixels : int total number of pixels in a block """ return x * y def count_bad_pixels_per_block(x, y, bad_bins_x, bad_bins_y): """ Calculate number of "bad" pixels per rectangular block of a contact map "Bad" pixels are inferred from the balancing weight column `weight_name` or provided directly in the form of an array `bad_bins`. Setting `weight_name` and `bad_bins` to `None` yields 0 bad pixels in a block. Parameters ---------- x : int block width in pixels y : int block height in pixels bad_bins_x : int number of bad bins on x-side bad_bins_y : int number of bad bins on y-side Returns ------- number_of_pixes : int number of "bad" pixels in a block """ # Calculate the resulting bad pixels in a rectangular block: return (x * bad_bins_y) + (y * bad_bins_x) - (bad_bins_x * bad_bins_y) def make_diag_table(bad_mask, span1, span2): """ Compute the total number of elements ``n_elem`` and the number of bad elements ``n_bad`` per diagonal for a single contact area encompassing ``span1`` and ``span2`` on the same genomic scaffold (cis matrix). Follows the same principle as the algorithm for finding contact areas for computing scalings. Parameters ---------- bad_mask : 1D array of bool Mask of bad bins for the whole genomic scaffold containing the regions of interest. span1, span2 : pair of ints The bin spans (not genomic coordinates) of the two regions of interest. Returns ------- diags : pandas.DataFrame Table indexed by 'diag' with columns ['n_elem', 'n_bad']. 
""" def _make_diag_table(n_bins, bad_locs): diags = pd.DataFrame(index=pd.Series(np.arange(n_bins), name="diag")) diags["n_elem"] = count_all_pixels_per_diag(n_bins) diags["n_valid"] = diags["n_elem"] - count_bad_pixels_per_diag(n_bins, bad_locs) return diags if span1 == span2: lo, hi = span1 diags = _make_diag_table(hi - lo, where(bad_mask[lo:hi])) else: lo1, hi1 = span1 lo2, hi2 = span2 if lo2 <= lo1: lo1, lo2 = lo2, lo1 hi1, hi2 = hi2, hi1 diags = ( _make_diag_table(hi2 - lo1, where(bad_mask[lo1:hi2])) .subtract( _make_diag_table(lo2 - lo1, where(bad_mask[lo1:lo2])), fill_value=0 ) .subtract( _make_diag_table(hi2 - hi1, where(bad_mask[hi1:hi2])), fill_value=0 ) ) if hi1 < lo2: diags.add( _make_diag_table(lo2 - hi1, where(bad_mask[hi1:lo2])), fill_value=0 ) diags = diags[diags["n_elem"] > 0] diags = diags.drop("n_elem", axis=1) return diags.astype(int) def make_diag_tables(clr, regions, regions2=None, weight_name="weight", bad_bins=None): """ For every support region infer diagonals that intersect this region and calculate the size of these intersections in pixels, both "total" and "n_valid", where "n_valid" does not include "bad" bins into counting. "Bad" pixels are inferred from the balancing weight column `weight_name` or provided directly in the form of an array `bad_bins`. Setting `weight_name` and `bad_bins` to `None` yields 0 "bad" pixels per diagonal per support region. When `regions2` are provided, all intersecting diagonals are reported for each rectangular and asymmetric block defined by combinations of matching elements of `regions` and `regions2`. Otherwise only `regions`-based symmetric square blocks are considered. Only intra-chromosomal regions are supported. Parameters ---------- clr : cooler.Cooler Input cooler regions : list a list of genomic support regions regions2 : list a list of genomic support regions for asymmetric regions weight_name : str name of the weight vector in the "bins" table, if weight_name is None returns 0 for each block. Balancing weight are used to infer bad bins. bad_bins : array-like a list of bins to ignore. Indexes of bins must be absolute, as in clr.bins()[:], as opposed to being offset by chromosome start. "bad_bins" will be combined with the bad bins masked by balancing if there are any. Returns ------- diag_tables : dict dictionary with DataFrames of relevant diagonals for every support. 
""" regions = bioframe.parse_regions(regions, clr.chromsizes).values if regions2 is not None: regions2 = bioframe.parse_regions(regions2, clr.chromsizes).values bins = clr.bins()[:] if weight_name is None: # ignore bad bins sizes = dict(bins.groupby("chrom").size()) bad_bin_dict = { chrom: np.zeros(sizes[chrom], dtype=bool) for chrom in sizes.keys() } elif isinstance(weight_name, str): # using balacning weight to infer bad bins if weight_name not in clr.bins().columns: raise KeyError(f"Balancing weight {weight_name} not found!") groups = dict(iter(bins.groupby("chrom")[weight_name])) bad_bin_dict = { chrom: np.array(groups[chrom].isnull()) for chrom in groups.keys() } else: raise ValueError("`weight_name` can be `str` or `None`") # combine custom "bad_bins" with "bad_bin_dict": if bad_bins is not None: # check if "bad_bins" are legit: try: bad_bins_chrom = bins.iloc[bad_bins].reset_index(drop=False) except IndexError: raise ValueError("Provided `bad_bins` are incorrect or out-of-bound") # group them by observed chromosomes only bad_bins_grp = bad_bins_chrom[["index", "chrom"]].groupby( "chrom", observed=True ) # update "bad_bin_dict" with "bad_bins" for each chrom: for chrom, bin_ids in bad_bins_grp["index"]: co = clr.offset(chrom) # adjust by chromosome offset bad_bin_dict[chrom][bin_ids.values - co] = True diag_tables = {} for i in range(len(regions)): chrom, start1, end1, name1 = regions[i] if regions2 is not None: chrom2, start2, end2, name2 = regions2[i] # cis-only for now: assert chrom2 == chrom else: start2, end2 = start1, end1 # translate regions into relative bin id-s: lo1, hi1 = clr.extent((chrom, start1, end1)) lo2, hi2 = clr.extent((chrom, start2, end2)) co = clr.offset(chrom) lo1 -= co lo2 -= co hi1 -= co hi2 -= co bad_mask = bad_bin_dict[chrom] newname = name1 if regions2 is not None: newname = (name1, name2) diag_tables[newname] = make_diag_table(bad_mask, [lo1, hi1], [lo2, hi2]) return diag_tables def make_block_table(clr, regions1, regions2, weight_name="weight", bad_bins=None): """ Creates a table that characterizes a set of rectangular genomic blocks formed by combining regions from regions1 and regions2. For every block calculate its "area" in pixels ("n_total"), and calculate number of "valid" pixels in each block ("n_valid"). "Valid" pixels exclude "bad" pixels, which in turn inferred from the balancing weight column `weight_name` or provided directly in the form of an array of `bad_bins`. Setting `weight_name` and `bad_bins` to `None` yields 0 "bad" pixels per block. Parameters ---------- clr : cooler.Cooler Input cooler regions1 : iterable a collection of genomic regions regions2 : iterable a collection of genomic regions weight_name : str name of the weight vector in the "bins" table, if weight_name is None returns 0 for each block. Balancing weight are used to infer bad bins. bad_bins : array-like a list of bins to ignore. Indexes of bins must be absolute, as in clr.bins()[:], as opposed to being offset by chromosome start. "bad_bins" will be combined with the bad bins masked by balancing if there are any. Returns ------- block_table : dict dictionary for blocks that are 0-indexed """ if bad_bins is None: bad_bins = np.asarray([]).astype(int) else: bad_bins = np.asarray(bad_bins).astype(int) regions1 = bioframe.parse_regions(regions1, clr.chromsizes).values regions2 = bioframe.parse_regions(regions2, clr.chromsizes).values # should we check for nestedness here, or that each region1 is < region2 ? 
block_table = {} for r1, r2 in zip(regions1, regions2): chrom1, start1, end1, name1 = r1 chrom2, start2, end2, name2 = r2 # translate regions into relative bin id-s: lo1, hi1 = clr.extent((chrom1, start1, end1)) lo2, hi2 = clr.extent((chrom2, start2, end2)) # width and height of a block: x = hi1 - lo1 y = hi2 - lo2 # get "regional" bad_bins for each of the regions bx = bad_bins[(bad_bins >= lo1) & (bad_bins < hi1)] - lo1 by = bad_bins[(bad_bins >= lo2) & (bad_bins < hi2)] - lo2 # now we need to combine it with the balancing weights if weight_name is None: bad_bins_x = len(bx) bad_bins_y = len(by) elif isinstance(weight_name, str): if weight_name not in clr.bins().columns: raise KeyError(f"Balancing weight {weight_name} not found!") else: # extract "bad" bins filtered by balancing: cb_bins_x = clr.bins()[weight_name][lo1:hi1].isnull().values cb_bins_y = clr.bins()[weight_name][lo2:hi2].isnull().values # combine with "bad_bins" using assignment: cb_bins_x[bx] = True cb_bins_y[by] = True # count and yield final list of bad bins: bad_bins_x = np.count_nonzero(cb_bins_x) bad_bins_y = np.count_nonzero(cb_bins_y) else: raise ValueError("`weight_name` can be `str` or `None`") # calculate total and bad pixels per block: n_tot = count_all_pixels_per_block(x, y) n_bad = count_bad_pixels_per_block(x, y, bad_bins_x, bad_bins_y) # fill in "block_table" with number of valid pixels: block_table[name1, name2] = defaultdict(int) block_table[name1, name2]["n_valid"] = n_tot - n_bad return block_table def _diagsum_symm(clr, fields, transforms, regions, span): """ calculates diagonal summary for a collection of square symmteric regions defined by regions. returns a dictionary of DataFrames with diagonal sums as values, and 0-based indexes of square genomic regions as keys. """ lo, hi = span bins = clr.bins()[:] pixels = clr.pixels()[lo:hi] pixels = cooler.annotate(pixels, bins, replace=False) # this could further expanded to allow for custom groupings: pixels["dist"] = pixels["bin2_id"] - pixels["bin1_id"] for field, t in transforms.items(): pixels[field] = t(pixels) diag_sums = {} # r define square symmetric block i: for i, r in enumerate(regions): r1 = assign_supports(pixels, [r], suffix="1") r2 = assign_supports(pixels, [r], suffix="2") # calculate diag_sums on the spot to allow for overlapping blocks: diag_sums[i] = pixels[(r1 == r2)].groupby("dist")[fields].sum() return diag_sums def diagsum( clr, regions, transforms={}, weight_name="weight", bad_bins=None, chunksize=10000000, ignore_diags=2, map=map, ): """ Intra-chromosomal diagonal summary statistics. Parameters ---------- clr : cooler.Cooler Cooler object regions : sequence of genomic range tuples Support regions for intra-chromosomal diagonal summation transforms : dict of str -> callable, optional Transformations to apply to pixels. The result will be assigned to a temporary column with the name given by the key. Callables take one argument: the current chunk of the (annotated) pixel dataframe. weight_name : str name of the balancing weight vector used to count "bad"(masked) pixels per diagonal. Use `None` to avoid masking "bad" pixels. bad_bins : array-like a list of bins to ignore per support region. Combines with the list of bad bins from balacning weight. chunksize : int, optional Size of pixel table chunks to process ignore_diags : int, optional Number of intial diagonals to exclude from statistics map : callable, optional Map functor implementation. 
Returns ------- Dataframe of diagonal statistics for all regions """ spans = partition(0, len(clr.pixels()), chunksize) fields = ["count"] + list(transforms.keys()) regions = bioframe.parse_regions(regions, clr.chromsizes) regions = regions[regions['chrom'].isin(clr.chromnames)].reset_index(drop=True) dtables = make_diag_tables(clr, regions, weight_name=weight_name, bad_bins=bad_bins) # combine masking with existing transforms and add a "count" transform: if bad_bins is not None: # turn bad_bins into a mask of size clr.bins: mask_size = len(clr.bins()) bad_bins_mask = np.ones(mask_size, dtype=int) bad_bins_mask[bad_bins] = 0 # masked_transforms = {} bin1 = "bin1_id" bin2 = "bin2_id" for field in fields: if field in transforms: # combine masking and transform, minding the scope: t = transforms[field] masked_transforms[field] = ( lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]] ) else: # presumably field == "count", mind the scope as well: masked_transforms[field] = ( lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]] ) # substitute transforms to the masked_transforms: transforms = masked_transforms for dt in dtables.values(): for field in fields: agg_name = "{}.sum".format(field) dt[agg_name] = 0 job = partial(_diagsum_symm, clr, fields, transforms, regions.values) results = map(job, spans) for result in results: for i, agg in result.items(): region = regions.loc[i, "name"] for field in fields: agg_name = "{}.sum".format(field) dtables[region][agg_name] = dtables[region][agg_name].add( agg[field], fill_value=0 ) if ignore_diags: for dt in dtables.values(): for field in fields: agg_name = "{}.sum".format(field) j = dt.columns.get_loc(agg_name) dt.iloc[:ignore_diags, j] = np.nan # returning dataframe for API consistency result = [] for i, dtable in dtables.items(): dtable = dtable.reset_index() dtable.insert(0, "region", i) result.append(dtable) return pd.concat(result).reset_index(drop=True) def _diagsum_asymm(clr, fields, transforms, regions1, regions2, span): """ calculates diagonal summary for a collection of rectangular regions defined as combinations of regions1 and regions2. returns a dictionary of DataFrames with diagonal sums as values, and 0-based indexes of rectangular genomic regions as keys. """ lo, hi = span bins = clr.bins()[:] pixels = clr.pixels()[lo:hi] pixels = cooler.annotate(pixels, bins, replace=False) # this could further expanded to allow for custom groupings: pixels["dist"] = pixels["bin2_id"] - pixels["bin1_id"] for field, t in transforms.items(): pixels[field] = t(pixels) diag_sums = {} # r1 and r2 define rectangular block i: for i, (r1, r2) in enumerate(zip(regions1, regions2)): r1 = assign_supports(pixels, [r1], suffix="1") r2 = assign_supports(pixels, [r2], suffix="2") # calculate diag_sums on the spot to allow for overlapping blocks: diag_sums[i] = pixels[(r1 == r2)].groupby("dist")[fields].sum() return diag_sums def diagsum_asymm( clr, regions1, regions2, transforms={}, weight_name="weight", bad_bins=None, chunksize=10000000, map=map, ): """ Diagonal summary statistics. Matchings elements of `regions1` and `regions2` define asymmetric rectangular blocks for calculating diagonal summary statistics. Only intra-chromosomal blocks are supported. 
Parameters ---------- clr : cooler.Cooler Cooler object regions1 : sequence of genomic range tuples "left"-side support regions for diagonal summation regions2 : sequence of genomic range tuples "right"-side support regions for diagonal summation transforms : dict of str -> callable, optional Transformations to apply to pixels. The result will be assigned to a temporary column with the name given by the key. Callables take one argument: the current chunk of the (annotated) pixel dataframe. weight_name : str name of the balancing weight vector used to count "bad"(masked) pixels per diagonal. Use `None` to avoid masking "bad" pixels. bad_bins : array-like a list of bins to ignore per support region. Combines with the list of bad bins from balacning weight. chunksize : int, optional Size of pixel table chunks to process map : callable, optional Map functor implementation. Returns ------- DataFrame with summary statistic of every diagonal of every block: region1, region2, diag, n_valid, count.sum """ spans = partition(0, len(clr.pixels()), chunksize) fields = ["count"] + list(transforms.keys()) regions1 = bioframe.parse_regions(regions1, clr.chromsizes) regions2 = bioframe.parse_regions(regions2, clr.chromsizes) dtables = make_diag_tables( clr, regions1, regions2, weight_name=weight_name, bad_bins=bad_bins ) # combine masking with existing transforms and add a "count" transform: if bad_bins is not None: # turn bad_bins into a mask of size clr.bins: mask_size = len(clr.bins()) bad_bins_mask = np.ones(mask_size, dtype=int) bad_bins_mask[bad_bins] = 0 # masked_transforms = {} bin1 = "bin1_id" bin2 = "bin2_id" for field in fields: if field in transforms: # combine masking and transform, minding the scope: t = transforms[field] masked_transforms[field] = ( lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]] ) else: # presumably field == "count", mind the scope as well: masked_transforms[field] = ( lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]] ) # substitute transforms to the masked_transforms: transforms = masked_transforms for dt in dtables.values(): for field in fields: agg_name = "{}.sum".format(field) dt[agg_name] = 0 job = partial( _diagsum_asymm, clr, fields, transforms, regions1.values, regions2.values ) results = map(job, spans) for result in results: for i, agg in result.items(): region1 = regions1.loc[i, "name"] region2 = regions2.loc[i, "name"] for field in fields: agg_name = "{}.sum".format(field) dtables[region1, region2][agg_name] = dtables[region1, region2][ agg_name ].add(agg[field], fill_value=0) # returning a dataframe for API consistency: result = [] for (i, j), dtable in dtables.items(): dtable = dtable.reset_index() dtable.insert(0, "region1", i) dtable.insert(1, "region2", j) result.append(dtable) return
pd.concat(result)
pandas.concat
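The completion above finishes diagsum_asymm by stacking the per-block tables into one long DataFrame with pandas.concat. Below is a minimal, self-contained sketch of that same pattern; the region names, counts, and table contents are invented for illustration and are not taken from any real cooler file.

import pandas as pd

# Hypothetical per-block diagonal tables keyed by (region1, region2) name pairs;
# the numbers only mimic the columns built by diagsum_asymm.
dtables = {
    ("chr1:0-10Mb", "chr1:10-20Mb"): pd.DataFrame(
        {"diag": [0, 1], "n_valid": [100, 98], "count.sum": [50.0, 31.0]}
    ),
    ("chr2:0-10Mb", "chr2:10-20Mb"): pd.DataFrame(
        {"diag": [0, 1], "n_valid": [90, 88], "count.sum": [44.0, 27.0]}
    ),
}

result = []
for (i, j), dtable in dtables.items():
    dtable = dtable.reset_index(drop=True)
    dtable.insert(0, "region1", i)  # label every row with the block it came from
    dtable.insert(1, "region2", j)
    result.append(dtable)

# one long table: region1, region2, diag, n_valid, count.sum
combined = pd.concat(result).reset_index(drop=True)
print(combined)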
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 17:09:57 2019

@author: abhik
"""

import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# heatmap
df = pd.read_excel("Excel/Final_result.xlsx")
df1 = df[['Unnamed: 0.1', 'up/down', 'down/up']]

df2 = df1.loc[df1['up/down'] > 1.5]
df2 = df2.sort_values("up/down", ascending=False)
df2 = df2.head()

df3 = df1.loc[df1['down/up'] > 1.5]
df3 = df3.sort_values("down/up", ascending=False)
df3 = df3.head()

df4 = df1.loc[(df1['Unnamed: 0.1'] == 'Mad') | (df1['Unnamed: 0.1'] == 'Trl') |
              (df1['Unnamed: 0.1'] == 'mes2') | (df1['Unnamed: 0.1'] == 'hth')]

df5 = df1.loc[(df1['down/up'] < 1.5) & (df1['up/down'] < 1.5)].head()

df6 = pd.concat([df2, df3, df4, df5])
heatmap1_data =
pd.pivot_table(df6,values = 'down/up', index = ['Unnamed: 0.1'], columns = 'up/down')
pandas.pivot_table
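The pivot_table completion reshapes the filtered gene table into a matrix suitable for a seaborn heatmap. Here is a small runnable sketch of that step with a toy frame: only the column names mirror the script above, while the gene names and ratio values are made up.

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Toy frame reusing the column names from the script; values are fabricated.
df6 = pd.DataFrame({
    "Unnamed: 0.1": ["Mad", "Trl", "mes2", "hth"],
    "up/down": [2.1, 1.8, 0.9, 1.2],
    "down/up": [0.5, 0.7, 1.6, 1.9],
})

# rows = gene, columns = distinct up/down ratios, cell values = down/up ratio
heatmap1_data = pd.pivot_table(df6, values="down/up",
                               index=["Unnamed: 0.1"], columns="up/down")

sns.heatmap(heatmap1_data, annot=True)
plt.show()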
"""Predict labels for the test set using a second-level classifier. This script trains a logistic regression classifier on the training set meta-features created using the ``meta_features.py`` script. It then generates predictions for either the training set or the test set. The former refers to training and predicting each fold. This script requires three command-line arguments: * train_path: Path to training features. * metadata_path: Path to training metadata. * output_path: Output file path. It also takes an optional argument: * --test_path: Path to test features. If this is specified, the script will generate predictions for the test set and write them to a submission file. Otherwise, it will generate predictions for the training set on a fold-by-fold basis and write them to a csv file. """ import argparse import sys import h5py import numpy as np import pandas as pd from sklearn.linear_model import LogisticRegression sys.path.append('task2') import file_io as io import utils as utils def train(x, df): """Train a logistic regression classifier. Args: x (np.ndarray): Training data. df (pd.DataFrame): Training metadata. Returns: The trained classifier. """ y = df.label.astype('category').cat.codes.values sample_weight = np.ones(len(x)) sample_weight[df.manually_verified == 0] = 0.65 clf = LogisticRegression( penalty='l2', tol=0.0001, C=1.0, random_state=1000, class_weight='balanced', ) clf.fit(x, y, sample_weight=sample_weight) return clf # Parse command line arguments parser = argparse.ArgumentParser() parser.add_argument('train_path', help='path to training features') parser.add_argument('metadata_path', help='path to training metadata') parser.add_argument('output_path', help='output file path') parser.add_argument('--test_path', help='path to test features') args = parser.parse_args() # Load training data with h5py.File(args.train_path, 'r') as f: x_train = np.array(f['F']) df_train = pd.read_csv(args.metadata_path, index_col=0) y_train = df_train.label.astype('category').cat.codes.values if args.test_path: # Load test data with h5py.File(args.test_path, 'r') as f: x_test = np.array(f['F']) index = pd.Index(f['names'], name='fname') # Train and predict the test data clf = train(x_train, df_train) y_pred = clf.predict_proba(x_test) # Write to a submission file. df_pred = pd.DataFrame(y_pred, index=index, columns=utils.LABELS) io.write_predictions(df_pred, args.output_path) else: index =
pd.Index([], name='fname')
pandas.Index
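In the test branch of the script, the clip names read from the HDF5 file become a named pandas.Index so every probability row stays attached to its file name when the submission CSV is written. A toy sketch of the same idea follows; the file names, label set (standing in for utils.LABELS), and probabilities are fabricated.

import numpy as np
import pandas as pd

# Fabricated clip names and a tiny label set standing in for utils.LABELS.
fnames = ["clip_0001.wav", "clip_0002.wav", "clip_0003.wav"]
labels = ["Bark", "Meow"]

# A named Index keeps each prediction row attached to its file name.
index = pd.Index(fnames, name="fname")

# Fake class probabilities, one column per label.
y_pred = np.array([[0.9, 0.1],
                   [0.2, 0.8],
                   [0.7, 0.3]])

df_pred = pd.DataFrame(y_pred, index=index, columns=labels)
df_pred.to_csv("predictions.csv")  # 'fname' becomes the first column of the CSV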
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 17:54:12 2020

@author: Shaji,Charu,Selva
"""

import scipy
import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

from . import helper
from . import exceptions

pd.set_option('mode.chained_assignment', None)
pandas.set_option
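The completion sets a global pandas option at import time so SettingWithCopyWarning is not emitted for chained assignments inside the library. A short sketch of what the option changes is below; note that in pandas builds with copy-on-write enabled this option may no longer exist, so treat it as illustrative.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

# Assigning into a filtered view typically triggers SettingWithCopyWarning.
subset = df[df["a"] > 1]
subset["b"] = 0.0

# Silence the warning process-wide, as the module above does at import time.
pd.set_option("mode.chained_assignment", None)
subset["b"] = 1.0  # same operation, no warning emitted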
# Copyright 2020 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utilities functions to manipulate the data in the colab.""" import datetime import itertools import operator from typing import List, Optional import dataclasses import numpy as np import pandas as pd import pandas.io.formats.style as style from scipy import stats from trimmed_match.design import common_classes TimeWindow = common_classes.TimeWindow FormatOptions = common_classes.FormatOptions _operator_functions = {'>': operator.gt, '<': operator.lt, '<=': operator.le, '>=': operator.ge, '=': operator.eq, '!=': operator.ne} _inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='} @dataclasses.dataclass class CalculateMinDetectableIroas: """Class for the calculation of the minimum detectable iROAS. Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based on one sample X which follows a normal distribution with mean iROAS (unknown) and standard deviation rmse (known). Typical usage example: calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9) min_detectable_iroas = calc_min_detectable_iroas.at(2.0) """ # chance of rejecting H0 incorrectly when H0 holds. significance_level: float = 0.1 # chance of rejecting H0 correctly when H1 holds. power_level: float = 0.9 # minimum detectable iroas at rmse=1. rmse_multiplier: float = dataclasses.field(init=False) def __post_init__(self): """Calculates rmse_multiplier. Raises: ValueError: if significance_level or power_level is not in (0, 1). """ if self.significance_level <= 0 or self.significance_level >= 1.0: raise ValueError('significance_level must be in (0, 1), but got ' f'{self.significance_level}.') if self.power_level <= 0 or self.power_level >= 1.0: raise ValueError('power_level must be in (0, 1), but got ' f'{self.power_level}.') self.rmse_multiplier = ( stats.norm.ppf(self.power_level) + stats.norm.ppf(1 - self.significance_level)) def at(self, rmse: float) -> float: """Calculates min_detectable_iroas at the specified rmse.""" return rmse * self.rmse_multiplier def find_days_to_exclude( dates_to_exclude: List[str]) -> List[TimeWindow]: """Returns a list of time windows to exclude from a list of days and weeks. Args: dates_to_exclude: a List of strings with format indicating a single day as '2020/01/01' (YYYY/MM/DD) or an entire time period as '2020/01/01 - 2020/02/01' (indicating start and end date of the time period) Returns: days_exclude: a List of TimeWindows obtained from the list in input. 
""" days_exclude = [] for x in dates_to_exclude: tmp = x.split('-') if len(tmp) == 1: try: days_exclude.append( TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0]))) except ValueError: raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.') elif len(tmp) == 2: try: days_exclude.append( TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1]))) except ValueError: raise ValueError( f'Cannot convert the strings in {tmp} to a valid date.') else: raise ValueError(f'The input {tmp} cannot be interpreted as a single' + ' day or a time window') return days_exclude def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]: """Return a list of days to exclude from a list of TimeWindows. Args: periods: List of time windows (first day, last day). Returns: days_exclude: a List of obtained by expanding the list in input. """ days_exclude = [] for window in periods: days_exclude += pd.date_range(window.first_day, window.last_day, freq='D') return list(set(days_exclude)) def overlap_percent(dates_left: List['datetime.datetime'], dates_right: List['datetime.datetime']) -> float: """Find the size of the intersections of two arrays, relative to the first array. Args: dates_left: List of datetime.datetime dates_right: List of datetime.datetime Returns: percentage: the percentage of elements of dates_right that also appear in dates_left """ intersection = np.intersect1d(dates_left, dates_right) percentage = 100 * len(intersection) / len(dates_right) return percentage def check_time_periods(geox_data: pd.DataFrame, start_date_eval: pd.Timestamp, start_date_aa_test: pd.Timestamp, experiment_duration_weeks: int, frequency: str) -> bool: """Checks that the geox_data contains the data for the two periods. Check that the geox_data contains all observations during the evaluation and AA test periods to guarantee that the experiment lasts exactly a certain number of days/weeks, depending on the frequency of the data (daily/weekly). Args: geox_data: pd.Dataframe with at least the columns (date, geo). start_date_eval: start date of the evaluation period. start_date_aa_test: start date of the aa test period. experiment_duration_weeks: int, length of the experiment in weeks. frequency: str indicating the frequency of the time series. It should be one of 'infer', 'D', 'W'. Returns: bool: a bool, True if the time periods specified pass all the checks Raises: ValueError: if part of the evaluation or AA test period are shorter than experiment_duration (either weeks or days). 
""" if frequency not in ['infer', 'D', 'W']: raise ValueError( f'frequency should be one of ["infer", "D", "W"], got {frequency}') if frequency == 'infer': tmp = geox_data.copy().set_index(['date', 'geo']) frequency = infer_frequency(tmp, 'date', 'geo') if frequency == 'W': frequency = '7D' number_of_observations = experiment_duration_weeks else: number_of_observations = 7 * experiment_duration_weeks freq_str = 'weeks' if frequency == '7D' else 'days' missing_eval = find_missing_dates(geox_data, start_date_eval, experiment_duration_weeks, number_of_observations, frequency) if missing_eval: raise ValueError( (f'The evaluation period contains the following {freq_str} ' + f'{missing_eval} for which we do not have data.')) missing_aa_test = find_missing_dates(geox_data, start_date_aa_test, experiment_duration_weeks, number_of_observations, frequency) if missing_aa_test: raise ValueError((f'The AA test period contains the following {freq_str} ' + f'{missing_aa_test} for which we do not have data.')) return True def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp, period_duration_weeks: int, number_of_observations: int, frequency: str) -> List[str]: """Find missing observations in a time period. Args: geox_data: pd.Dataframe with at least the columns (date, geo). start_date: start date of the evaluation period. period_duration_weeks: int, length of the period in weeks. number_of_observations: expected number of time points. frequency: str or pd.DateOffset indicating the frequency of the time series. Returns: missing: a list of strings, containing the dates for which data are missing in geox_data. """ days = datetime.timedelta(days=7 * period_duration_weeks - 1) period_dates = ((geox_data['date'] >= start_date) & (geox_data['date'] <= start_date + days)) days_in_period = geox_data.loc[ period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list() missing = np.array([]) if len(days_in_period) != number_of_observations: expected_observations = list( pd.date_range(start_date, start_date + days, freq=frequency).strftime('%Y-%m-%d')) missing = set(expected_observations) - set(days_in_period) return sorted(missing) def infer_frequency(data: pd.DataFrame, date_index: str, series_index: str) -> str: """Infers frequency of data from pd.DataFrame with multiple indices. Infers frequency of data from pd.DataFrame with two indices, one for the slice name and one for the date-time. Example: df = pd.Dataframe{'date': [2020-10-10, 2020-10-11], 'geo': [1, 1], 'response': [10, 20]} df.set_index(['geo', 'date'], inplace=True) infer_frequency(df, 'date', 'geo') Args: data: a pd.DataFrame for which frequency needs to be inferred. date_index: string containing the name of the time index. series_index: string containing the name of the series index. Returns: A str, either 'D' or 'W' indicating the most likely frequency inferred from the data. Raises: ValueError: if it is not possible to infer frequency of sampling from the provided pd.DataFrame. 
""" data = data.sort_values(by=[date_index, series_index]) # Infer most likely frequence for each series_index series_names = data.index.get_level_values(series_index).unique().tolist() series_frequencies = [] for series in series_names: observed_times = data.iloc[data.index.get_level_values(series_index) == series].index.get_level_values(date_index) n_steps = len(observed_times) if n_steps > 1: time_diffs = ( observed_times[1:n_steps] - observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values modal_frequency, _ = np.unique(time_diffs, return_counts=True) series_frequencies.append(modal_frequency[0]) if not series_frequencies: raise ValueError( 'At least one series with more than one observation must be provided.') if series_frequencies.count(series_frequencies[0]) != len(series_frequencies): raise ValueError( 'The provided time series seem to have irregular frequencies.') try: frequency = { 1: 'D', 7: 'W' }[series_frequencies[0]] except KeyError: raise ValueError('Frequency could not be identified. Got %d days.' % series_frequencies[0]) return frequency def human_readable_number(number: float) -> str: """Print a large number in a readable format. Return a readable format for a number, e.g. 123 milions becomes 123M. Args: number: a float to be printed in human readable format. Returns: readable_number: a string containing the formatted number. """ number = float('{:.3g}'.format(number)) magnitude = 0 while abs(number) >= 1000 and magnitude < 4: magnitude += 1 number /= 1000.0 readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'tn'][magnitude]) return readable_number def change_background_row(df: pd.DataFrame, value: float, operation: str, column: str): """Colors a row of a table based on the expression in input. Color a row in: - orange if the value of the column satisfies the expression in input - beige if the value of the column satisfies the inverse expression in input - green otherwise For example, if the column has values [1, 2, 3] and we pass 'value' equal to 2, and operation '>', then - 1 is marked in beige (1 < 2, which is the inverse expression) - 2 is marked in green (it's not > and it's not <) - 3 is marked in orange(3 > 2, which is the expression) Args: df: the table of which we want to change the background color. value: term of comparison to be used in the expression. operation: a string to define which operator to use, e.g. '>' or '='. For a full list check _operator_functions. column: name of the column to be used for the comparison Returns: pd.Series """ if _operator_functions[operation](float(df[column]), value): return pd.Series('background-color: orange', df.index) elif _operator_functions[_inverse_op[operation]](float(df[column]), value): return
pd.Series('background-color: beige', df.index)
pandas.Series
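change_background_row returns one CSS string per cell of the row, which is exactly what pandas' Styler.apply(axis=1) expects from a row-wise styling function. The simplified, self-contained sketch below follows the orange/beige/green scheme described in the docstring; the column name "rmse" and the data are invented, and rendering the Styler requires Jinja2 and a reasonably recent pandas (>= 1.3 for to_html).

import pandas as pd

def color_row(row, value=2.0, column="rmse"):
    # Simplified version of change_background_row: one CSS string per cell.
    if float(row[column]) > value:
        return pd.Series("background-color: orange", row.index)
    elif float(row[column]) < value:
        return pd.Series("background-color: beige", row.index)
    return pd.Series("background-color: lightgreen", row.index)

df = pd.DataFrame({"design": ["A", "B", "C"], "rmse": [1.2, 2.0, 3.4]})

# Styler.apply with axis=1 passes each row (a Series) to color_row and expects
# one CSS value back for every cell of that row.
styled = df.style.apply(color_row, axis=1)
styled.to_html("designs.html")  # in a notebook, just display `styled`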
""" Toxopy (https://github.com/bchaselab/Toxopy) © <NAME>, University of Nebraska at Omaha Licensed under the terms of the MIT license """ import pandas as pd from pathlib import Path from glob import glob import os import dirtyR from toxopy import trials, concat_csv, set_status from platform import platform from subprocess import Popen from tqdm import tqdm from rich.console import Console from shutil import move def analyze_turnpoints(improved_dlc_dir, output_dir): """ 'improved_dlc_dir' the path to improved_dlc files (timed & combined) 'output_dir' is the dir in which the output files will be saved see: https://metacpan.org/pod/Statistics::Sequences::Turns """ console = Console() def mkD(name): return f'{output_dir}/{name}' for x in [mkD('plots'), mkD('diff'), mkD('single_files')]: if not os.path.exists(x): os.makedirs(x) ''' 'csv_dir' is combined and timed files 'variable' is usually 'velocity_loess05' 'output_dir' is the output directory for the R script output (not the script file itself!) ''' dirtyR.turnpoints(improved_dlc_dir, 'velocity_loess05', output_dir) while True: if 'macOS' in platform(): Popen(['open', f'plot_turnpoints.r']) else: pass ans = input('Done? (y) ') if ans.lower() == "y": break continue for x in ['/*.r', '/.*']: [ os.remove(x) for x in glob(f'{output_dir}/{x}') if os.path.isfile(x) is True ] for plot in glob(f'{output_dir}/*.png'): move(plot, mkD('plots')) def improve_turnpoints(output_dir): """ 'output_dir' is the initial turnpoints csv files dir, and also the dir in which the output files will be saved """ tt = [300, 420, 600, 720, 900, 1020, 1200, 1320, 1500, 1620] files = glob(f'{output_dir}/*.csv') console.print('\nIMPROVING TURNPOINTS FILES...', style='bold blue') for file in tqdm(files): df = pd.read_csv(file) tls, times, proba = [], [], [] trls = trials() for x in df['tppos']: times.append(round(x / 30, 4)) for y in df['proba']: proba.append(format(y, '.8f')) df = df.drop(columns=['proba']) for i in times: if i < tt[0]: tls.append('FT') else: for q, p in zip(range(0, 10), trls[1:]): if tt[q] < i < tt[q+1]: tls.append(p) df['proba'], df['trial'], df['time'] = proba, tls, times df.to_csv(f'{output_dir}/{Path(file).stem}.csv', index=False, sep=',', encoding='utf-8') improve_turnpoints(output_dir) def find_tps_velocity_values(improved_dlc_dir, output_dir, super_output_dir): """ 'improved_dlc_dir' is the directory with the 'improved_dlc' files 'output_dir' is the directory with the improved turning points csv files 'super_output_dir' is the directory where output files will be save """ def tp_csv_file(cat): fs = glob(f'{output_dir}/{cat}.csv') for f in fs: return pd.read_csv(f) files = glob(f'{improved_dlc_dir}/*.csv') console.print('\nFINDING VELOCITY VALUES...', style='bold blue') for file in tqdm(files): df =
pd.read_csv(file)
pandas.read_csv
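The final completion simply loads each tracking CSV with pandas.read_csv inside the glob loop. A minimal sketch of that loop follows; the directory name and the velocity_loess05 column come from the snippet above, while the mean printed at the end is only an illustrative placeholder for whatever per-file processing the pipeline does next.

import pandas as pd
from glob import glob

# Hypothetical directory holding the timed-and-combined tracking files.
improved_dlc_dir = "improved_dlc"

for file in glob(f"{improved_dlc_dir}/*.csv"):
    df = pd.read_csv(file)  # one DataFrame per cat's tracking file
    # Illustrative only: peek at the smoothed velocity column used by the pipeline.
    if "velocity_loess05" in df.columns:
        print(file, df["velocity_loess05"].mean())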