prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
#%% [markdown]
# # Matching when including the contralateral connections
#%% [markdown]
# ## Preliminaries
#%%
import datetime
import os
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import adjplot, matched_stripplot, matrixplot
from numba import jit
from pkg.data import load_maggot_graph, load_matched
from pkg.io import OUT_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.match import BisectedGraphMatchSolver, GraphMatchSolver
from pkg.plot import method_palette, set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, get_seeds
from scipy.optimize import linear_sum_assignment
from scipy.stats import wilcoxon
FILENAME = "larva_brain"
DISPLAY_FIGS = True
OUT_PATH = OUT_PATH / FILENAME
def glue(name, var, **kwargs):
default_glue(name, var, FILENAME, **kwargs)
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, figure=True)
if not DISPLAY_FIGS:
plt.close()
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
#%% [markdown]
# ### Load the data
#%%
left_adj, left_nodes = load_matched("left")
right_adj, right_nodes = load_matched("right")
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
all_nodes = pd.concat((left_nodes, right_nodes))
all_nodes["inds"] = range(len(all_nodes))
left_nodes.iloc[seeds[0]]["pair_id"]
assert len(left_nodes) == len(right_nodes)
#%%
mg = load_maggot_graph()
mg = mg.node_subgraph(all_nodes.index)
adj = mg.sum.adj
n = len(left_nodes)
left_inds = np.arange(n)
right_inds = np.arange(n) + n
glue("n_nodes", n)
#%% [markdown]
# ### Run the graph matching experiment
n_sims = 25
glue("n_initializations", n_sims)
RERUN_SIMS = False
if RERUN_SIMS:
seeds = rng.integers(np.iinfo(np.int32).max, size=n_sims)
rows = []
for sim, seed in enumerate(seeds):
for Solver, method in zip(
[BisectedGraphMatchSolver, GraphMatchSolver], ["BGM", "GM"]
):
run_start = time.time()
solver = Solver(adj, left_inds, right_inds, rng=seed)
solver.solve()
match_ratio = (solver.permutation_ == np.arange(n)).mean()
elapsed = time.time() - run_start
print(f"{elapsed:.3f} seconds elapsed.")
rows.append(
{
"match_ratio": match_ratio,
"sim": sim,
"method": method,
"seed": seed,
"elapsed": elapsed,
"converged": solver.converged,
"n_iter": solver.n_iter,
"score": solver.score_,
}
)
results = | pd.DataFrame(rows) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.stats import pearsonr,spearmanr
from scipy.stats import entropy as kl
from sklearn.metrics import roc_auc_score, f1_score, mean_squared_error
from math import sqrt
import os
import multiprocessing as mp
def get_annotator_ensemble_baseline(annotations, k, agg_function, eval_function, n_t, n_p):
assert(n_t + n_p <=k)
np.random.seed()
annotations = annotations.dropna()
groups = annotations.groupby(annotations.index)
groups = [e[1] for e in groups if e[1].shape[0]>=k]
d_ts = []
d_ps = []
for g in groups:
g = g.iloc[np.random.permutation(len(g))]
d_ts.append(g[0:n_t])
d_ps.append(g[n_t:(n_t+n_p)])
d_t = | pd.concat(d_ts) | pandas.concat |
'''
@Author: mendeslbruno
Date: 2021-01-26
Descr: Performs some simple analyzes for several actions of the index SP500.
'''
import pandas as pd
import yfinance as yf
import streamlit as st
import datetime as dt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
snp500 = | pd.read_csv("datasets/SP500.csv") | pandas.read_csv |
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import torchvision
import math
import numpy as np
import pandas as pd
from .adversarial import fgsm_image, fgsm_k_image, boundary_attack_image
from tqdm import tqdm, trange
import time
import copy
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from seaborn import heatmap
# Adapted from zeiss_umbrella.resnet.train_model
def train_model(model, dataloaders, dataset_sizes, criterion, optimizer, device, fundus_dataset=None, valid=True,
ex=None, seed=None,
scheduler=None, adv_training_config=None, num_epochs=25, return_best=False):
"""
Trains the given model, and returns it.
model: model to be trained
dataloaders: dictionary of pytorch DataLoader objects, should contain typically data for training and validation.
format: dataloaders['train'] -> dataloader object for training dataset
dataset_sizes: sizes of dataset. Dictionary of integer indicating sizes of datasets used for different stage.
format: dataset_sizes['train'] -> size of training dataset
criterion: loss function of torch.nn e.g. nn.CrossEntropyLoss()
optimizer: optimizers in torch.optim e.g. optim.Adam()
device: device used for computation (cuda:0 or cuda or cpu)
fundus_dataset: data.FundusDataset object (needed for decision boundary attack)
valid: perform validation stage if true, only perform training stage if false
ex: sacred.Experiment object
seed: seed for control of randomness
scheduler: optim.scheduler object, used for customising optimizer.
adv_training_config: dictionary containing setting for adversarial training. Details can be found in training script.
num_epoch: number of epochs
return_best: if true will return weights with best balanced validation accuracy in addition to the weights at the
last epoch.
"""
if seed:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
since = time.time()
# best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
epochbar = trange(num_epochs)
for epoch in epochbar:
if scheduler:
epochbar.set_description('Epoch {}/{}, learning rate: {}'.format(epoch, num_epochs - 1, scheduler.get_lr()))
else:
epochbar.set_description('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase if valid is true, only training if valid is false.
if valid:
phase_list = ['train', 'valid']
else:
phase_list = ['train']
for phase in phase_list:
if phase == 'train':
# scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
num_attacks = 0
ground_truth = torch.Tensor().type(torch.long).to(device)
predictions = torch.Tensor().type(torch.long).to(device)
# Iterate over data.
batches = tqdm(dataloaders[phase], total=dataset_sizes[phase] / dataloaders[phase].batch_size)
for inputs, labels in batches:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# if model
if model.__class__.__name__ == 'Inception3' and phase == 'train':
outputs, aux = model(inputs)
else:
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
BATCH_ACC_TOL = adv_training_config.get('batch_acc_tol', 0.8)
ADVERSARIAL_WEIGHT = adv_training_config.get('weight', 0.5)
batch_acc = (preds == labels).type(torch.float).sum() / labels.shape[0]
if phase == 'train':
if batch_acc > BATCH_ACC_TOL and adv_training_config['type'] != 'baseline' \
and adv_training_config is not None:
adversarial_examples, adversarial_labels = get_adversarial_samples(inputs, labels, model,
criterion, device,
fundus_dataset,
adv_training_config,
seed=seed)
if adversarial_examples is not None and adversarial_labels is not None:
num_attacks += 1
adversarial_loss = criterion(model(adversarial_examples), adversarial_labels)
loss = loss * (1 - ADVERSARIAL_WEIGHT) + ADVERSARIAL_WEIGHT * adversarial_loss
# clean up model gradients
model.zero_grad()
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
ground_truth = torch.cat((ground_truth, labels))
predictions = torch.cat((predictions, preds))
batches.set_description("Running loss:{},Running corrects:{}".format(running_loss, running_corrects))
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
balanced_acc = balanced_accuracy_score(ground_truth.cpu().tolist(), predictions.cpu().tolist())
cks = cohen_kappa_score(ground_truth.cpu().tolist(), predictions.cpu().tolist(), weights='quadratic')
# Output metrics using sacred ex
if ex:
record_training_info(ex, phase, epoch_loss, epoch_acc, balanced_acc, cks)
if phase == 'train':
print("number of attack performed: {}".format(num_attacks))
print('{} Loss: {:.4f} Acc: {:.4f} Balanced Acc: {:.4f} cohen kappa score: {}'
.format(phase, epoch_loss, epoch_acc, balanced_acc, cks))
# deep copy the model
if phase == 'valid' and balanced_acc > best_acc:
best_acc = balanced_acc
best_model_wts = copy.deepcopy(model.state_dict())
if scheduler:
scheduler.step()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
# model.load_state_dict(best_model_wts)
if return_best:
return model, best_model_wts, optimizer
else:
return model, optimizer
def record_training_info(ex, phase, epoch_loss, epoch_acc, balanced_acc, cks):
if phase == 'train':
ex.log_scalar("train loss", epoch_loss)
ex.log_scalar("train accuracy", epoch_acc.item())
ex.log_scalar("train balanced accuracy", balanced_acc)
elif phase == 'valid':
ex.log_scalar("valid loss", epoch_loss)
ex.log_scalar("valid accuracy", epoch_acc.item())
ex.log_scalar("valid balanced accuracy", balanced_acc)
ex.log_scalar("valid cohen square kappa", cks)
def get_adversarial_samples(inputs, labels, model, criterion, device, fundus_dataset, adv_training_config, seed):
EPSILON_fgsm = adv_training_config.get('epsilon_fgsm', 1.0 / 255.0)
ALPHA_fgsm = adv_training_config.get('alpha_fgsm', None)
STEPS = adv_training_config.get('steps', None)
EPSILON_dba = adv_training_config.get('epsilon_dba', 1.)
DELTA_dba = adv_training_config.get('delta_dba', 0.1)
N_STEP_MAX_dba = adv_training_config.get('n_step_max_dba', 250)
E_STEP_MAX_dba = adv_training_config.get('e_step_max_dba', 20)
D_STEP_MAX_dba = adv_training_config.get('d_step_max_dba', 10)
UQSRT = adv_training_config.get('unqualified_sample_ratio_tol_dba', 0.4)
DIFF_TOL_dba = adv_training_config.get('diff_tol_dba', 10)
if adv_training_config['type'] == 'fgsm':
adversarial_samples = fgsm_image(inputs, labels, EPSILON_fgsm, model, criterion,
device=device)
adversarial_labels = labels.clone().detach()
elif adv_training_config['type'] == 'fgsm_k_image':
adversarial_samples = fgsm_k_image(inputs, labels, model, criterion, device=device,
epsilon=EPSILON_fgsm, steps=STEPS, alpha=ALPHA_fgsm)
adversarial_labels = labels.clone().detach()
elif adv_training_config['type'] == 'pgd':
adversarial_samples = fgsm_k_image(inputs, labels, model, criterion, device=device,
epsilon=EPSILON_fgsm, steps=STEPS, rand=True)
adversarial_labels = labels.clone().detach()
elif adv_training_config['type'] == 'boundary_attack':
adversarial_samples, adversarial_labels = boundary_attack_image(model, device,
inputs, labels,
seed=seed,
fundus_dataset=fundus_dataset,
epsilon=EPSILON_dba,
delta=DELTA_dba,
n_step_max=N_STEP_MAX_dba,
e_step_max=E_STEP_MAX_dba,
diff_tol=DIFF_TOL_dba,
d_step_max=D_STEP_MAX_dba,
unqualified_sample_ratio_tol=UQSRT)
else:
adversarial_samples, adversarial_labels = None, None
return adversarial_samples, adversarial_labels
def find_lr(model, optimizer, criterion, trn_loader, device, init_value=1e-8, final_value=10., beta=0.98):
"""
Basic learning rate finder implemented in fastai
quoted from https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
:param model: model to be trained
:param optimizer: optimizer to be used
:param criterion: loss function
:param trn_loader: training loader
:param device: 'cpu' or 'cuda'
:param init_value: the initial value for the learning rate
:param final_value: the final value for the learning rate
:param beta: a weighted parameter
:return: log10 of the lrs and the corresponding loss,
good to pick the lr an order of magnitude smaller than the best
"""
num = len(trn_loader) - 1
mult = (final_value / init_value) ** (1 / num)
lr = init_value
optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
batch_num = 0
losses = []
log_lrs = []
for data in trn_loader:
batch_num += 1
# As before, get the loss for this mini-batch of inputs/outputs
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
# Compute the smoothed loss
avg_loss = beta * avg_loss + (1 - beta) * loss.data
smoothed_loss = avg_loss / (1 - beta ** batch_num)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
return log_lrs, losses
# Record the best loss
if smoothed_loss < best_loss or batch_num == 1:
best_loss = smoothed_loss
# Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
# Do the SGD step
loss.backward()
optimizer.step()
# Update the lr for the next step
lr *= mult
optimizer.param_groups[0]['lr'] = lr
return log_lrs, losses
class FocalLoss_SM(nn.Module):
"""
Focal loss for softmax function. Note that in our case the labels are mutually exclusive.
Another possibility is the focal loss for sigmoid function which assumes that the labels are not mutually exclusive.
"""
def __init__(self, gamma=2, alpha=None, size_average=True):
super(FocalLoss_SM, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.size_average = size_average
def forward(self, input, target):
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target.view(-1, 1))
logpt = logpt.view(-1)
pt = logpt.exp()
if self.alpha is not None:
logpt = logpt * self.alpha.gather(0, target)
loss = -1. * (1. - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
def test_model(model, dataloader, dataset_size, criterion, device, plot_confusion_matrix=True,
confusion_matrix_name=None, ex=None):
"""
Test the performance of the given model at the given dataset (in form of data loader).
"""
since = time.time()
model.eval()
with torch.no_grad():
running_loss = 0.0
running_corrects = 0
ground_truth = torch.Tensor().type(torch.long).to(device)
predictions = torch.Tensor().type(torch.long).to(device)
# Iterate over data.
# i=0
batches = tqdm(dataloader)
for inputs, labels in batches:
inputs = inputs.to(device)
labels = labels.to(device)
# forward
# track history if only in train
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
ground_truth = torch.cat((ground_truth, labels))
predictions = torch.cat((predictions, preds))
batches.set_description('Batch loss {:.4f}, batch accuracy {:.4f}'.format(loss.item() * inputs.size(0),
torch.sum(
preds == labels.data).type(
torch.float) / len(labels)))
loss = running_loss / dataset_size
acc = running_corrects.double() / dataset_size
balanced_acc = balanced_accuracy_score(ground_truth.cpu().tolist(), predictions.cpu().tolist())
chs = cohen_kappa_score(ground_truth.cpu().tolist(), predictions.cpu().tolist(), weights='quadratic')
if ex:
ex.log_scalar('loss', loss)
ex.log_scalar('accuracy', acc.item())
ex.log_scalar('balanced accuracy', balanced_acc)
ex.log_scalar('cohen kappa score', chs)
if plot_confusion_matrix:
cm_analysis(ground_truth.cpu().tolist(), predictions.cpu().tolist(), confusion_matrix_name,
labels=None)
print('Loss: {:.4f} Acc: {:.4f} Balanced Acc: {:.4f} cohen kappa: {:.4f}'
.format(loss, acc, balanced_acc, chs))
time_elapsed = time.time() - since
print('Testing complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
# confusion matrix plot and save untility from https://gist.github.com/hitvoice/36cf44689065ca9b927431546381a3f7
def cm_analysis(y_true, y_pred, filename, labels, ymap=None, figsize=(10, 10)):
"""
Generate matrix plot of confusion matrix with pretty annotations.
The plot image is saved to disk.
args:
y_true: true label of the data, with shape (nsamples,)
y_pred: prediction of the data, with shape (nsamples,)
filename: filename of figure file to save
labels: string array, name the order of class labels in the confusion matrix.
use `clf.classes_` if using scikit-learn models.
with shape (nclass,).
ymap: dict: any -> string, length == nclass.
if not None, map the labels & ys to more understandable strings.
Caution: original y_true, y_pred and labels must align.
figsize: the size of the figure plotted.
"""
if ymap is not None:
y_pred = [ymap[yi] for yi in y_pred]
y_true = [ymap[yi] for yi in y_true]
labels = [ymap[yi] for yi in labels]
cm = confusion_matrix(y_true, y_pred, labels=labels)
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
elif c == 0:
annot[i, j] = ''
else:
annot[i, j] = '%.1f%%\n%d' % (p, c)
cm = | pd.DataFrame(cm, index=labels, columns=labels) | pandas.DataFrame |
import re
import os
import string
from read import *
import pandas as pd
from pandas import ExcelWriter, ExcelFile
import numpy as np
import matplotlib.pyplot as plt
import spacy
from nltk.corpus import stopwords
import nltk
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
class Reach(object):
def __init__(self):
pass
def custom_sum(self, df_list): #Custom sum function to calculate likes as some fields have video views as well in dataset
summ = 0 #Initialising value to zero
for val in df_list: #Running through the entire column
if(type(val)!=int): #Checking if the value is a pure integer or not
continue #If not, then continue to next value
summ += val #Else add the val to summ
return summ
def custom_time_sum(self, df_list): #Custom time sum function to calculate the sum of times in the dataset by removing " hours"
summ = 0
for val in df_list: #Checking for every value in the column
val = val.replace(u' hours',u'') #Replacing " hours" with a null string
summ += int(val) #Adding the integral value of hours to summ
return summ
def custom_time_list(self, df_list): #Custom time sum function to calculate the sum of times in the dataset by removing " hours"
#print(df_list)
for i in range(0,len(df_list)): #Checking for every value in the column
df_list[i] = df_list[i].replace(u' hours',u'') #Replacing " hours" with a null string
df_list[i] = int(df_list[i]) #Adding the integral value of hours to summ
return df_list
def caption_hashtag_generator(self, sentence):
nlp = spacy.load("en_core_web_sm")
stopw = stopwords.words("english")
noun_list = []
sentence = re.sub(r'https?:\/\/.*\/\w*','',sentence) # Remove hyperlinks
sentence = re.sub(r'['+string.punctuation+']+', ' ',sentence) # Remove puncutations like 's
sentence = sentence.replace("#","")
emoji_pattern = re.compile("["u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF""]+", flags=re.UNICODE) #Removes emoji
sentence = emoji_pattern.sub(r'', sentence) # no emoji
doc = nlp(sentence)
temp_list = []
for sent in doc.sents:
for token in sent:
token_temp = str(token)
#print(sent)
print(token.text, token.pos_)
if(token.pos_=="NOUN" and token.text not in stopw):
#print(sent)
#print(i, token.text)
temp_list.append(token.text)
noun_list.append(temp_list)
temp_list = []
#print(noun_list)
return noun_list
def model(self, frame_df, no_followers=400):
custom_time_list(frame_df['Time since posted'])
inp = frame_df[['Followers', 'Time since posted']]
op = frame_df[['Likes']]
train_x, test_x, train_y, test_y = train_test_split(inp, op, test_size = 0.2, random_state = 999)
lr = LinearRegression().fit(train_x, train_y) #Fitting and creating a model
pred = lr.predict(test_x) #Predicting the answers for valdiation data
mse = mean_squared_error(pred, test_y) #finding the mean squared error
model = joblib.load("../models/reach_model")
reach_pred = model.predict([[no_followers,10]])
#print(reach_pred, mse)
expected_reach = "Expected Reach is " + str(int(reach_pred-round(mse**0.5))) + "-" + str(int(reach_pred+round(mse**0.5)))
return expected_reach
def combine(self, followers, caption):
df = pd.read_csv("d../atasets/combined_hashtag.csv") #Reading the new csv file
frame_df = | pd.DataFrame(df) | pandas.DataFrame |
# coding: utf-8
# # Example 01: Basic Queries
#
# Retrieving data from Socrata databases using sodapy
# ## Setup
# In[1]:
import os
import pandas as pd
import numpy as np
from sodapy import Socrata
# ## Find some data
#
# Though any organization can host their own data with Socrata's tools, Socrata also hosts several open datasets themselves:
#
# https://opendata.socrata.com/browse
#
# The following search options can help you find some great datasets for getting started:
# * Limit to data sets (pre-analyzed stuff is great, but if you're using sodapy you probably want the raw numbers!)
# * Sort by "Most Accessed"
#
# [Here's](https://opendata.socrata.com/browse?limitTo=datasets&sortBy=most_accessed&utf8=%E2%9C%93&page=1) a link that applies those filters automatically.
#
# Click on a few listings until you find one that looks interesting. Then click API and extract the following bits of data from the displayed url.
#
# https://<**opendata.socrata.com**>/dataset/Santa-Fe-Contributors/<**f92i-ik66**>.json
#
# 
# In[4]:
# Enter the information from those sections here
socrata_domain = "opendata.socrata.com"
socrata_dataset_identifier = "f92i-ik66"
# App Tokens can be generated by creating an account at https://opendata.socrata.com/signup
# Tokens are optional (`None` can be used instead), though requests will be rate limited.
#
# If you choose to use a token, run the following command on the terminal (or add it to your .bashrc)
# $ export SODAPY_APPTOKEN=<token>
socrata_token = os.environ.get("SODAPY_APPTOKEN")
# ## Get all the data
# In[5]:
client = Socrata(socrata_domain, socrata_token)
print(
"Domain: {domain:}\nSession: {session:}\nURI Prefix: {uri_prefix:}".format(
**client.__dict__
)
)
# In[6]:
results = client.get(socrata_dataset_identifier)
df = pd.DataFrame.from_dict(results)
df.head()
# Success! Let's do some minimal cleaning and analysis just to justify the bandwidth used.
# In[7]:
df["amount"] = df["amount"].astype(float)
# In[8]:
by_candidate = (
df.groupby("recipient").amount.aggregate([np.sum, np.mean, np.size]).round(0)
)
by_candidate.sort_values("sum", ascending=False).head()
# ## Multiple Data Sources
#
# That was much less annoying than downloading a CSV, though you can always save the dataframe to a CSV if you'd like. Where sodapy really shines though is in grabbing different data sources and mashing them together.
#
# For example, let's compare 311 calls between [New York City](https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9) and [Chattanooga, TN](https://data.chattlibrary.org/Government/311-Service-Requests/9iep-6yhz). Socrata makes it so easy, you'd be crazy _not_ to do it!
# In[12]:
nyc_domain = "data.cityofnewyork.us"
nyc_dataset_identifier = "fhrw-4uyv"
nyc_client = Socrata(nyc_domain, socrata_token)
nyc_results = nyc_client.get(nyc_dataset_identifier)
nyc_df = pd.DataFrame.from_dict(nyc_results)
print(nyc_df.shape)
chatt_domain = "data.chattlibrary.org"
chatt_dataset_identifier = "sf89-4qcw"
chatt_client = Socrata(chatt_domain, socrata_token)
chatt_results = chatt_client.get(chatt_dataset_identifier)
chatt_df = | pd.DataFrame.from_dict(chatt_results) | pandas.DataFrame.from_dict |
"""
Import spatio-temporal data
"""
import glob
from random import choice, sample
from typing import List, Tuple
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.crs as ccrs
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import pandas as pd
from netCDF4 import Dataset
from matplotlib.gridspec import GridSpec
Coordinate = Tuple[float, float]
CoordinateRange = Tuple[float, float]
features = ['lat', 'lon', 'H2O', 'delD']
flags = ['flag_srf', 'flag_cld', 'flag_qual']
# create a random colormap for nice color separation
np.random.seed(45)
random_colors = colors.ListedColormap(np.random.rand(256, 3))
class GeographicArea:
"""Provides methods to import and plot data of a given area"""
def __init__(self, lat: CoordinateRange = (90, -90), lon: CoordinateRange = (90, -90), level=4):
"""Extend of area in lat lon. Per default all coordinate are included
:param lat : Tupel(south, north)
:param lon : Tupel(west, east)
:param level: atmospheric level (0..8). 4 = 4.2 km
"""
self.lat = lat
self.lon = lon
self.level = level
def import_dataset(self, file_pattern: str) -> pd.DataFrame:
"""Import and filter measurements in area matching file pattern"""
frames = []
for file in glob.glob(file_pattern):
frame = pd.DataFrame()
with Dataset(file) as nc:
# lat and lon
for feature in features[:2]:
var = nc[feature][...]
assert not var.mask.any()
frame[feature] = var.data
# H2O and delD
for feature in features[2:]:
var = nc[feature][:, self.level]
assert not var.mask.any()
frame[feature] = var.data
for flag in flags:
flag_data = nc['/FLAGS/' + flag][...]
frame[flag] = flag_data.data
for flag in ['flag_vres', 'flag_resp']:
flag_data = nc['/FLAGS/' + flag][:, self.level]
frame[flag] = flag_data.data
frame = self.filter_location(frame)
frame = self.filter_flags(frame)
frames.append(frame)
return | pd.concat(frames, ignore_index=True) | pandas.concat |
import requests
import deeptrade
import pandas as pd
class StockPrice():
def __init__(self):
self.head = {'Authorization': "Token %s" %deeptrade.api_key}
def by_date(self,date,dataframe=False):
"""
:parameters:
- date: a day date in the format %YYYY-%MM-%DD
- dataframe: whehter result in json (False) or pandas dataframe
:returns:
json or pandas dataframe with all the tickers of the day date and
their corresponding stock price (OHLC)
"""
endpoint = deeptrade.api_base+"stock_date/"+date
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = pd.DataFrame(g)
return df
else:
return g
def by_ticker(self,ticker,dataframe=False):
"""
:parameters:
- ticker: a ticker such as 'AMZN'
- dataframe: whehter result in json (False) or pandas dataframe
:returns:
json or pandas dataframe with all the hist. OHLC information of the ticker
"""
endpoint = deeptrade.api_base+"stocks/"+ticker
g = requests.get(endpoint, headers=self.head).json()
if dataframe:
df = | pd.DataFrame(g) | pandas.DataFrame |
import pandas as pd
chrom_sizes = pd.Series(
{1: 249250621,
10: 135534747,
11: 135006516,
12: 133851895,
13: 115169878,
14: 107349540,
15: 102531392,
16: 90354753,
17: 81195210,
18: 78077248,
19: 59128983,
2: 243199373,
20: 63025520,
21: 48129895,
22: 51304566,
3: 198022430,
4: 191154276,
5: 180915260,
6: 171115067,
7: 159138663,
8: 146364022,
9: 141213431,
}
)
chrom_sizes_norm = chrom_sizes / chrom_sizes.max()
def _make_tableau20():
# tableau20 from # http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib
# accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
return tableau20
tableau20 = _make_tableau20()
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in range(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(list(zip(not_sig.columns, g.split('::'))))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = | pd.DataFrame(null_sets) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
from sklearn.model_selection import train_test_split
import string
import nltk
from nltk.corpus import stopwords
plt.style.use(style='seaborn')
#%matplotlib inline
df=pd.read_csv('all-data.csv',encoding = "ISO-8859-1")
print(df.head())
y=df['Sentiment'].values
x=df['News Headline'].values
(x_train,x_test,y_train,y_test)=train_test_split(x,y,test_size=0.4)
# Train
df1=pd.DataFrame(x_train)
df1=df1.rename(columns={0:'news'})
df2=pd.DataFrame(y_train)
df2=df2.rename(columns={0:'sentiment'})
df_train= | pd.concat([df1,df2],axis=1) | pandas.concat |
#!/usr/bin/env python
'''
Calculates the total number of character occurances at each position within the set of sequences passed.
'''
from __future__ import division
import argparse
import numpy as np
import sys
import pandas as pd
import mpathic.qc as qc
import mpathic.io as io
from mpathic import SortSeqError
def main(dataset_df, bin=None, start=0, end=None):
"""
Computes character counts at each position
Arguments:
dataset_df (pd.DataFrame): A dataframe containing a valid dataset.
bin (int): A bin number specifying which counts to use
start (int): An integer specifying the sequence start position
end (int): An integer specifying the sequence end position
Returns:
counts_df (pd.DataFrame): A dataframe containing counts for each nucleotide/amino acid character at each position.
"""
# Validate dataset_df
qc.validate_dataset(dataset_df)
# Retrieve type of sequence
seq_cols = [c for c in dataset_df.columns if qc.is_col_type(c,'seqs')]
if not len(seq_cols)==1:
raise SortSeqError('Dataset dataframe must have only one seq colum.')
colname = seq_cols[0]
seqtype = qc.colname_to_seqtype_dict[colname]
alphabet = qc.seqtype_to_alphabet_dict[seqtype]
num_chars = len(alphabet)
# Retrieve sequence length
if not dataset_df.shape[0] > 1:
raise SortSeqError('Dataset dataframe must have at least one row.')
total_seq_length = len(dataset_df[colname].iloc[0])
# Validate start and end
if start<0:
raise SortSeqError('start=%d is negative.'%start)
elif start>=total_seq_length:
raise SortSeqError('start=%d >= total_seq_length=%d'%\
(start,total_seq_length))
if end is None:
end=total_seq_length
elif end<=start:
raise SortSeqError('end=%d <= start=%d.'%(end,start))
elif end>total_seq_length:
raise SortSeqError('end=%d > total_seq_length=%d'%\
(start,total_seq_length))
# Set positions
poss = pd.Series(range(start,end),name='pos')
num_poss = len(poss)
# Retrieve counts
if bin is None:
ct_col = 'ct'
else:
ct_col = 'ct_%d'%bin
if not ct_col in dataset_df.columns:
raise SortSeqError('Column "%s" is not in columns=%s'%\
(ct_col,str(dataset_df.columns)))
counts = dataset_df[ct_col]
# Compute counts profile
counts_array = np.zeros([num_poss,num_chars])
counts_cols = ['ct_'+a for a in alphabet]
for i,pos in enumerate(range(start,end)):
char_list = dataset_df[colname].str.slice(pos,pos+1)
counts_array[i,:] = [np.sum(counts[char_list==a]) for a in alphabet]
temp_df = pd.DataFrame(counts_array,columns=counts_cols)
counts_df = | pd.concat([poss,temp_df],axis=1) | pandas.concat |
import os
from unittest import TestCase
import annotator
import commons
from annotator.annot import Annotator
import experiments.alpha_eval_one as aone
import math
import pandas as pd
class AlphaOneTest(TestCase):
def test_compute_class_alpha_accuracy(self):
arr = [
["abc1.txt", 0, 0, 0.1, 0.2],
["abc2.txt", 0, 0, 0.8, 0.9],
["abc3.txt", 0, 0, 0.7, 0.9],
]
df = pd.DataFrame(arr, columns=['fname', 'colid', 'fsid', 'from_alpha', 'to_alpha'])
acc = aone.compute_class_alpha_accuracy(df, {'mean': 0.85, 'median': 0.82})
self.assertEqual(acc['mean'], 2.0/3)
def test_compute_accuracy_for_all_classes(self):
arr = [
["abc1.txt", 0, 0, 0.1, 0.2],
["abc2.txt", 0, 0, 0.8, 0.9],
["abc3.txt", 0, 0, 0.7, 0.9],
["abc4.txt", 0, 0, 0.1, 0.2],
["abc5.txt", 0, 0, 0.8, 0.9],
["abc6.txt", 0, 0, 0.7, 0.9],
]
df = | pd.DataFrame(arr, columns=['fname', 'colid', 'fsid', 'from_alpha', 'to_alpha']) | pandas.DataFrame |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_Csv disambiguates the columns by
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
| tm.assert_frame_equal(df, result) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
"""
Module Docstring
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
filters = ['all', 'to', 'from', 'ghosted']
def get_chat_history(message_filter:str, Heading:str = 'Users', username:str = ''):
""" Main entry point of the app """
import pandas as pd
import heatmap as map
import json
from settings import RAW_CHAT_HIST as rch
from settings import CLEANED_DATA_PATH as cdp
try:
global filters
# validate username input
if message_filter not in filters:
print('filter not set to valid option')
return
if username != '':
if filter == 'ghosted':
print('ghosted filter cannot be used with a specified username.')
print('Please filter by To, From, or All messages when specifying a username')
return
username_list = username
else:
username_list = get_chat_usernames(message_filter)
# Opening JSON file
f = open(rch)
# returns JSON object as
# a dictionary
data = json.load(f)
# Collect timestamps of sent and received messages for a particular user
createdlist = []
if 'Received Chat History' in data and filter != 'to':
for i in data['Received Chat History']:
if i['From'] in username_list:
createdlist.append([i['Created']])
if 'Sent Chat History' in data and filter != 'from':
for i in data['Sent Chat History']:
if i['To'] in username_list:
createdlist.append([i['Created']])
f.close()
# Organize data by date and count of messages sent&received for that date. Output as csv
df = pd.DataFrame(createdlist, columns=['Date'])
df['Date'] = | pd.to_datetime(df['Date'], yearfirst=True) | pandas.to_datetime |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC overloads for common algorithms used internally
"""
import numpy
import pandas
from pandas.core.indexing import IndexingError
import numba
from numba.misc import quicksort
from numba import types
from numba.core.errors import TypingError
from numba.extending import register_jitable
from numba.np import numpy_support
from numba.typed import Dict
import sdc
from sdc.hiframes.api import isna
from sdc.hiframes.pd_series_type import SeriesType
from sdc.functions import numpy_like
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.range_index_type import RangeIndexType
from sdc.str_arr_ext import (num_total_chars, append_string_array_to,
str_arr_is_na, pre_alloc_string_array, str_arr_set_na, string_array_type,
cp_str_list_to_array, create_str_arr_from_list, get_utf8_size,
str_arr_set_na_by_mask)
from sdc.utilities.prange_utils import parallel_chunks
from sdc.utilities.utils import sdc_overload, sdc_register_jitable
from sdc.utilities.sdc_typing_utils import (find_common_dtype_from_numpy_dtypes,
TypeChecker)
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def hpat_arrays_append(A, B):
pass
@sdc_overload(hpat_arrays_append, jit_options={'parallel': False})
def hpat_arrays_append_overload(A, B):
"""Function for appending underlying arrays (A and B) or list/tuple of arrays B to an array A"""
A_is_range_index = isinstance(A, RangeIndexType)
B_is_range_index = isinstance(B, RangeIndexType)
if isinstance(A, (types.Array, RangeIndexType)):
if isinstance(B, (types.Array, RangeIndexType)):
def _append_single_numeric_impl(A, B):
_A = A.values if A_is_range_index == True else A # noqa
_B = B.values if B_is_range_index == True else B # noqa
return numpy.concatenate((_A, _B,))
return _append_single_numeric_impl
elif isinstance(B, (types.UniTuple, types.List)) and isinstance(B.dtype, (types.Array, RangeIndexType)):
B_dtype_is_range_index = isinstance(B.dtype, RangeIndexType)
numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])
# TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime
def _append_list_numeric_impl(A, B):
total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()
new_data = numpy.empty(total_length, numba_common_dtype)
stop = len(A)
_A = numpy.array(A) if A_is_range_index == True else A # noqa
new_data[:stop] = _A
for arr in B:
_arr = numpy.array(arr) if B_dtype_is_range_index == True else arr # noqa
start = stop
stop = start + len(_arr)
new_data[start:stop] = _arr
return new_data
return _append_list_numeric_impl
elif A == string_array_type:
if B == string_array_type:
def _append_single_string_array_impl(A, B):
total_size = len(A) + len(B)
total_chars = num_total_chars(A) + num_total_chars(B)
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
pos += append_string_array_to(new_data, pos, B)
return new_data
return _append_single_string_array_impl
elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):
def _append_list_string_array_impl(A, B):
array_list = [A] + list(B)
total_size = numpy.array([len(arr) for arr in array_list]).sum()
total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()
new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)
pos = 0
pos += append_string_array_to(new_data, pos, A)
for arr in B:
pos += append_string_array_to(new_data, pos, arr)
return new_data
return _append_list_string_array_impl
@sdc_register_jitable
def fill_array(data, size, fill_value=numpy.nan, push_back=True):
"""
Fill array with given values to reach the size
"""
if push_back:
return numpy.append(data, numpy.repeat(fill_value, size - data.size))
return numpy.append(numpy.repeat(fill_value, size - data.size), data)
@sdc_register_jitable
def fill_str_array(data, size, push_back=True):
"""
Fill StringArrayType array with given values to reach the size
"""
string_array_size = len(data)
nan_array_size = size - string_array_size
num_chars = sdc.str_arr_ext.num_total_chars(data)
result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)
# Keep NaN values of initial array
arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])
data_str_list = sdc.str_arr_ext.to_string_list(data)
nan_list = [''] * nan_array_size
result_list = data_str_list + nan_list if push_back else nan_list + data_str_list
cp_str_list_to_array(result_data, result_list)
# Batch=64 iteration to avoid threads competition
batch_size = 64
if push_back:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < string_array_size:
if arr_is_na_mask[j]:
str_arr_set_na(result_data, j)
else:
str_arr_set_na(result_data, j)
else:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < nan_array_size:
str_arr_set_na(result_data, j)
else:
str_arr_j = j - nan_array_size
if arr_is_na_mask[str_arr_j]:
str_arr_set_na(result_data, j)
return result_data
@numba.njit
def _hpat_ensure_array_capacity(new_size, arr):
""" Function ensuring that the size of numpy array is at least as specified
Returns newly allocated array of bigger size with copied elements if existing size is less than requested
"""
k = len(arr)
if k >= new_size:
return arr
n = k
while n < new_size:
n = 2 * n
res = numpy.empty(n, arr.dtype)
res[:k] = arr[:k]
return res
def sdc_join_series_indexes(left, right):
pass
@sdc_overload(sdc_join_series_indexes, jit_options={'parallel': False})
def sdc_join_series_indexes_overload(left, right):
"""Function for joining arrays left and right in a way similar to pandas.join 'outer' algorithm"""
# check that both operands are of types used for representing Pandas indexes
if not (isinstance(left, (types.Array, StringArrayType, RangeIndexType))
and isinstance(right, (types.Array, StringArrayType, RangeIndexType))):
return None
convert_left = isinstance(left, RangeIndexType)
convert_right = isinstance(right, RangeIndexType)
def _convert_to_arrays_impl(left, right):
_left = left.values if convert_left == True else left # noqa
_right = right.values if convert_right == True else right # noqa
return sdc_join_series_indexes(_left, _right)
if isinstance(left, RangeIndexType) and isinstance(right, RangeIndexType):
def sdc_join_range_indexes_impl(left, right):
if (left is right or numpy_like.array_equal(left, right)):
joined = left.values
lidx = numpy.arange(len(joined))
ridx = lidx
return joined, lidx, ridx
else:
return sdc_join_series_indexes(left.values, right.values)
return sdc_join_range_indexes_impl
elif isinstance(left, RangeIndexType) and isinstance(right, types.Array):
return _convert_to_arrays_impl
elif isinstance(left, types.Array) and isinstance(right, RangeIndexType):
return _convert_to_arrays_impl
# TODO: remove code duplication below and merge numeric and StringArray impls into one
# needs equivalents of numpy.arsort and _hpat_ensure_array_capacity for StringArrays
elif isinstance(left, types.Array) and isinstance(right, types.Array):
numba_common_dtype = find_common_dtype_from_numpy_dtypes([left.dtype, right.dtype], [])
if isinstance(numba_common_dtype, types.Number):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
joined = numpy.empty(est_total_size, numba_common_dtype)
left_nan = []
right_nan = []
for i in range(lsize):
if numpy.isnan(left[i]):
left_nan.append(i)
for i in range(rsize):
if numpy.isnan(right[i]):
right_nan.append(i)
# sort arrays saving the old positions
sorted_left = numpy.argsort(left, kind='mergesort')
sorted_right = numpy.argsort(right, kind='mergesort')
# put the position of the nans in an increasing sequence
sorted_left[lsize-len(left_nan):] = left_nan
sorted_right[rsize-len(right_nan):] = right_nan
i, j, k = 0, 0, 0
while (i < lsize and j < rsize):
joined = _hpat_ensure_array_capacity(k + 1, joined)
lidx = _hpat_ensure_array_capacity(k + 1, lidx)
ridx = _hpat_ensure_array_capacity(k + 1, ridx)
left_index = left[sorted_left[i]]
right_index = right[sorted_right[j]]
if (left_index < right_index) or numpy.isnan(right_index):
joined[k] = left_index
lidx[k] = sorted_left[i]
ridx[k] = -1
i += 1
k += 1
elif (left_index > right_index) or numpy.isnan(left_index):
joined[k] = right_index
lidx[k] = -1
ridx[k] = sorted_right[j]
j += 1
k += 1
else:
# find ends of sequences of equal index values in left and right
ni, nj = i, j
while (ni < lsize and left[sorted_left[ni]] == left_index):
ni += 1
while (nj < rsize and right[sorted_right[nj]] == right_index):
nj += 1
# join the blocks found into results
for s in numpy.arange(i, ni, 1):
block_size = nj - j
to_joined = numpy.repeat(left_index, block_size)
to_lidx = numpy.repeat(sorted_left[s], block_size)
to_ridx = numpy.array([sorted_right[k] for k in numpy.arange(j, nj, 1)], numpy.int64)
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
joined[k:k + block_size] = to_joined
lidx[k:k + block_size] = to_lidx
ridx[k:k + block_size] = to_ridx
k += block_size
i = ni
j = nj
# fill the end of joined with remaining part of left or right
if i < lsize:
block_size = lsize - i
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
ridx[k: k + block_size] = numpy.repeat(-1, block_size)
while i < lsize:
joined[k] = left[sorted_left[i]]
lidx[k] = sorted_left[i]
i += 1
k += 1
elif j < rsize:
block_size = rsize - j
joined = _hpat_ensure_array_capacity(k + block_size, joined)
lidx = _hpat_ensure_array_capacity(k + block_size, lidx)
ridx = _hpat_ensure_array_capacity(k + block_size, ridx)
lidx[k: k + block_size] = numpy.repeat(-1, block_size)
while j < rsize:
joined[k] = right[sorted_right[j]]
ridx[k] = sorted_right[j]
j += 1
k += 1
return joined[:k], lidx[:k], ridx[:k]
return sdc_join_series_indexes_impl
else:
return None
elif (left == string_array_type and right == string_array_type):
def sdc_join_series_indexes_impl(left, right):
# allocate result arrays
lsize = len(left)
rsize = len(right)
est_total_size = int(1.1 * (lsize + rsize))
lidx = numpy.empty(est_total_size, numpy.int64)
ridx = numpy.empty(est_total_size, numpy.int64)
# use Series.sort_values since argsort for StringArrays not implemented
original_left_series = pandas.Series(left)
original_right_series = | pandas.Series(right) | pandas.Series |
# # Imports
# import pandas as pd
# from matplotlib import pyplot as plt
# import numpy as np
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import chi2
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.cross_validation import cross_val_score, ShuffleSplit
# from sklearn.feature_selection import RFE
# from sklearn.linear_model import LinearRegression
# from sklearn.decomposition import PCA, TruncatedSVD
# from sklearn.model_selection import train_test_split
#
#
#
# # Read in player data and team ranks
# fantasy_data_file = './resources/player_data.csv'
# league_ranks_file = './resources/team_ranks.csv'
# league_ranks = pd.read_csv(league_ranks_file)
# fantasy_data = pd.read_csv(fantasy_data_file)
#
# # Only take players that have played over 0 minutes in each game, and separate into positions for models
# Reliable_players = fantasy_data.loc[fantasy_data['minutes'] > 0]
# Goalkeepers = Reliable_players.loc[Reliable_players['pos'] == 'Goalkeeper']
# Defenders = Reliable_players.loc[Reliable_players['pos'] == 'Defender']
# Midfielders = Reliable_players.loc[Reliable_players['pos'] == 'Midfielder']
# Forwards = Reliable_players.loc[Reliable_players['pos'] == 'Forward']
#
# # Rename and drop unwanted Columns
# league_ranks.rename(columns={'More': 'round'}, inplace=True)
# league_ranks.rename(columns={'Club': 'team'}, inplace=True)
# league_ranks.drop(['Played', 'Won', 'Drawn', 'Lost', 'GF', 'GA', 'GD', 'Points'], axis=1, inplace=True)
#
# # Position values show previous position, get rid of this and keep the original position
# league_ranks['Position'] = league_ranks['Position'].str[0:2]
#
# # Give the league ranks a round value, this is the gameweek that each ranking belongs to
# x = 1
# for i in range(0, 760, 20):
# j = i + 20
# league_ranks.iloc[i:j, league_ranks.columns.get_loc('round')] = x
# x = x + 1
#
# # Merge the two DataFrames so that we have individual player data with opponent teams position in the table
# DefenderModal = pd.merge(Defenders, league_ranks, how='left', left_on = ['round','team'], right_on = ['round','team'])
#
# DefenderModal.drop(['Unnamed: 0', 'saves','ict_index','big_chances_created',
# 'selected','transfers_in','transfers_out'], axis=1, inplace=True)
#
# DefenderModal.rename(columns={'Position': 'team_rank'}, inplace=True)
# league_ranks.rename(columns={'team': 'opponents'}, inplace=True)
#
# DefenderModal = pd.merge(DefenderModal, league_ranks, how='left', left_on = ['round','opponents'], right_on = ['round','opponents'])
#
# DefenderModal.rename(columns={'Position': 'opponent_team_rank'}, inplace=True)
#
# DefenderModal = DefenderModal[['player_id', 'name', 'team', 'pos', 'round', 'opponents', 'venue',
# 'team_goals', 'opposition_goals', 'minutes',
# 'goals_scored', 'assists', 'clean_sheets', 'bonus', 'value',
# 'team_rank', 'opponent_team_rank', 'total_points']]
#
# # DefenderModal.to_csv('./resources/DefenderModal.csv', sep=',', encoding='utf-8')
#
# DefenderModal.drop(['value'], axis=1, inplace=True)
# DefenderModal.drop(['pos'], axis=1, inplace=True)
# DefenderModal.drop(['team'], axis=1, inplace=True)
# DefenderModal.drop(['name'], axis=1, inplace=True)
# DefenderModal.drop(['opponents'], axis=1, inplace=True)
#
# DefenderModal.columns = ['player_id', 'round', 'home',
# 'team_goals', 'opposition_goals', 'minutes', 'goals', 'assists',
# 'clean_sheets', 'bonus', 'team_rank', 'opponent_team_rank',
# 'total_points']
#
# DefenderModal[['round','team_rank', 'opponent_team_rank']] = \
# DefenderModal[['round','team_rank', 'opponent_team_rank']].apply(pd.to_numeric)
#
# home_away = {'H': True, 'A': False}
# DefenderModal['home'] = DefenderModal['home'].map(home_away)
#
# DefenderModal.rename(columns={'total_points': 'prediction_points'}, inplace=True)
# for index, row in DefenderModal.iterrows():
# if DefenderModal.loc[index, "prediction_points"] < 6:
# DefenderModal.loc[index, "prediction_points"] = False
# else:
# DefenderModal.loc[index, "prediction_points"] = True
#
# # UNIVARIATE SELECTION
# def univariate_selection():
# array = DefenderModal.values
# X = array[:, 0:12]
# Y = array[:, 12]
#
# test = SelectKBest(score_func=chi2, k=8)
# fit = test.fit(X, Y.astype(int))
#
# np.set_printoptions(precision=3)
# print(fit.scores_)
#
# def model_based_ranking():
# array = DefenderModal.values
# X = array[:, 0:12]
# Y = array[:, 12]
# names = DefenderModal.columns
#
# rf = RandomForestRegressor(n_estimators=20, max_depth=4)
# scores = []
# for i in range(X.shape[1]):
# score = cross_val_score(rf, X[:, i:i + 1], Y, scoring="r2",
# cv=ShuffleSplit(len(X), 3, .3))
# scores.append((round(np.mean(score), 3), names[i]))
#
# print(sorted(scores, reverse=True))
#
# def recursive_feature_elimination():
# array = DefenderModal.values
# X = array[:,0:12]
# Y = array[:,12]
# # feature extraction
# model = LinearRegression()
# rfe = RFE(model, 9)
# fit = rfe.fit(X, Y)
# names = DefenderModal.columns
#
# print('Features sorted by their rank:')
# print(sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), names)))
#
# print("RECURSIVE FEATURE ELIMINATION:")
# recursive_feature_elimination()
#
# def PCA():
# array = DefenderModal.values
# X = array[:, 0:12]
# Y = array[:, 12]
#
# reg = LinearRegression()
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4)
# reg.fit(x_train, y_train)
# reg.score(x_test, y_test)
#
# def SVD():
# array = DefenderModal.values
# X = array[:, 0:12]
# Y = array[:, 12]
# svd = TruncatedSVD(n_components=8)
# x = svd.fit(X).transform(X)
# reg = LinearRegression()
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4)
# reg.fit(x_train, y_train)
#
# # Load dataframe value into array for best feature selection
# # array = DefenderModal.values
# # X = array[:, 0:12]
# # Y = array[:, 12]
# #
# # test = SelectKBest(score_func=chi2, k=10)
# # fit = test.fit(X, Y.astype(int))
# #
# # np.set_printoptions(precision=3)
# Imports
import pandas as pd
from pandas.plotting import scatter_matrix
from matplotlib import pyplot as plt
def clean_defender_data():
# Read in player data and team ranks
fantasy_data_file = './resources/player_data.csv'
league_ranks_file = './resources/team_ranks.csv'
league_ranks = pd.read_csv(league_ranks_file)
fantasy_data = | pd.read_csv(fantasy_data_file) | pandas.read_csv |
import pandas as pd
import os, sys, pickle
from keras import models
from keras import layers
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.utils import multi_gpu_model
import tensorflow as tf
import subprocess, argparse
from get_model import *
from get_generators import *
n_GPUs = str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.clear_session()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--img_size", type=int, default = 256, help="Input sky image resolution")
parser.add_argument("--no_img", type=int, default= 2, help="Length of input image sequence")
parser.add_argument("--train_batchsize", type=int, default= 64)
parser.add_argument("--validation_batchsize", type=int, default= 64)
parser.add_argument("--test_batchsize", type=int, default= 1)
args = parser.parse_args()
return args
args = get_args()
img_size = args.img_size
no_img = args.no_img
train_batchsize = args.train_batchsize
validation_batchsize = args.validation_batchsize
test_batchsize = args.test_batchsize
start_train = pd.to_datetime('2012-01-01')
end_train = | pd.to_datetime('2014-12-31') | pandas.to_datetime |
import pandas as pd
import numpy as np
import math
import re
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
import io
from IPython.display import display, HTML, Markdown, SVG
from datetime import timedelta
import scipy
import scipy.signal
plt.style.use('seaborn-darkgrid')
plt.rcParams['figure.dpi'] = 300
plt.rcParams['font.size'] = 10 # controls default text sizes
plt.rcParams['axes.titlesize'] = 'small' # fontsize of the axes title
plt.rcParams['axes.labelsize'] = 'small' # fontsize of the x and y labels
plt.rcParams['xtick.labelsize'] = 'x-small' # fontsize of the tick labels
plt.rcParams['ytick.labelsize'] = 'x-small' # fontsize of the tick labels
plt.rcParams['legend.fontsize'] = 'x-small' # legend fontsize
plt.rcParams['figure.titlesize'] = 'small' # fontsize of the figure title
plt.rcParams['axes.axisbelow'] = True # axis behind the graphs
def get_admission(admissionid, con):
sql_admissions_diagnoses = """
WITH diagnosis_groups AS (
SELECT admissionid,
item,
value as diagnosis_group,
CASE
WHEN itemid = 13110 AND valueid BETWEEN 1 AND 3 THEN 1 --D_Hoofdgroep
WHEN itemid = 16651 AND valueid BETWEEN 7 AND 9 THEN 1 --DMC_Hoofdgroep
WHEN itemid = 16997 AND valueid BETWEEN 11 AND 20 THEN 1 --APACHE IV Groepen
WHEN itemid = 18588 AND valueid BETWEEN 1 AND 7 THEN 1 --Apache II Hoofdgroep
ELSE 0
END AS surgical,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
--MAIN GROUP - LEVEL 0
13110, --D_Hoofdgroep
16651 --DMC_Hoofdgroep, Medium Care
)
),diagnosis_subgroups AS (
SELECT admissionid,
item,
value as diagnosis_subgroup,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
--SUB GROUP - LEVEL 1
13111, --D_Subgroep_Thoraxchirurgie
16669, --DMC_Subgroep_Thoraxchirurgie
13112, --D_Subgroep_Algemene chirurgie
16665, --DMC_Subgroep_Algemene chirurgie
13113, --D_Subgroep_Neurochirurgie
16667, --DMC_Subgroep_Neurochirurgie
13114, --D_Subgroep_Neurologie
16668, --DMC_Subgroep_Neurologie
13115, --D_Subgroep_Interne geneeskunde
16666 --DMC_Subgroep_Interne geneeskunde
)
), diagnoses AS (
SELECT admissionid,
item,
value as diagnosis,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
-- Diagnosis - LEVEL 2
--SURGICAL
13116, --D_Thoraxchirurgie_CABG en Klepchirurgie
16671, --DMC_Thoraxchirurgie_CABG en Klepchirurgie
13117, --D_Thoraxchirurgie_Cardio anders
16672, --DMC_Thoraxchirurgie_Cardio anders
13118, --D_Thoraxchirurgie_Aorta chirurgie
16670, --DMC_Thoraxchirurgie_Aorta chirurgie
13119, --D_Thoraxchirurgie_Pulmonale chirurgie
16673, --DMC_Thoraxchirurgie_Pulmonale chirurgie
13141, --D_Algemene chirurgie_Algemeen
16642, --DMC_Algemene chirurgie_Algemeen
13121, --D_Algemene chirurgie_Buikchirurgie
16643, --DMC_Algemene chirurgie_Buikchirurgie
13123, --D_Algemene chirurgie_Endocrinologische chirurgie
16644, --DMC_Algemene chirurgie_Endocrinologische chirurgie
13145, --D_Algemene chirurgie_KNO/Overige
16645, --DMC_Algemene chirurgie_KNO/Overige
13125, --D_Algemene chirurgie_Orthopedische chirurgie
16646, --DMC_Algemene chirurgie_Orthopedische chirurgie
13122, --D_Algemene chirurgie_Transplantatie chirurgie
16647, --DMC_Algemene chirurgie_Transplantatie chirurgie
13124, --D_Algemene chirurgie_Trauma
16648, --DMC_Algemene chirurgie_Trauma
13126, --D_Algemene chirurgie_Urogenitaal
16649, --DMC_Algemene chirurgie_Urogenitaal
13120, --D_Algemene chirurgie_Vaatchirurgie
16650, --DMC_Algemene chirurgie_Vaatchirurgie
13128, --D_Neurochirurgie _Vasculair chirurgisch
16661, --DMC_Neurochirurgie _Vasculair chirurgisch
13129, --D_Neurochirurgie _Tumor chirurgie
16660, --DMC_Neurochirurgie _Tumor chirurgie
13130, --D_Neurochirurgie_Overige
16662, --DMC_Neurochirurgie_Overige
--MEDICAL
13133, --D_Interne Geneeskunde_Cardiovasculair
16653, --DMC_Interne Geneeskunde_Cardiovasculair
13134, --D_Interne Geneeskunde_Pulmonaal
16658, --DMC_Interne Geneeskunde_Pulmonaal
13135, --D_Interne Geneeskunde_Abdominaal
16652, --DMC_Interne Geneeskunde_Abdominaal
13136, --D_Interne Geneeskunde_Infectieziekten
16655, --DMC_Interne Geneeskunde_Infectieziekten
13137, --D_Interne Geneeskunde_Metabool
16656, --DMC_Interne Geneeskunde_Metabool
13138, --D_Interne Geneeskunde_Renaal
16659, --DMC_Interne Geneeskunde_Renaal
13139, --D_Interne Geneeskunde_Hematologisch
16654, --DMC_Interne Geneeskunde_Hematologisch
13140, --D_Interne Geneeskunde_Overige
16657, --DMC_Interne Geneeskunde_Overige
13131, --D_Neurologie_Vasculair neurologisch
16664, --DMC_Neurologie_Vasculair neurologisch
13132, --D_Neurologie_Overige
16663, --DMC_Neurologie_Overige
13127 --D_KNO/Overige
)
),
interventions AS (
SELECT
admissionid,
CASE
WHEN SUM((item ILIKE \'%ECMO%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS ecmo,
CASE
WHEN SUM((item ILIKE \'%CVVH%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS crrt,
CASE
WHEN SUM((item ILIKE \'%SWAN%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS pac
FROM processitems
GROUP BY admissionid
)
SELECT
a.admissionid,
a.admissioncount,
a.location,
a.urgency,
a.origin,
a.admittedat,
a.admissionyeargroup,
a.dischargedat,
a.lengthofstay,
a.destination,
a.gender,
a.agegroup,
a.dateofdeath,
a.weightgroup,
a.heightgroup,
a.specialty,
diagnoses.diagnosis,
diagnosis_subgroups.diagnosis_subgroup,
diagnosis_groups.diagnosis_group,
i.pac,
i.ecmo,
i.crrt
FROM admissions a
LEFT JOIN diagnoses ON a.admissionid = diagnoses.admissionid
LEFT JOIN diagnosis_subgroups ON a.admissionid = diagnosis_subgroups.admissionid
LEFT JOIN diagnosis_groups ON a.admissionid = diagnosis_groups.admissionid
LEFT JOIN interventions i ON a.admissionid = i.admissionid
WHERE
(diagnoses.rownum = 1 OR diagnoses.rownum IS NULL)
AND (diagnosis_subgroups.rownum = 1 OR diagnosis_subgroups.rownum IS NULL)
AND (diagnosis_groups.rownum = 1 OR diagnosis_groups.rownum IS NULL) --only last updated record
AND a.admissionid = {admissionid}
"""
return pd.read_sql(sql_admissions_diagnoses.format(admissionid=admissionid), con)
def get_admissiondiags(con):
#gets all admissions
return get_admission("a.admissionid", con)
# small utility function to 'apply' a new index to a (grouped) dataframe
def reindex_by_date(df, index_start, index_end, interval, method=None, limit=None, fill_value=0):
new_index = pd.date_range(index_start, index_end, freq=interval)
return df.reindex(new_index, method=method, limit=limit, fill_value=fill_value)
def add_section_box(axis, title):
# adds section headers
x = -0.5
y = 0
width = 0.075
height = 1
rect = Rectangle((x, y), width, height, transform=axis.transAxes, clip_on=False)
axis.add_patch(rect)
axis.text(s=title, x=(x + width / 2), y=height / 2, rotation=90, transform=axis.transAxes,
color='white', fontweight='bold', horizontalalignment='center', verticalalignment='center')
def twinax_match_ticks(ax, twin):
# get info of primary axis
ylim1 = ax.get_ylim()
len1 = ylim1[1] - ylim1[0]
yticks1 = ax.get_yticks()
# gets the distances between ticks for the primary axis
rel_dist = [(y - ylim1[0]) / len1 for y in yticks1]
# gets info of secondary axis
ylim2 = twin.get_ylim()
len2 = ylim2[1] - ylim2[0]
# sets the same distances for the ticks of the twin axis
yticks2 = [ry * len2 + ylim2[0] for ry in rel_dist]
# change the ticks of the twin axis (secondary y-axis)
twin.set_yticks(yticks2) # changes the thicks
twin.set_ylim(ylim2) # restores the previous limits
twin.grid(False) # hides the unneccessary lines
def show_chart(admissionid, con):
admission = get_admission(admissionid, con)
admittedat = admission['admittedat'].values[0]
#size of medication action icons vs infusion bar
scale_factor = 0.3
los = admission['lengthofstay'].values[0]
los_rounded = math.ceil(los / 12) * 12
if los_rounded < 7*24:
fluids_interval = timedelta(hours=8)
else:
fluids_interval = timedelta(hours=24)
flowsheet_interval = timedelta(hours=int(los_rounded/5))
signals_interval = timedelta(minutes=10)
drugs_info_interval = timedelta(hours=int(los_rounded/12))
# defines all subplots here for sharing the x-axis among them
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, sharex=True, figsize=(8, 12), constrained_layout=True,
gridspec_kw={'height_ratios': [1.25, 0.75, 1, 1.25, 1, 1]})
# date format for x-axes
ax1.xaxis_date()
ax2.xaxis_date()
ax3.xaxis_date()
ax4.xaxis_date()
ax5.xaxis_date()
ax6.xaxis_date()
#######################################
# Vitals
#######################################
ax1_twin = ax1.twinx() # second axis sharing x-axis for SpO2
ax2_twin = ax2.twinx() # second axis sharing x-axis for SvO2
ax2_twin_right = ax2.twinx() # second axis for temperatures
sql_signals = """
SELECT
n.measuredat AS time,
n.value,
n.itemid,
n.item,
n.unit
FROM numericitems n
LEFT JOIN admissions a ON
n.admissionid = a.admissionid
WHERE
n.admissionid = {admissionid}
AND n.measuredat >= a.admittedat AND n.measuredat <= a.dischargedat
--vital signs
AND (
n.itemid IN (
--vitals
6640, --Heart rate
6642, -- mABP
6641, -- sABP
6643, -- dABP
6709, -- SpO2
--circulation
6656, --Cardiac Output
10053, --Lactaat (bloed)
--temperatures
8658, --Temp Bloed
8659, --Temperatuur Perifeer 2
8662, --Temperatuur Perifeer 1
13058, --Temp Rectaal
13059, --Temp Lies
13060, --Temp Axillair
13061, --Temp Oraal
13062, --Temp Oor
13063, --Temp Huid
13952, --Temp Blaas
16110 --Temp Oesophagus
)
)
UNION
--query for getting (mixed/central) venous samples
SELECT
n.measuredat AS time,
CASE
WHEN n.itemid = 12311 THEN
CASE
WHEN n.value <= 1 THEN n.value*100
ELSE n.value
END
ELSE n.value
END AS value,
n.itemid,
CASE
WHEN n.itemid = 12311 THEN 'SvO2'
ELSE n.item
END AS item,
n.unit
FROM numericitems n
LEFT JOIN freetextitems f ON
n.admissionid = f.admissionid
AND n.measuredat = f.measuredat
AND f.itemid = 11646 --Afname (bloed): source of specimen
LEFT JOIN admissions a ON
n.admissionid = a.admissionid
WHERE
n.admissionid = {admissionid}
AND n.measuredat >= a.admittedat AND n.measuredat <= a.dischargedat
AND n.itemid IN (
12311 --Saturatie (bloed)
)
AND LOWER(f.value) LIKE '%ven%' -- source is (mixed) venous
ORDER BY time
""".format(admissionid=admissionid)
signals = pd.read_sql(sql_signals, con)
signals['time'] = pd.to_datetime(signals['time'], unit='ms')
# downsample to 1-hour for readability of device data
signals = signals.set_index(['time', 'item', 'itemid', 'unit']).groupby(
['item', 'itemid', 'unit']).resample('1H', level=0).mean().reset_index().dropna()
# reasonable physiological limits for axes
ax1.set_ylim(top=300, bottom=0) # vitals
ax1_twin.set_ylim(top=100, bottom=40) # oxygenation
ax2.set_ylim(top=10, bottom=0) # circulation
ax2_twin.set_ylim(top=100, bottom=0) # SvO2
ax2_twin_right.set_ylim(top=42, bottom=32) # temperature
# formatting of the twin axes
ax1_twin.tick_params('y', colors='c') # cyan tick labels
ax2_twin.spines["left"].set_position(("axes", 0)) #
ax2_twin.yaxis.set_label_position('left')
ax2_twin.yaxis.set_ticks_position('left')
ax2_twin.spines["left"].set_visible(True)
ax2_twin.tick_params('y', direction='in', pad=-2, colors='b') # blue tick labels
plt.setp(ax2_twin.get_yticklabels(), ha="left")
ax2_twin_right.tick_params('y', colors='r') # red tick labels
signalids = [
##itemid, color, z_order, axis, fill_between, id/value, id or value
(6641, 'v-r', 10, ax1, 'no_fill'), # sbp
(6642, '.-r', 11, ax1, 'no_fill'), # mabp
(6643, '^-r', 12, ax1, 'no_fill'), # dabp
(6641, 'r', 13, ax1, 'fill_between', 'fill_id', 6643), # fill in between SBP and DABP
(6640, 'x-g', 14, ax1, 'no_fill'), # heart rate
(6709, 'c', 15, ax1_twin, 'no_fill'), # SpO2
(6709, 'c', 16, ax1_twin, 'fill_between', 'fill_value', 100), # SpO2 fill between 100
(10053, 'o-m', 20, ax2, 'no_fill'), # lactate
(10053, 'm', 20, ax2, 'fill_between', 'fill_value', 0), # lactate fill between 0
(6656, 'o-y', 21, ax2, 'no_fill'), # cadiac output
(6656, 'y', 21, ax2, 'fill_between', 'fill_value', 0), # cadiac output fill between 0
(12311, 'o--b', 20, ax2_twin, 'no_fill'), # venous saturation
(12311, 'b', 21, ax2_twin, 'fill_between', 'fill_value', 100), # cadiac output fill between 0
(8658, '-r', 20, ax2_twin_right, 'no_fill'), # Temp Bloed
(8659, '--r', 20, ax2_twin_right, 'no_fill'), # Temperatuur Perifeer 2
(8662, '--r', 20, ax2_twin_right, 'no_fill'), # Temperatuur Perifeer 1
(13058, ':r', 20, ax2_twin_right, 'no_fill'), # Temp Rectaal
(13059, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Lies
(13060, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Axillair
(13061, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Oraal
(13062, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Oor
(13063, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Huid
(13952, '--r', 20, ax2_twin_right, 'no_fill'), # Temp Blaas
(16110, ':r', 20, ax2_twin_right, 'no_fill') # Temp Oesophagus
]
# English translations
signal_labels = {
6641: 'ABP systolic',
6642: 'ABP mean',
6643: 'ABP diastolic',
6640: 'Heart rate',
6709: 'SpO2',
10053: 'Lactate',
6656: 'Cardiac output',
12311: 'SvO2',
8658: 'Temperature blood',
8659: 'Temperature peripheral 2',
8662: 'Temperature peripheral 1',
13058: 'Temperature rectal',
13059: 'Temperature inguinal',
13060: 'Temperature axillary',
13061: 'Temperature oral',
13062: 'Temperature tympanic',
13063: 'Temperature skin',
13952: 'Temperature bladder',
16110: 'Temperature esophaghus'
}
for s in signalids:
ax = s[3] # axis
signal = signals[signals['itemid'] == s[0]]
if len(signal) == 0:
continue
if not s[4] == 'fill_between': # regular plot (not fill between)
ax.plot(signal['time'],
signal['value'],
s[1], # fmt = '[marker][line][color]',
markersize=0, # hide for readability
label=signal_labels[s[0]],
zorder=s[2])
else: # fill between
if s[5] == 'fill_id':
other_signal = signals[signals['itemid'] == s[6]]
if len(other_signal) > len(signal):
signal = signal.reindex(other_signal.index, method='nearest')
elif len(signal) > len(other_signal):
other_signal = other_signal.reindex(signal.index, method='nearest')
other_signal_values = other_signal['value']
else:
other_signal_values = s[6]
ax.fill_between(signal['time'],
other_signal_values,
y2=signal['value'],
facecolor=s[1],
alpha=0.1,
zorder=s[2])
# create the legends outside the axes
ax1.legend(bbox_to_anchor=(-.1, 1), loc='upper right', borderaxespad=0, markerfirst=False)
ax1_twin.legend(bbox_to_anchor=(1.1, 1), loc='upper left', borderaxespad=0)
ax2.legend(bbox_to_anchor=(-.1, 1), loc='upper right', borderaxespad=0, markerfirst=False)
ax2_twin.legend(bbox_to_anchor=(-.1, 0), loc='lower right', borderaxespad=0, markerfirst=False)
plt.setp(ax2_twin.get_legend().get_texts(), color='b')
ax2_twin_right.legend(bbox_to_anchor=(1.1, 1), loc='upper left', borderaxespad=0)
# create a banner
x = -0.5
y = 1.05
width = 2
height = 0.2
admissionyeargroup = admission['admissionyeargroup'].values[0]
agegroup = admission['agegroup'].values[0]
gender = admission['gender'].values[0]
diagnosis = admission['diagnosis'].values[0]
# translation
if diagnosis == 'Na reanimatie':
diagnosis = 'Post CPR'
if gender == 'Man':
gender = 'Male'
elif gender == 'Vrouw':
gender = 'Female'
title = 'AmsterdamUMCdb admissionid: {} ({} - {} y - {})\nDiagnosis: {}'.format(admissionid, admissionyeargroup,
agegroup, gender, diagnosis)
rect = Rectangle((x, y), width, height, transform=ax1.transAxes, clip_on=False)
ax1.add_patch(rect)
ax1.text(s=title, x=0.5, y=(y + height / 2), rotation=0, transform=ax1.transAxes,
color='white', fontweight='bold', horizontalalignment='center', verticalalignment='center')
##############################################
# FLOWSHEET STYLE DATA
##############################################
sql_flowsheet = """
WITH gcs_components AS (
SELECT
eyes.admissionid,
CASE eyes.itemid
WHEN 6732 THEN 5 - eyes.valueid --Actief openen van de ogen
END AS eyes_score,
CASE motor.itemid
WHEN 6734 THEN 7 - motor.valueid --Beste motore reactie van de armen
END AS motor_score,
CASE verbal.itemid
WHEN 6735 THEN 6 - verbal.valueid --Beste verbale reactie
END AS verbal_score,
eyes.registeredby,
eyes.measuredat AS time
FROM listitems eyes
LEFT JOIN listitems motor ON
eyes.admissionid = motor.admissionid AND
eyes.measuredat = motor.measuredat AND
motor.itemid IN (
6734 --Beste motore reactie van de armen
)
LEFT JOIN listitems verbal ON
eyes.admissionid = verbal.admissionid AND
eyes.measuredat = verbal.measuredat AND
verbal.itemid IN (
6735 --Beste verbale reactie
)
WHERE
eyes.itemid IN (
6732 --Actief openen van de ogen
)
AND eyes.registeredby IN (
'ICV_IC-Verpleegkundig',
'ICV_MC-Verpleegkundig'
)
AND eyes.admissionid = {admissionid}
)
SELECT
time,
'GCS score' AS item,
'E' || eyes_score || 'M' || motor_score || 'V' || (
CASE
WHEN verbal_score < 1 THEN 1
ELSE verbal_score
END)
--|| '=' || (
-- eyes_score + motor_score + (
-- CASE
-- WHEN verbal_score < 1 THEN 1
-- ELSE verbal_score
-- END
-- )
--)
AS value,
eyes_score + motor_score + (
CASE
WHEN verbal_score < 1 THEN 1
ELSE verbal_score
END
)
AS valueid,
'00. Glasgow Coma Scale' AS category
FROM gcs_components
UNION
SELECT
measuredat AS time,
item,
value,
valueid,
CASE
WHEN itemid IN (
9534, --Type beademing Evita 1
6685, --Type Beademing Evita 4
12290 --Ventilatie Mode (Set)
) THEN '02-1. Respiratory support'
WHEN itemid IN (
8189 --Toedieningsweg
) THEN '03-1. Oxygen delivery device'
WHEN itemid IN (
6671 --Hartritme
) THEN '01. Heart rhythm'
END AS category
FROM listitems
WHERE
itemid IN (
9534, --Type beademing Evita 1
6685, --Type Beademing Evita 4
12290, --Ventilatie Mode (Set)
8189, --Toedieningsweg
6671 --Hartritme
)
AND admissionid = {admissionid}
UNION
SELECT
measuredat AS time,
item,
CAST(value AS varchar),
0 AS valueid, --to allow UNION both tables
CASE
WHEN itemid IN (
6699, --FiO2 %: setting on Evita ventilator
12279, --O2 concentratie --measurement by Servo-i/Servo-U ventilator
12369 --SET %O2: used with BiPap Vision ventilator
) THEN '02-2. FiO2'
WHEN itemid IN (
-- Peak pressures on ventilator
8852, --P max
8877, --Peak druk -- Evita
12281, --Piek druk --Servo-i
16239 --Zephyros Ppeak
) THEN '02-3. P peak'
WHEN itemid IN (
--PEEP settings on respiratory support
12284, --PEEP (Set): setting on Servo-i ventilator
8862, --PEEP/CPAP: setting on Evita ventilator
--8879, --PEEP (gemeten): measured by Evita ventilator
16250 --Zephyros PEEP
) THEN '02-4. PEEP'
WHEN itemid IN (
--Oxygen flow
8845, -- O2 l/min
10387, --Zuurstof toediening (bloed)
18587 --Zuurstof toediening
) THEN '03-2. Oxygen flow'
END AS category
FROM numericitems
WHERE
itemid IN (
-- FiO2
6699, --FiO2 %: setting on Evita ventilator
12279, --O2 concentratie --measurement by Servo-i/Servo-U ventilator
12369, --SET %O2: used with BiPap Vision ventilator
-- Peak pressures on ventilator
8852, --P max
8877, --Peak druk -- Evita
12281, --Piek druk --Servo-i
16239, --Zephyros Ppeak
--PEEP settings on respiratory support
12284, --PEEP (Set): setting on Servo-i ventilator
8862, --PEEP/CPAP: setting on Evita ventilator
--8879, --PEEP (gemeten): measured by Evita ventilator
16250, --Zephyros PEEP
--Oxygen flow
8845, -- O2 l/min
10387, --Zuurstof toediening (bloed)
18587 --Zuurstof toediening
)
AND admissionid = {admissionid}
""".format(admissionid=admissionid)
flowsheet = pd.read_sql(sql_flowsheet, con)
flowsheet['time'] = pd.to_datetime(flowsheet['time'], unit='ms')
# downsample to hourly, re-index to create same numer of rows per category and the downsample to requested interval
flowsheet_resampled_hourly = flowsheet.set_index(['time', 'category']).sort_values(['valueid'],
ascending=False).groupby(
['category']).resample(timedelta(hours=1), level=0).first()
index_start = flowsheet_resampled_hourly.index.get_level_values('time').min()
index_end = flowsheet_resampled_hourly.index.get_level_values('time').max()
flowsheet_reindexed_hourly = flowsheet_resampled_hourly.reset_index().set_index('time').groupby(
['category']).apply(
reindex_by_date, index_start=index_start, index_end=index_end, interval=timedelta(hours=1),
method=None, fill_value=np.NaN)[['value', 'valueid']]
flowsheet_reindexed_hourly.index.set_names('time', level=1, inplace=True)
flowsheet_resampled = flowsheet_reindexed_hourly.sort_values(['valueid'], ascending=False).groupby(
['category']).resample(flowsheet_interval, level=1).first()
# translate some values:
flowsheet_resampled.loc[flowsheet_resampled['value'] == 'Kunstneus', 'value'] = 'HME'
flowsheet_resampled.loc[flowsheet_resampled['value'] == 'O2-bril', 'value'] = 'Prongs'
labels = []
ticks = []
pos = 0
# display flowsheet
flowsheet_groups = flowsheet_resampled.fillna('').reset_index().groupby(['category'])
for name, group in flowsheet_groups:
label = re.sub(r'[0-9\-].+\.\s', '', name)
labels.append(label) # saves the label for ticks
ticks.append(pos) ##saves the position for ticks
for index, row in group.iterrows():
ax3.barh(pos, flowsheet_interval, left=row['time'], height=4, facecolor='white', alpha=0.0,
edgecolor='white', linewidth=1)
ax3.annotate(row['value'], xy=(row['time'] + flowsheet_interval / 2, pos), fontsize='x-small',
color='black', horizontalalignment='center', verticalalignment='center')
ax3.axhline(y=pos + 2, ls='-', color='white') # horizontal gridline
pos = pos - 4
ax3.axhline(y=pos + 2, ls='-', color='white')
# shows the labels and a flowsheet grid
ax3.set_yticks(ticks)
ax3.set_yticklabels(labels)
ax3.grid(False, which='major', axis='y')
##############################################
# CONTINUOUS INFUSIONS
##############################################
sql_drugitems_continuous = """
SELECT
ordercategoryid,
ordercategory,
itemid,
item,
CASE
WHEN rate >= 0 THEN rate
ELSE dose
END AS rate,
start AS time,
stop - start AS duration,
action
FROM drugitems
WHERE
iscontinuous = B'1'
AND NOT itemid IN (
--from ordercategoryid 65 (syringe pumps)
9424, --NaCL 0,9% spuit
19129, --Insuline aspart (Novorapid)
9001, --Kaliumchloride (KCL)
18783, --Calciumgluconaat 10%
--other
7257, --Glucose 5 %
7291, --NaCl 0,45%/Glucose 2,5%
7293, --NaCl 0,9 %
7316, --Ri-Lac (Ringers lactaat)
8937, --Drukzak
8939, --Medicijnlijn medicatie
12610, --Nutrison Sterilized water
16904 --Drukzak IABP
)
AND admissionid = {admissionid}
ORDER BY itemid, start
""".format(admissionid=admissionid)
drugitems_continuous = pd.read_sql(sql_drugitems_continuous, con)
drugitems_continuous['time'] = pd.to_datetime(drugitems_continuous['time'], unit='ms')
drugitems_continuous['duration'] = pd.to_timedelta(drugitems_continuous['duration'], unit='ms')
# resample for displaying annotations (rates)
drugitems_continuous_resampled = drugitems_continuous.set_index(
['time', 'ordercategory', 'item', 'itemid']).groupby(
['ordercategory', 'item', 'itemid']).resample(fluids_interval, level=0).max().reset_index()
drugitems_continuous_groups = drugitems_continuous.groupby(['ordercategory', 'itemid'])
drugitems_continuous_groups_for_annotation = drugitems_continuous_resampled.groupby(['ordercategory', 'itemid'])
drugitems_continous_labels = {
6964: 'Clonidine', # Clonidine (Catapresan)
7196: 'Enoximone', # Enoximon (Perfan)
7219: 'Fentanyl', # Fentanyl
7244: 'Furosemide', # Furosemide (Lasix)
7930: 'Heparin', # Heparine
7194: 'Midazolam', # Midazolam (Dormicum)
7229: 'Norepinephrine', # Noradrenaline (Norepinefrine)
7480: 'Propofol', # Propofol (Diprivan)
12588: 'Nutrison Protein Plus', # Nutrison Protein Plus
}
# display continuous infusions
labels = []
ticks = []
pos = 0
last_rate = 0
last_annotation_time = 0
for name, group in drugitems_continuous_groups:
# label = drugitems_continous_labels[name[1]]
label = group['item'].values[0].split('(')[0]
labels.append(label) # saves the label for ticks
ticks.append(pos) ##saves the position for ticks
for index, row in group.iterrows():
ax4.barh(pos, row['duration'], left=row['time'], height=10, facecolor='xkcd:slate blue',
edgecolor='black', linewidth=0)
if (last_annotation_time == 0) or (row[
'time'] - last_annotation_time >= drugs_info_interval): # only display data when interval has passed
if row['rate'] > last_rate:
# up arrow
ax4.annotate("{:.1f}".format(row['rate']), xy=(row['time'], pos + 5), xytext=(0, 0),
textcoords='offset points',
fontsize='xx-small', color='black', horizontalalignment='center',
verticalalignment='bottom')
ax4.plot(row['time'], pos, 's', markeredgecolor='black', markerfacecolor='white',
markersize=22 * scale_factor, markeredgewidth=1)
ax4.plot(row['time'], pos, 'r^', markersize=10 * scale_factor, markeredgewidth=1)
elif row['rate'] < last_rate:
# down arrow
ax4.annotate("{:.1f}".format(row['rate']), xy=(row['time'], pos + 5), xytext=(0, 0),
textcoords='offset points',
fontsize='xx-small', color='black', horizontalalignment='center',
verticalalignment='bottom')
ax4.plot(row['time'], pos, 's', markeredgecolor='black', markerfacecolor='white',
markersize=22 * scale_factor, markeredgewidth=1)
ax4.plot(row['time'], pos, 'gv', markersize=10 * scale_factor, markeredgewidth=1)
else:
# start arrow
ax4.annotate("{:.1f}".format(row['rate']), xy=(row['time'], pos + 5), xytext=(0, 0),
textcoords='offset points',
fontsize='xx-small', color='black', horizontalalignment='center',
verticalalignment='bottom')
ax4.plot(row['time'], pos, 's', markeredgecolor='black', markerfacecolor='white',
markersize=22 * scale_factor, markeredgewidth=1)
ax4.plot(row['time'], pos, 'y>', markersize=10 * scale_factor, markeredgewidth=1)
last_annotation_time = row['time']
last_rate = row['rate']
pos = pos - 20
last_rate = 0
last_annotation_time = 0
ax4.set_yticks(ticks)
ax4.set_yticklabels(labels)
##############################################
# FLUIDS
##############################################
# get the fluid IN
sql_fluids_in = """
WITH fluids_in_categorised AS (
SELECT
stop as time, --for simplicity assume everything has been infused at the stop time
ordercategoryid,
ordercategory,
fluidin,
CASE
WHEN ordercategoryid IN (
61 --Infuus - Bloedproducten
) THEN '01. Blood products'
WHEN ordercategoryid IN (
65, --2. Spuitpompen
26, --Injecties Tractus Digestivus
25, --Injecties Haematologisch
24, --Injecties Circulatie/Diuretica
15, --Injecties Antimicrobiele middelen
23, --Injecties CZS/Sedatie/Analgetica
55, --Infuus - Crystalloid
27, --Injecties Overig
67 --Injecties Hormonen/Vitaminen/Mineralen
) THEN '02. Crystalloids'
WHEN ordercategoryid IN (
17 --Infuus - Colloid
) THEN '03. Colloids'
WHEN ordercategoryid IN (
114, --Voeding Enteraal
63, --Voeding Enteraal oud
39, --Voeding Parenteraal
42 --Voeding Drinken
) THEN '04. Nutrition'
WHEN ordercategoryid IN (
70, --Niet iv Tractus Respiratorius
119, --Niet IV Groot Volume
21, --Niet iv Antimicrobiele middelen
71, --Niet iv Hormonen/Vitaminen/Mineralen
32, --Niet iv Overig
31, --Niet iv Tractus Digestivus
29, --Niet iv CZS/Sedatie/Analgetica
69 --Niet iv Zalven/Crèmes/Druppels
) THEN '05. Enteral medication'
END AS category
FROM drugitems
WHERE fluidin > 0 --in ml
AND admissionid = {admissionid}
)
SELECT *,
CASE category
WHEN '01. Blood products' THEN 'xkcd:red'
WHEN '02. Crystalloids' THEN 'xkcd:water blue'
WHEN '03. Colloids' THEN 'xkcd:cream'
WHEN '04. Nutrition' THEN 'xkcd:beige'
WHEN '05. Enteral medication' THEN 'xkcd:grey'
END AS colour
FROM fluids_in_categorised
ORDER BY category, time
""".format(admissionid=admissionid)
fluids_in = pd.read_sql(sql_fluids_in, con)
fluids_in['time'] = | pd.to_datetime(fluids_in['time'], unit='ms') | pandas.to_datetime |
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.ccalendar import (
DAYS,
MONTHS,
)
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
from pandas.compat import is_platform_windows
from pandas import (
DatetimeIndex,
Index,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.tools.datetimes import to_datetime
import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets
@pytest.fixture(
params=[
(timedelta(1), "D"),
(timedelta(hours=1), "H"),
(timedelta(minutes=1), "T"),
(timedelta(seconds=1), "S"),
(np.timedelta64(1, "ns"), "N"),
(timedelta(microseconds=1), "U"),
(timedelta(microseconds=1000), "L"),
]
)
def base_delta_code_pair(request):
return request.param
freqs = (
[f"Q-{month}" for month in MONTHS]
+ [f"{annual}-{month}" for annual in ["A", "BA"] for month in MONTHS]
+ ["M", "BM", "BMS"]
+ [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS]
+ [f"W-{day}" for day in DAYS]
)
@pytest.mark.parametrize("freq", freqs)
@pytest.mark.parametrize("periods", [5, 7])
def test_infer_freq_range(periods, freq):
freq = freq.upper()
gen = date_range("1/1/2000", periods=periods, freq=freq)
index = DatetimeIndex(gen.values)
if not freq.startswith("Q-"):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in (
"Q",
"Q-DEC",
"Q-SEP",
"Q-JUN",
"Q-MAR",
)
is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in (
"Q-NOV",
"Q-AUG",
"Q-MAY",
"Q-FEB",
)
is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in (
"Q-OCT",
"Q-JUL",
"Q-APR",
"Q-JAN",
)
assert is_dec_range or is_nov_range or is_oct_range
def test_raise_if_period_index():
index = period_range(start="1/1/1990", periods=20, freq="M")
msg = "Check the `freq` attribute instead of using infer_freq"
with pytest.raises(TypeError, match=msg):
frequencies.infer_freq(index)
def test_raise_if_too_few():
index = DatetimeIndex(["12/31/1998", "1/3/1999"])
msg = "Need at least 3 dates to infer frequency"
with pytest.raises(ValueError, match=msg):
frequencies.infer_freq(index)
def test_business_daily():
index = | DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"]) | pandas.DatetimeIndex |
from os.path import isfile
from pandas import read_csv, DataFrame
class DB:
def __init__(self, csv_path: str):
self.data: DataFrame
self.csv_path = csv_path
if isfile(self.csv_path):
self.data = read_csv(csv_path, memory_map=True)
else:
open(self.csv_path, 'x')
print("created new file at path - '{}'".format(self.csv_path))
self.data = read_csv(csv_path, memory_map=True)
def AddRow(self, item, important_column=None):
if important_column:
ds = self.GetRowByColumnValue(important_column, item[important_column])
if ds is not None and not ds.empty:
self.UpdateRow(item, ds.name)
return
if issubclass(type(item), dict):
self.data = self.data.append(DataFrame(data=item, index=[self.GetUnusedIndex()]),
ignore_index=not important_column)
elif issubclass(type(item), DataFrame):
self.data = self.data.append(item, ignore_index=not important_column)
else:
raise Exception('uncaught item type {}'.format(type(item)))
def UpdateRow(self, item: dict, index: int):
indexes_list = [index for _ in range(1)]
self.data.update(DataFrame(data=item, index=indexes_list), overwrite=True)
def UpdateRowByColumnValue(self, updated_row: dict, column_name: str, column_value):
for row in self.data.iloc:
if row[column_name] == column_value:
self.data.update(DataFrame(data=updated_row, index=[row.name]), overwrite=True)
def GetRowByColumnValue(self, column_name: str, column_value):
frame_result = self.data.loc[self.data[column_name] == column_value]
if frame_result.empty:
return None
row_result = frame_result.iloc[0]
return row_result
def GetUnusedIndex(self):
if self.data.empty:
return 0
return self.data.index.values.max()
def SaveRow(self, row: int):
self.data.to_csv(self.csv_path, index=False)
def __save__(self):
self.data.to_csv(self.csv_path, index=False)
def __load__(self):
self.data = | read_csv(self.csv_path, memory_map=True) | pandas.read_csv |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64encode
from datetime import date, datetime, timedelta
from io import BytesIO
import os
import re
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
font = {'family': 'Open Sans Condensed'}
matplotlib.rc('font',**font)
la = matplotlib.font_manager.FontManager()
lu = matplotlib.font_manager.FontProperties(family = "Open Sans Condensed")
from matplotlib.dates import DateFormatter
import matplotlib.colors as mcolors
class LeanOutputReader(object):
def __init__(self, data, dpi, output):
self.data = data
self.dpi = dpi
self.output = output
# Parse the input file and make sure the input file is complete
self.is_drawable = False
if "Strategy Equity" in data["Charts"] and "Benchmark" in data["Charts"]:
# Get value series from the input file
strategySeries = data["Charts"]["Strategy Equity"]["Series"]["Equity"]["Values"]
benchmarkSeries = data["Charts"]["Benchmark"]["Series"]["Benchmark"]["Values"]
df_strategy = pd.DataFrame(strategySeries).set_index('x')
df_benchmark = pd.DataFrame(benchmarkSeries).set_index('x')
df_strategy = df_strategy[df_strategy > 0]
df_benchmark = df_benchmark[df_benchmark > 0]
df_strategy = df_strategy[~df_strategy.index.duplicated(keep='first')]
df_benchmark = df_benchmark[~df_benchmark.index.duplicated(keep='first')]
df = pd.concat([df_strategy,df_benchmark],axis = 1)
df.columns = ['Strategy','Benchmark']
df = df.set_index( | pd.to_datetime(df.index, unit='s') | pandas.to_datetime |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pandas as pd
import time
from datetime import date
import re
def is_time_format(str_input):
try:
time.strptime(str_input, '%H:%M')
return True
except ValueError:
return False
def scrape_cineman(cities=("Zürich")):
"""
This function opens a Selenium driver, goes to the cineman.ch Showtimes page.
Then it scrapes the entries for a specified city.
Optional argument:
- cities: tuple of strings, specifies the cities for which showtimes will be scraped, defaults to ('Zürich')
Returns:
- content: html code of the page scraped with BeautifulSoup
"""
# Open the driver and go to the page with the showtimes
options = Options()
options.add_argument("--headless")
driver = webdriver.Chrome(options=options)
driver.get("https://www.cineman.ch/en/showtimes/time/")
# Click the cookie button
cookie_button = driver.find_element_by_class_name("cc-btn")
cookie_button.click()
time.sleep(2)
# Click the region dropdown, select the city and save
region_dropdown = driver.find_element_by_class_name("selectize-control")
region_dropdown.click()
input_div = driver.find_elements_by_xpath('//input[@type="text"]')
for city in cities:
input_div[6].send_keys(city)
input_div[6].send_keys(Keys.RETURN)
save_button = driver.find_element_by_class_name("select-region-save")
save_button.click()
time.sleep(5)
# Scrape the content and close the driver
content = BeautifulSoup(driver.page_source, features="html.parser")
driver.close()
return content
def format_cineman_content(html_content):
"""
This function takes in content scraped from cineman.ch with BeautifulSoup and creates a dataframe from it.
Required arguments:
- content: html contents scraped from cineman.ch
Returns:
- movie_program_df: pandas dataframe containing the scraped data
"""
movies = html_content.findAll("div", {"class": "col-xs-12 col-sm-9"})
# Now create the data frame
# Initialize the lists to store the details for each movie screening
movies_list = []
genres_list = []
cinemas_list = []
places_list = []
age_limits = []
all_showtimes_lists = []
all_languages_lists = []
for movie in movies:
# Movie title
title = movie.find("h4").get_text()
movies_list.append(title)
# Movie genre
genre = movie.find("p").get_text()
genres_list.append(genre)
# Cinemas and place
cinemas = movie.findAll("h5")
cinema_names = []
places = []
for cinema in cinemas:
cinema_name = cinema.find("em").get_text()
cinema_names.append(cinema_name)
place = cinema.findAll("a")[1].get_text()
places.append(place)
cinemas_list.append(cinema_names)
places_list.append(places)
# Age limit
age_links = movie.findAll("a", {"class": "link"})
age_limit = age_links[-1].get_text()
if age_limit == "Reservation":
age_limit = age_links[-2].get_text()
if age_limit.find("Y.") == -1:
age_limits.append("unknown")
else:
age_limits.append(age_limit)
# Showtimes and languages
showtimes_list_div = movie.find("div", {"class": "showtimes-list"})
showtimes_string = showtimes_list_div.prettify().split("h5")
showtimes_list = []
languages_list = []
for string in showtimes_string:
strings = re.sub('<[^<]+?>\n', '', string).split(" ")
showtimes = []
languages = []
for s in strings:
s = s.strip("<></–)")
s = re.sub("\t", "", s)
s = s.strip()
if is_time_format(s):
showtimes.append(s)
elif (s.find("/") != -1 and s.find("Y.") == -1) or s in ["G", "F", "O", "I", "E"]:
languages.append(s)
if showtimes:
showtimes_list.append(showtimes)
if languages:
languages_list.append(languages)
if showtimes_list:
all_showtimes_lists.append(showtimes_list)
if languages_list:
all_languages_lists.append(languages_list)
# Initializing the dictionary to store the lists
all_info_dict = dict()
all_info_dict["movie"] = movies_list
all_info_dict["genre"] = genres_list
all_info_dict["age_limit"] = age_limits
all_info_dict["language"] = all_languages_lists
all_info_dict["showtime"] = all_showtimes_lists
all_info_dict["date"] = f'{date.today()}'
all_info_dict["cinema"] = cinemas_list
all_info_dict["place"] = places_list
movie_program_df = | pd.DataFrame(all_info_dict) | pandas.DataFrame |
import os
import sys
import glob
import random
import numpy as np
np.random.seed(23087)
import pandas as pd
import tensorflow as tf
from keras import backend as k
from keras.utils import np_utils
from keras.optimizers import Adam
from keras.models import Sequential, load_model
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import train_test_split
from keras.layers.pooling import GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.layers import Dropout, Activation, Dense, Flatten
from keras.layers.convolutional import Convolution1D,AveragePooling1D,MaxPooling1D
'''
###################################
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Only allow a total of half the GPU memory to be allocated
config.gpu_options.per_process_gpu_memory_fraction = 0.5
# Create a session with the above options specified.
k.tensorflow_backend.set_session(tf.Session(config=config))
###################################
'''
def train(argv):
#Params
epochs = 500
batch_size = 512
train_test_percent = 0.15 #optional
folds = 10
max_aug = int(argv[2])
Mn_All,labels = load_data()
for shift_aug_factor in range(1, max_aug+1):
print("Performing {}x shifting data augmentation".format(shift_aug_factor))
if argv[3] != None:
root_path = os.path.join("weights","cross_validation_results", argv[3]+str("_Shift_dataaug-x")+str(shift_aug_factor))
if not os.path.exists(root_path):
os.mkdir(root_path)
for fold in range(folds):
model = build_neural_network_graph(graph_type=argv[1])
(X_train, y_train), (X_test, y_test) = preprocess_crossval_aug(Mn_All, labels, shift_aug_factor, cv=True,
fold=fold,n_splits=folds, crop_spectra=True, pca_aug = False)
save_dir = os.path.join(root_path,"weights_"+str(fold))
if not os.path.exists(save_dir):
os.mkdir(save_dir)
best_model_file = save_dir+"/highest_val_acc_weights_epoch{epoch:02d}-val_acc{val_acc:.3f}_.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True)
hist = model.fit(X_train, y_train, validation_data=(X_test, y_test),
nb_epoch=epochs, batch_size=batch_size,
callbacks = [best_model], shuffle = True, verbose=1)
training_graphs(save_dir, hist)
chemical_shift_test_acc=run_eval(root_path,Mn_All,labels,folds,shift_aug_factor)
pd.DataFrame(chemical_shift_test_acc).to_csv(os.path.join(root_path,"chemical_shifts_acc.csv"), index=False, header=False)
def run_eval(root_path,Mn_All,labels,folds,shift_aug_factor):
print( root_path)
weight = load_best_weights(model=root_path)
num_bins_translate = 100
num_classes = 3
total_confusion_matrix=np.zeros((num_classes,num_classes))
acc = np.zeros(num_bins_translate)
chemical_shift_test_acc= []
for fold in range(folds):
model = load_model(weight[fold])
(X_train, y_train), (X_test, y_test) = preprocess_crossval_aug(Mn_All, labels, 0, cv=True, fold=fold,n_splits=folds, crop_spectra=False, pca_aug = False)
run = chemical_shift_test(X_test, y_test, model, num_bins_translate)[:,1]
acc += run
print( run)
total_confusion_matrix += confusion_matrix_generator(X_test[:,200:500], y_test, model)
chemical_shift_test_acc.append(acc/len(weight))
for i in range(len(chemical_shift_test_acc)):
plot_chemical_shift_test(root_path, num_bins_translate, chemical_shift_test_acc[i])
total_cv_confusion_matrix_generator(total_confusion_matrix)
return chemical_shift_test_acc
def preprocess_crossval_aug(x, y, shift_aug_factor, cv=True, fold=None, n_splits=0, train_test_percent=0.25, crop_spectra=True, pca_aug = False):
if cv == True:
from sklearn.model_selection import StratifiedKFold
cv = StratifiedKFold(n_splits=n_splits, random_state=13, shuffle=False)
X_train = [x[train_index] for train_index, test_index in cv.split(x, y)]
X_test = [x[test_index] for train_index, test_index in cv.split(x, y)]
y_train = [y[train_index] for train_index, test_index in cv.split(x, y)]
y_test = [y[test_index] for train_index, test_index in cv.split(x, y)]
else:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=train_test_percent,
random_state=13, stratify=y)
if fold != None:
X_train, X_test, y_train, y_test = X_train[fold], X_test[fold], y_train[fold], y_test[fold]
print("Samples will be from fold", (fold+1), " out of the", n_splits, " n_splits")
print('Param train_test_percent will be ignored since folds are being used.')
if pca_aug == True:
from sklearn.decomposition import PCA
noise = np.copy(X_train)
mu = np.mean(noise, axis=0)
pca = PCA()
noise_model = pca.fit(noise)
nComp = 10
Xhat = np.dot(pca.transform(noise)[:,:nComp], pca.components_[:nComp,:])
noise_level = np.dot(pca.transform(noise)[:,nComp:], pca.components_[nComp:,:])
Xhat += mu
snr_num = 200
SNR = np.linspace(0,10,snr_num)
noise_aug = []
for i in range(len(SNR)):
noise_aug.append(SNR[i]*noise_level + Xhat)
j = 0
for spectra in noise_aug[i]:
noise_aug[i][j] = spectra/np.max(spectra)
j += 1
X_train = np.array(noise_aug).reshape(snr_num*X_train.shape[0], X_train.shape[1])
y_train = [item for i in range(snr_num) for item in y_train]
lower_bound,upper_bound=200,500
X_train, X_test, y_train, y_test = preprocess(X_train, X_test, y_train, y_test, lower_bound, upper_bound,
shift_aug_factor=shift_aug_factor, crop=crop_spectra ,mean_center = True, norm = True )
return (X_train, y_train), (X_test, y_test)
def preprocess(X_train, X_test, y_train, y_test, lower_bound, upper_bound, shift_aug_factor=None, crop=True, mean_center = False, norm = True):
X_train = np.array(X_train).astype('float32')
X_test = np.array(X_test).astype('float32')
if mean_center == True:
X_train -= np.mean(X_train)
X_test -= np.mean(X_test)
print( 'Data mean-centered')
if norm == True:
X_train /= np.max(X_train)
X_test /= np.max(X_test)
print( 'Data normalized')
if shift_aug_factor != 0:
print( "DATA AUG SHIFT")
cropX_train,cropX_test=[],[]
for i in range(len(X_train)):
for j in range(shift_aug_factor):
draw = int(random.random()*100)
cropX_train.append(X_train[i,150+draw:450+draw])
X_train = np.array(cropX_train).reshape(len(X_train)*shift_aug_factor, 300)
X_test = X_test[:,lower_bound:upper_bound]
y_train = sorted([item for i in range(shift_aug_factor) for item in y_train])
elif crop==True and shift_aug_factor==1:
X_train = X_train[:,lower_bound:upper_bound] #for test set, cropping to 635-665 eV, closer to qualitative
X_test = X_test[:,lower_bound:upper_bound] #for test set, cropping to 635-665 eV, closer to qualitative
elif shift_aug_factor==0:
pass
else:
X_train = X_train[:,lower_bound:upper_bound] #for test set, cropping to 635-665 eV, closer to qualitative
X_test = X_test[:,lower_bound:upper_bound] #for test set, cropping to 635-665 eV, closer to qualitative
X_test = X_test.reshape(X_test.shape + (1,))
X_train = X_train.reshape(X_train.shape + (1,))
y_train = np.array(y_train)
y_test = np.array(y_test)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print( 'Data one-hot encoded')
print("Total of "+str(y_test.shape[1])+" classes.")
print("Total of "+str(len(X_train))+" training samples.")
print("Total of "+str(len(X_test))+" testing samples.")
return X_train, X_test, y_train, y_test
def chemical_shift_test(X_test, y_test, model, num_bins_translate):
accuracy_on_shifted_test_set = [model.evaluate(x = X_test[:,150+i:450+i], y = y_test, verbose=0 ) for i in range(num_bins_translate)]
accuracy_on_shifted_test_set = np.array(accuracy_on_shifted_test_set)
return accuracy_on_shifted_test_set
def plot_chemical_shift_test(path_to_output, num_bins_translate, acc1):
delta_E = np.linspace(-5.,5.,100)
plt.figure(1, figsize=(10,6))
plt.plot(delta_E, acc1, 'b', label='Dense Network with Shift Aug.', linewidth = 2)
plt.axvline(x=0, c = 'black')
plt.ylabel('10-fold Cross Validation Test Accuracy')
plt.xlabel('Chemical Shift (eV)')
#plt.legend(loc = 'lower left')
plt.savefig(os.path.join(path_to_output, 'chemical_shift_test.png'))
plt.close()
def load_best_weights(model):
root_path = model
weight_folds=sorted(next(os.walk(root_path))[1])
weights=[]
for fold in weight_folds:
files_path = os.path.join(root_path, fold, '*.h5')
cv_weights = sorted(glob.iglob(files_path), key=os.path.getctime, reverse=True)
weights.append(cv_weights[0])
return weights
def confusion_matrix_generator(X_test, y_test, model):
y_test_pred, y_test_labels=[], []
for i in range(len(X_test)):
y_test_pred.append(np.argmax(model.predict(X_test[i:i+1])))
y_test_labels.append(np.argmax(y_test[i]))
print("Confusion Matrix of Test Set")
conf_matrix = pd.DataFrame(confusion_matrix(y_pred=y_test_pred, y_true=y_test_labels))
conf_matrix.columns = ["Mn2+", "Mn3+", "Mn4+" ]
conf_matrix = | pd.DataFrame.transpose(conf_matrix) | pandas.DataFrame.transpose |
import sys
import time
import pandas as pd
import numpy as np
import copyreg, types
from tqdm import tqdm
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-talk')
plt.style.use('bmh')
#plt.rcParams['font.family'] = 'DejaVu Sans Mono'
plt.rcParams['font.size'] = 9.5
plt.rcParams['font.weight'] = 'medium'
# =======================================================
# Symmetric CUSUM Filter [2.5.2.1]
def getTEvents(gRaw, h):
"""cusum filter
args
----
gRaw: array-like
h: int() or float()
returns
-------
pd.DatetimeIndex()
"""
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna().abs()
for i in tqdm(diff.index[1:]):
try:
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
except Exception as e:
print(e)
print(sPos+diff.loc[i], type(sPos+diff.loc[i]))
print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))
break
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h:
sNeg=0;tEvents.append(i)
elif sPos>h:
sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
# =======================================================
# Daily Volatility Estimator [3.1]
## for wtvr reason dates are not aligned for return calculation
## must account for it for computation
def getDailyVol(close,span0=100):
# daily vol reindexed to close
df0=close.index.searchsorted(close.index- | pd.Timedelta(days=1) | pandas.Timedelta |
import json
import boto3 as bt
import pandas as pd
from io import StringIO
import mechanize
from lxml import etree
import numpy as np
from datetime import date
import re
import time
import os
def lambda_handler(event, context):
start_time = time.time()
client = bt.client(
's3',
aws_access_key_id= os.getenv("ACCESS_KEY"),
aws_secret_access_key= os.getenv("SECRET_KEY")
)
ssm_client = bt.client('ssm',
region_name="eu-west-2",
aws_access_key_id= os.getenv("ACCESS_KEY"),
aws_secret_access_key= os.getenv("SECRET_KEY")
)
#Get a table of the IDs of previously downloaded jobs (these are not downloaded again - and this
#file is updated at the end of this code)
previous_ids = client.get_object(Bucket="civil-service-jobs", Key="existing_refs.csv")
body = previous_ids['Body']
csv_string = body.read().decode('utf-8')
previous_ids_df = pd.read_csv(StringIO(csv_string))
#User name and password is stored in parameter store and is used to log into Civil Service jobs
csj_username = ssm_client.get_parameter(
Name='/CivilServiceJobsExplorer/Toby/csjemail', WithDecryption=True)
csj_password = ssm_client.get_parameter(
Name='/CivilServiceJobsExplorer/Toby/csjpassword', WithDecryption=True)
#Log in at the log in page
br = mechanize.Browser()
br.open("https://www.civilservicejobs.service.gov.uk/csr/login.cgi")
br.select_form(nr=1)
br.form['username'] = csj_username['Parameter']['Value']
br.form['password_login_window'] = csj_password['Parameter']['Value']
req = br.submit()
search_url = "https://www.civilservicejobs.service.gov.uk/csr/index.cgi"
#Preform a search of jobs that covers all of the UK and overseas
br.open(search_url)
br.select_form(nr=1)
br.form['postcode'] = "Birmingham"
br.form['postcodedistance'] = ["600"]
br.form['postcodeinclusive'] = ["1"]
##Serach and extract html of search results
req = br.submit()
html = req.read()
tree = etree.HTML(html)
#Gets a list of all the search pages
link_elements = tree.xpath("//div//div//div//a")
link_urls = [link.get('href') for link in link_elements]
link_titles = [link.get('title') for link in link_elements]
links = tuple(zip(link_urls,link_titles))
links = [page for page in links if page[1] is not None]
#This line finds those links that are search pages and removes duplicates
search_links = list(dict.fromkeys([page[0] for page in links if
page[1].find("Go to search results") != -1])) + [req.geturl()]
basic_advert_results = []
for (i, page) in zip(range(1,len(search_links)+1), search_links):
#This loop goes to each page in the search links and converts
#the data there into a narrow dataframe of ref, variable,value
print("Searching page " + str(i) + " of " + str(len(search_links)))
open_page = br.open(page)
html = open_page.read()
tree = etree.HTML(html)
xpath = "//ul//li//div | //ul//li//div//a"
elements = tree.xpath(xpath)
link = [link.get('href') for link in elements]
node_class = [link.get('class') for link in elements]
text = [link.text for link in elements]
df = pd.DataFrame(data = list(zip(link, node_class, text)),
columns = ["link", "variable", "value"])
df['job_ref'] = np.where(df['variable'] == "search-results-job-box-refcode", df['value'], None)
df['job_ref'] = df['job_ref'].bfill() #upfill references
df['job_ref'] = df['job_ref'].str.replace('Reference: ' ,'')
#links are treated seperately as they are part of the href under the job title element
links = df[~df['link'].isnull()]
links = links[links['link'].str.contains("https://www.civilservicejobs.service.gov.uk/csr/index.cgi")]
links['variable'] = "link"
links = links[["job_ref","variable","link"]]
links = links.rename(columns = {"link":"value"})
df['link'].fillna("", inplace = True)
df['variable'].fillna("title", inplace = True)
df = df[(df['variable'].str.contains("search-results-job-box-")) | (df['link'].str.contains("https://www.civilservicejobs.service.gov.uk/csr/index.cgi")) ]
df = df[~df['value'].isnull()]
df['variable'] = df['variable'].str.replace('search-results-job-box-','')
df['variable'] = df['variable'].str.replace('stage','approach')
df = df[["job_ref","variable","value"]]
page_data = df.append(links, sort=False)
basic_advert_results.append(page_data)
basic_data = pd.concat(basic_advert_results, sort=False)
#filter jobs to new jobs
basic_new_data = basic_data[~basic_data["job_ref"].isin(previous_ids_df['job_ref'])]
basic_new_data = basic_new_data[basic_new_data['job_ref'].notnull()]
full_advert_results = []
new_links = basic_new_data[basic_new_data['variable'] == "link"]
for (i, page, job_ref) in zip(range(1,len(new_links)+1), new_links['value'], new_links['job_ref'] ):
#itterate over new links and get full jobs
print("Scraping page " + str(i) + " of " + str(len(new_links)))
open_page = br.open(page)
html = open_page.read()
tree = etree.HTML(html)
elements = tree.cssselect('.vac_display_field_value , h3')
node_tag = [e.tag for e in elements]
node_text = [etree.tostring(e, encoding='unicode', method='text') for e in elements]
node_html = [etree.tostring(e, encoding='unicode', method='html') for e in elements]
df = pd.DataFrame(list(zip(node_tag, node_text, node_html)),
columns =['tag', 'text','html'])
#h3 elements are assumed to be teh variable headings and other elements (divs) and taken as the values
#the values for a given heading are all the divs (that match the cssselect) below that heading, but
#before another heading
df['variable'] = np.where(df['tag'] == "h3", df['text'], None)
df['variable'] = df['variable'].ffill()
df['text'] = df['text'].str.strip().replace(r'\\n',' ')
df['text'] = df['text'].apply(lambda x: re.sub(r'\x95',"",x))
df['text'] = df['text'].apply(lambda x: re.sub(r'\t'," ",x))
df['text'] = df['text'].apply(lambda x: re.sub(r'\r'," ",x))
#This html stuff is just here to handle roll types
df['html'] = df['html'].apply(lambda x: re.sub("<div class=\"vac_display_field_value\">","",x))
df['html'] = df['html'].apply(lambda x: re.sub("</div>","",x))
df['html'] = df['html'].apply(lambda x: re.sub("<br>","!!!",x))
df['text'] = np.where(df['variable'] == "Type of role", df['html'], df['text'])
df['variable'] = df['variable'].str.strip()
df = df[df['tag'] != "h3"]
df['value'] = df.groupby(['variable'])['text'].transform(lambda x : "!!!".join(x))
df = df[["variable","value"]]
df = df.drop_duplicates()
df['job_ref'] = job_ref
df = df.append(
{"variable": "date_downloaded", "value": str(date.today()) , "job_ref": job_ref},
ignore_index=True )
#need to check the time in here and if there is not enought time to save - is shoudl quite the looos
#And then filter the basic data to the full data that it has managed to donwload before the concat
full_advert_results.append(df)
time_running = time.time() - start_time
if time_running >= 740:
print("time out")
break
#Join all new full advert dataframes together
if(len(full_advert_results) < 1):
return
full_advert_data = pd.concat(full_advert_results, sort=False)
basic_new_data = basic_data[basic_data["job_ref"].isin(full_advert_data['job_ref'])]
full_and_basic_data = | pd.concat([full_advert_data, basic_new_data], sort=False) | pandas.concat |
from flask import Flask
from flask import request
import pandas as pd
app = Flask(__name__)
DATA_FILE_NAME = "client_rate.json"
@app.route("/")
def default():
return "FIRST PROJECT - we have " + str(len(get_client_rates())) + " clients in total."
def get_client_rates():
df = pd.read_json(DATA_FILE_NAME)
return df.to_dict()
@app.route("/rate/<client_id>")
def get_client_rate(client_id) -> str:
rates = get_client_rates()
if client_id in rates:
return str(rates[client_id]["rate"])
return "0"
@app.route("/rate", methods=['POST'])
def upsert_client_rate() -> str:
param = request.get_json()
client_id = param['client_id']
rate = param['rate']
update_client_rates(client_id, rate)
return str(param)
def update_client_rates(client_id: str, rate: float) -> None:
"""
update or insert a client_id - rate pair.
:param client_id: string, e.g. 'client1'
:param rate: float, e.g. 0.1
:return:
"""
rates = get_client_rates()
rates[client_id] = {"rate": rate}
df = | pd.DataFrame.from_dict(rates) | pandas.DataFrame.from_dict |
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
# list of tupels (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
)
tm.assert_frame_equal(frame, expected)
def test_frame_from_records_utc(self):
rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index="begin_time")
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal( | DataFrame.from_records(arr2) | pandas.DataFrame.from_records |
import os
from datetime import datetime as dt
from datetime import timedelta as td
from uuid import uuid4
import json
import pandas as pd
import numpy as np
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.config import Config
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty, ObjectProperty, BooleanProperty
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.label import Label
from kivy.uix.modalview import ModalView
from kivy.uix.filechooser import FileChooserListView
import drive as drive
Config.set("graphics", "width", "800")
Config.set("graphics", "height", "480")
Config.set("graphics", "borderless", "1")
TEST_MODE = True
plant_data_path = os.path.join(".", "data", "plants_data.csv")
if TEST_MODE:
jobs_file_path = os.path.join(".", "test_files", "test_jobs.json")
log_file_path = os.path.join(".", "test_files", "test_logs.txt")
else:
jobs_file_path = os.path.join(".", "data", "jobs_data.json")
log_file_path = os.path.join(".", "data", "logs.txt")
if os.path.isfile(plant_data_path):
plant_data = pd.read_csv(plant_data_path)
else:
plant_data = pd.DataFrame(
columns=["experiment", "plant_name", "position", "allow_capture"]
)
if os.path.isfile(jobs_file_path):
with open(jobs_file_path, "r") as f:
jobs_data = json.load(f)
else:
jobs_data = {"jobs": []}
if os.path.isfile(log_file_path):
with open(log_file_path, "r") as f:
log_data = [
line.replace("\n", "") for line in f.readlines() if line != "\n"
]
else:
log_data = []
class RootWidget(BoxLayout):
pass
class ModalDialog(ModalView):
modal_dialog_title = ObjectProperty(None)
modal_dialog_body = ObjectProperty(None)
class SelectableRecycleBoxLayout(
FocusBehavior, LayoutSelectionBehavior, RecycleBoxLayout
):
""" Adds selection and focus behavior to the view. """
class SelectableLabel(RecycleDataViewBehavior, Label):
""" Add selection support to the Label """
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
guid = StringProperty("")
def refresh_view_attrs(self, rv, index, data):
""" Catch and handle the view changes """
self.index = index
return super(SelectableLabel, self).refresh_view_attrs(rv, index, data)
def on_touch_down(self, touch):
""" Add selection on touch down """
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
""" Respond to the selection of items in the view. """
self.selected = is_selected
if self.selected and hasattr(rv, "page"):
if hasattr(rv.page, "update_data"):
rv.page.update_data(guid=self.guid)
elif hasattr(rv.page, "update_plants"):
rv.page.update_plants(experiment=self.text)
class MyPageManager(ScreenManager):
bt_back = ObjectProperty(None)
lbl_title = ObjectProperty(None)
lbl_info = ObjectProperty(None)
lbl_status = ObjectProperty(None)
def on_back(self):
self.current_screen.back()
def back_text(self):
return self.current_screen.back_text
def format_text(
self, text: str, is_bold: bool = False, font_size: int = 0
):
prefix = "[b]" if is_bold else ""
prefix += f"[size={font_size}]" if font_size > 0 else ""
suffix = "[/b]" if is_bold else ""
suffix += "[/size]" if font_size > 0 else ""
return f"{prefix}{text}{suffix}"
def set_active_page(self, new_page_name: str, direction: str):
self.transition.direction = direction
self.current = new_page_name
self.bt_back.text = self.current_screen.back_text
self.lbl_title.text = self.format_text(
self.current_screen.title_text, is_bold=True, font_size=30
)
self.lbl_info.text = self.current_screen.info_text
self.lbl_status.text = ""
class StartUpPage(Screen):
back_text = "< Exit"
title_text = "Start up"
info_text = "Welcome to RobotRacine control center"
modal_dialog = None
def dialog_callback(self, instance):
if instance.modal_result == 1:
plant_data.to_csv(plant_data_path, index=False)
with open(os.path.join(".", "data", "logs.txt"), "w") as f:
f.writelines(log_data)
with open(os.path.join(".", "data", "jobs_data.json"), "w") as f:
json.dump(jobs_data, f, indent=2)
App.get_running_app().stop()
return False
def back(self):
self.modal_dialog = ModalDialog()
self.modal_dialog.modal_dialog_title.text = self.manager.format_text(
text="Confirmation required", is_bold=False, font_size=0
)
self.modal_dialog.modal_dialog_body.text = (
"Quit program?\nAll jobs will be stopped."
)
self.modal_dialog.bind(on_dismiss=self.dialog_callback)
self.modal_dialog.open()
class MyScreen(Screen):
def back(self):
self.manager.set_active_page(
new_page_name=self.back_target, direction="right"
)
class ManualRoot(MyScreen):
back_text = "< Back"
back_target = "start_up"
title_text = "Manual Override"
info_text = "Take control of the robot"
def go_home(self):
self.manager.lbl_status.text = "Going Home"
drive.go_home()
self.manager.lbl_status.text = "Went Home"
def run(self):
self.manager.lbl_status.text = "Engine On"
drive.run()
def go_next(self):
self.manager.lbl_status.text = "Setting next plant"
drive.go_next()
self.manager.lbl_status.text = "Next plant set"
def stop(self):
drive.stop()
self.manager.lbl_status.text = "Engine stopped"
class ManualCapture(MyScreen):
back_text = "< Back"
back_target = "manual_root"
title_text = "Manual Capture"
info_text = "Set the camera and take snapshots"
bt_back = ObjectProperty(None)
def snap(self):
self.manager.lbl_status.text = "Oh, snap"
class Jobs(MyScreen):
back_text = "< Back"
back_target = "start_up"
title_text = "Jobs"
info_text = "All about jobs"
class PlantSelector(ModalView):
available_plants = ObjectProperty(None)
selected_plants = ObjectProperty(None)
selected_plants_list = []
job = None
def update_list_views(self):
self.ids["available_plants"].data = [
{"text": j}
for j in [
plant
for plant in plant_data.plant_name.unique()
if plant not in self.selected_plants_list
]
]
self.ids["selected_plants"].data = [
{"text": j} for j in self.selected_plants_list
]
def add_to_selection(self):
selected_nodes = self.ids[
"available_plants"
].layout_manager.selected_nodes
self.ids["available_plants"].layout_manager.selected_nodes = []
if selected_nodes:
self.selected_plants_list.extend(
[
self.ids["available_plants"].data[i]["text"]
for i in selected_nodes
]
)
self.update_list_views()
def remove_from_selection(self):
selected_nodes = [
self.ids["selected_plants"].data[i]["text"]
for i in self.ids["selected_plants"].layout_manager.selected_nodes
]
self.ids["selected_plants"].layout_manager.selected_nodes = []
if selected_nodes:
self.selected_plants_list = [
p for p in self.selected_plants_list if p not in selected_nodes
]
self.update_list_views()
class JobsManage(MyScreen):
back_text = "< Back"
back_target = "jobs"
title_text = "Manage jobs"
info_text = "About the jobs..."
jobs_list = "Take a snapshot"
job_description = ObjectProperty(None)
def init_jobs(self):
set_index = not self.jobs_list.data
self.jobs_list.data = [
{"text": j["name"], "guid": j["guid"]} for j in jobs_data["jobs"]
]
if set_index and self.jobs_list.data:
self.jobs_list.layout_manager.selected_nodes = [0]
def get_current_description(self):
return "description"
def get_job(self, guid):
for job in jobs_data["jobs"]:
if job["guid"] == guid:
return job
else:
return None
def get_job_index(self, guid):
for i, job in enumerate(jobs_data["jobs"]):
if job["guid"] == guid:
return i
else:
return -1
def new_job(self):
if not jobs_data:
jobs_data["jobs"] = []
jobs_data["jobs"].append(
{
"name": "Job " + dt.now().strftime("%Y%m%d %H:%M:%S"),
"state": "active",
"guid": str(uuid4()),
"description": "",
"owner": "",
"mail_to": "",
"repetition_mode": "every",
"repetition_value": 6,
"repetition_unit": "hours",
"timestamp_start": dt.now().strftime("%Y%m%d %H:%M:%S"),
"timestamp_end": (dt.now() + td(days=14)).strftime(
"%Y%m%d %H:%M:%S"
),
"plants": "",
}
)
self.init_jobs()
self.update_data(guid=jobs_data["jobs"][-1]["guid"])
self.jobs_list.layout_manager.selected_nodes = [
len(jobs_data["jobs"]) - 1
]
def update_data(self, guid):
job = self.get_job(guid=guid)
if job is None:
return
self.job_name.input_text = job["name"]
self.job_guid = guid
self.job_state.text = "" if job["state"] == "active" else "paused"
self.job_description.input_text = job["description"]
self.job_owner.input_text = job["owner"]
self.job_mail_list.input_text = "; ".join(job["mail_to"])
self.time_mode.text = job["repetition_mode"]
self.time_value.text = str(job["repetition_value"])
self.time_unit.text = job["repetition_unit"]
self.date_start.text = job["timestamp_start"]
self.date_end.text = job["timestamp_end"]
self.job_plant_list.text = ";".join(job["plants"])
def save_job(self, guid):
index = self.get_job_index(guid=guid)
if index < 0:
return
jobs_data["jobs"][index]["name"] = self.job_name.text_holder.text
jobs_data["jobs"][index]["state"] = (
"paused" if self.job_state.text == "paused" else "active"
)
jobs_data["jobs"][index][
"description"
] = self.job_description.text_holder.text
jobs_data["jobs"][index]["owner"] = self.job_owner.text_holder.text
jobs_data["jobs"][index][
"mail_to"
] = self.job_mail_list.text_holder.text.replace(" ", "").split(";")
jobs_data["jobs"][index]["repetition_mode"] = self.time_mode.text
try:
jobs_data["jobs"][index]["repetition_value"] = int(
self.time_value.text
)
except:
jobs_data["jobs"][index]["repetition_value"] = 0
jobs_data["jobs"][index]["repetition_unit"] = self.time_unit.text
jobs_data["jobs"][index]["timestamp_start"] = self.date_start.text
jobs_data["jobs"][index]["timestamp_end"] = self.date_end.text
def resume_job(self, guid):
job = self.get_job(guid=guid)
if job is None:
return
job["state"] = "active"
self.update_data(guid=guid)
def pause_job(self, guid):
job = self.get_job(guid=guid)
if job is None:
return
job["state"] = "paused"
self.update_data(guid=guid)
def delete_job(self, guid):
index = self.get_job_index(guid=guid)
if index < 0:
return
jobs_data["jobs"].pop(index)
self.init_jobs()
self.update_data(guid=jobs_data["jobs"][-1]["guid"])
def close_plant_selection(self, instance):
if instance.modal_result == 1:
current_plants = [
d["text"] for d in instance.ids["selected_plants"].data
]
instance.job["plants"] = current_plants
self.update_data(guid=instance.job["guid"])
return False
def select_plants(self, guid):
job = self.get_job(guid=guid)
if job is None:
return
self.plant_selector = PlantSelector()
pl = job["plants"][:]
if not pl:
pl = []
self.plant_selector.selected_plants_list = pl
self.plant_selector.job = job
self.plant_selector.update_list_views()
self.plant_selector.bind(on_dismiss=self.close_plant_selection)
self.plant_selector.open()
class JobsLog(MyScreen):
back_text = "< Back"
back_target = "jobs"
title_text = "Jobs log"
info_text = "Logs, logs everywhere"
def init_logs(self):
self.log_text.text = "\n".join(reversed(log_data))
class FileLoader(ModalView):
file_name = ObjectProperty(None)
class DataIn(MyScreen):
back_text = "< Back"
back_target = "start_up"
title_text = "Data In"
info_text = "View/add/remove Data In files"
experiments_list = ObjectProperty(None)
plants_list = ObjectProperty(None)
def init_experiments(self):
self.experiments_list.data = [
{"text": j} for j in plant_data.experiment.unique()
]
def close_file_selection(self, instance):
if instance.modal_result == 1:
try:
new_df = | pd.read_csv(instance.ids["file_name"].text) | pandas.read_csv |
import cleaning_string
import codecs
import pandas as pd
import subprocess
import nltk
import re as regex
import string
import collections
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.wsd import lesk
from nltk.corpus import wordnet
import operator
import numpy as np
from nltk.stem.snowball import SnowballStemmer
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV
import time
import gensim
from gensim import corpora
import csv
import _pickle as cPickle
from nltk.corpus import words as wd
def main():
string=str(input('enter tweet'))
string =[string]
file = codecs.open('data.txt', 'w', 'utf-8')
doc_list=[]
doc_clean=cleaning_string.get_clean_docs(string)
for doc in doc_clean:
document_text=" ".join(doc)
file.write(document_text+'\n')
file.close()
p = subprocess.Popen(["sh", "runExample_1.sh"], shell=True)
print(p.communicate())
result_doc=open("C:/Users/brohi/OneDrive/Desktop/BTM-master/output/model/k5.pz_d",'r')
topic=[]
for doc in result_doc.readlines():
doc_dist=doc.split()
topic.append(doc_dist.index(max(doc_dist)))
df = pd.DataFrame({'text':string})
se = pd.Series(topic)
df['label'] = se
df.to_csv('output_btm.csv')
# Data Pre-processing for Sentiment Analysis
data = TwitterData_Initialize()
data.initialize("./script/output_btm.csv")
data.processed_Traindata
nltk.download('words')
word_dictionary = list(set(wd.words()))
for alphabet in "bcdefghjklmnopqrstuvwxyzBCDEFGHJKLMNOPQRSTUVWXYZ":
word_dictionary.remove(alphabet)
words = collections.Counter()
for idx in data.processed_Traindata.index:
words.update(data.processed_Traindata.loc[idx, "text"])
stopwords = nltk.corpus.stopwords.words("english")
whitelist = ["n't", "not"]
for idx, stop_word in enumerate(stopwords):
if stop_word not in whitelist:
del words[stop_word]
words.most_common(5)
data = WordList(data)
data.buildWordlist()
data = BagOfWords(data)
bow, labels = data.buildDataModel()
bow.head(5)
data = ExtraFeatures()
data.initialize("./output_btm.csv")
data.build_features()
data.cleaningData(DataPreprocessing())
data.tokenize()
data.stem()
data.buildWordlist()
data_model, labels = data.build_data_model()
print (data_model)
# Load Naive-Bayes Library
with open('../model/NaiveBayesClassifier.pkl', 'rb') as nid:
nb_loaded = cPickle.load(nid)
with open('../model/RandomForestClassifier.pkl', 'rb') as rid:
rf_loaded = cPickle.load(rid)
result_nb = nb_loaded.predict(data_model)
print(type(result_nb))
print("Naive-Bayes Prediction : ", result_nb)
result_rf = rf_loaded.predict(data_model)
print("Random Forest Prediction : ", result_rf)
df_csv = pd.read_csv("./output_btm.csv")
df_csv['NaiveBayesSentiment'] = pd.DataFrame(result_nb)
df_csv['RandomForestSentiment'] = pd.DataFrame(result_rf)
df_csv.to_csv("./output_btm.csv")
# Detecting Emoticons
class EmoticonDetector:
emoticons = {}
def __init__(self, emoticon_file="../data/emoticons.txt"):
from pathlib import Path
content = Path(emoticon_file).read_text()
positive = True
for line in content.split("\n"):
if "positive" in line.lower():
positive = True
continue
elif "negative" in line.lower():
positive = False
continue
self.emoticons[line] = positive
def is_positive(self, emoticon):
if emoticon in self.emoticons:
return self.emoticons[emoticon]
return False
def is_emoticon(self, to_check):
return to_check in self.emoticons
class TwitterData_Initialize():
processed_Traindata = []
wordlist = []
data_model = None
data_labels = None
def initialize(self, csv_file, from_cached=None):
if from_cached is not None:
self.data_model = pd.read_csv(from_cached)
return
self.processed_Traindata = | pd.read_csv(csv_file, usecols=[0, 1]) | pandas.read_csv |
import pandas as pd
from sklearn import cluster
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
def k_means(data_set, output_file, png_file, t_labels, score_file, set_name):
model = cluster.KMeans(n_clusters=4, max_iter=100, n_jobs=4, init="k-means++")
model.fit(data_set)
# print(list(model.labels_))
p_labels = list(model.labels_)
r = pd.concat([data_set, pd.Series(model.labels_, index=data_set.index)], axis=1)
r.columns = list(data_set.columns) + [u'聚类类别']
print(r)
r.to_excel(output_file)
with open(score_file, "a") as sf:
sf.write("By k-means, the f-m_score of " + set_name + " is: " + str(metrics.fowlkes_mallows_score(t_labels, p_labels))+"\n")
sf.write("By k-means, the rand_score of " + set_name + " is: " + str(metrics.adjusted_rand_score(t_labels, p_labels))+"\n")
t_sne = TSNE()
t_sne.fit(data_set)
t_sne = pd.DataFrame(t_sne.embedding_, index=data_set.index)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
dd = t_sne[r[u'聚类类别'] == 0]
plt.plot(dd[0], dd[1], 'r.')
dd = t_sne[r[u'聚类类别'] == 1]
plt.plot(dd[0], dd[1], 'go')
dd = t_sne[r[u'聚类类别'] == 2]
plt.plot(dd[0], dd[1], 'b*')
dd = t_sne[r[u'聚类类别'] == 3]
plt.plot(dd[0], dd[1], 'o')
plt.savefig(png_file)
plt.clf()
# plt.show()
frog_data = pd.read_csv("../datas/Frogs_MFCCs.csv")
tLabel = []
for family in frog_data['Family']:
if family == "Leptodactylidae":
tLabel.append(0)
elif family == "Dendrobatidae":
tLabel.append(1)
elif family == "Hylidae":
tLabel.append(2)
else:
tLabel.append(3)
scoreFile = "../output/scoreOfClustering.txt"
first_set = frog_data[['MFCCs_ 1', 'MFCCs_ 5', 'MFCCs_ 9', 'MFCCs_13', 'MFCCs_17', 'MFCCs_21']]
k_means(first_set, "../output/kMeansSet_1.xlsx", "../output/kMeansSet_1.png", tLabel, scoreFile, "Set_1")
second_set = frog_data[['MFCCs_ 3', 'MFCCs_ 7', 'MFCCs_11', 'MFCCs_15', 'MFCCs_19']]
k_means(second_set, "../output/kMeansSet_2.xlsx", "../output/kMeansSet_2.png", tLabel, scoreFile, "Set_2")
# DBSCAN begins
db = cluster.DBSCAN(eps=0.1011, min_samples=115, n_jobs=-1)
db.fit(first_set)
r = pd.concat([first_set, pd.Series(db.labels_, index=first_set.index)], axis=1)
r.columns = list(first_set.columns) + [u'聚类类别']
# print(r)
r.to_excel("../output/dbscanSet_1.xlsx")
p_labels = list(db.labels_)
with open(scoreFile, "a") as sf:
sf.write("By DBSCAN, the f-m_score of Set_1 is: " + str(
metrics.fowlkes_mallows_score(tLabel, p_labels)) + "\n")
sf.write("By DBSCAN, the rand_score of Set_1 is: " + str(
metrics.adjusted_rand_score(tLabel, p_labels)) + "\n")
t_sne_db_1 = TSNE()
t_sne_db_1.fit(first_set)
t_sne_db_1 = pd.DataFrame(t_sne_db_1.embedding_, index=first_set.index)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
dd = t_sne_db_1[r[u'聚类类别'] == 0]
plt.plot(dd[0], dd[1], 'r.')
dd = t_sne_db_1[r[u'聚类类别'] == 1]
plt.plot(dd[0], dd[1], 'go')
dd = t_sne_db_1[r[u'聚类类别'] == 2]
plt.plot(dd[0], dd[1], 'b*')
dd = t_sne_db_1[r[u'聚类类别'] == -1]
plt.plot(dd[0], dd[1], 'o')
plt.savefig("../output/dbscanSet_1.png")
plt.clf()
# dbscan of set 2
db = cluster.DBSCAN(eps=0.1020, min_samples=90, n_jobs=-1)
db.fit(second_set)
r = pd.concat([second_set, pd.Series(db.labels_, index=second_set.index)], axis=1)
r.columns = list(second_set.columns) + [u'聚类类别']
# print(r)
r.to_excel("../output/dbscanSet_2.xlsx")
p_labels = list(db.labels_)
with open(scoreFile, "a") as sf:
sf.write("By DBSCAN, the f-m_score of Set_2 is: " + str(
metrics.fowlkes_mallows_score(tLabel, p_labels)) + "\n")
sf.write("By DBSCAN, the rand_score of Set_2 is: " + str(
metrics.adjusted_rand_score(tLabel, p_labels)) + "\n")
t_sne_db_2 = TSNE()
t_sne_db_2.fit(second_set)
t_sne_db_2 = | pd.DataFrame(t_sne_db_2.embedding_, index=second_set.index) | pandas.DataFrame |
# HELPER FUNCTIONS FOR ATTENTION AND MEMORY ANALYSES
import os
import pickle
import pandas as pd
from matplotlib import pyplot as plt
import ast
import json
import re
from datetime import datetime
import time
import hypertools as hyp
import numpy as np
from matplotlib import patches as patches
import seaborn as sb
# BEHAVIORAL DATA ANALYSIS FUNCTIONS
# Functions to Aggregate Subject Data and Verify Correct Stimuli were Presented
def sum_pd(subdir):
'''
input: subject directory (string)
output: full experiment info (dataframe)
'''
files = [ x for x in os.listdir(subdir) if 'pres' in x or 'mem' in x ]
df_list = [ pd.read_csv(subdir+'/'+x) for x in files ]
df = pd.concat(df_list, ignore_index=True)
return(df)
# Functions for Simple Behavioral Analyses
def add_level(df):
'''
input: subject dataframe
output: subject dataframe w/ Attention Level string for each Memory trial row
'''
for x in df.Run.unique():
mask = df['Run']==x
df[mask] = run_level(df[mask])
return(df)
def run_level(df):
'''
input: df containing pres and mem from single run
output: df with string in 'Attention Level' column in each Memory trial row
'''
cued_cat = df[df['Trial Type']=='Presentation']['Cued Category'].tolist()[0]
for index,row in df.iterrows():
if row['Trial Type']=='Memory':
mem_image = row['Memory Image']
for cue in ['Cued ', 'Uncued ']:
for cat in ['Face', 'Place']:
if df.loc[df[cue+cat] == mem_image].shape[0]!=0:
if cat == cued_cat:
df['Category'][index]=cued_cat
if cue == 'Cued ':
attention = "Full"
elif cue == 'Uncued ':
attention = "Category"
else:
df['Category'][index]=cat
if cue == 'Uncued ':
attention = "None"
elif cue == 'Cued ':
attention = "Side"
df['Attention Level'][index] = attention
mem_mask = df['Trial Type']=='Memory'
df.loc[mem_mask,'Attention Level'] = df.loc[mem_mask,'Attention Level'].fillna('Novel')
return(df)
def ROC(df, plot=True):
'''
input: subject df
output: ROC plot or ROC data dict
'''
ratings = [1.0, 2.0, 3.0, 4.0]
ROC = {}
fig = plt.figure()
ax1 = fig.add_subplot(111)
# for each attention level
for attn in ['Novel', 'None','Side','Full','Category']:
ROC[attn] = [0, 1]
# for each possible number rating
for rate in ratings:
# proportion of images in that attn level rated this rating or higher
num = df.loc[(df['Attention Level'] == attn) & (df['Familiarity Rating'] >= rate)].shape[0]
denom = df.loc[df['Attention Level'] == attn].shape[0]
ROC[attn].append(float(num)/denom)
ROC[attn].sort()
# proportions of various attention-level images, by rating, on y-axis
# proportions of novel images, by rating, on x-axis
if attn != 'Novel':
ax1.plot(ROC['Novel'], ROC[attn], '-o', label=attn)
if plot:
plt.legend(loc='upper left');
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
else:
return(ROC)
# EYE GAZE DATA ANALYSIS FUNCTIONS
class parseFile():
def __init__(self, file):
self.file = file
def parse(self):
data = open(self.file).read()
return(data)
def load(path):
'''
input: path to directory containing eye track data
output: raw parsed eye data
'''
data = []
files = [f for f in os.listdir(path)]
for x in files:
#if os.path.isfile(path+x):
newFile = parseFile(path+x)
data1 = newFile.parse()
for a,b in zip(['true','false'], ['True', 'False']):
data1 = data1.replace(a, b)
data1 = data1.split('\n')
data1 = [x for x in data1 if "tracker" in x]
data.extend(data1)
return(data)
def df_create(data):
"""
input: raw parsed eye data
output: dataframe of eye data (screen location in centimeters)
"""
dict_list = [ast.literal_eval(x) for x in data]
dict_list = [x['values']['frame'] for x in dict_list if 'frame' in x['values']]
df = pd.DataFrame(dict_list)
# right and left eye
for eye in ['righteye','lefteye']:
for coord in ['x','y']:
df[coord+'Raw_'+eye] = [df[eye][row]['raw'][coord] for row in df.index.values]
# convert to centimeters
df['av_x_coord'] = (59.8/2048)*(df[['xRaw_righteye', 'xRaw_lefteye']].mean(axis=1))
df['av_y_coord'] = (33.6/1152)*(df[['yRaw_righteye', 'yRaw_lefteye']].mean(axis=1))
# convert timestamp
df['timestamp']=[time.mktime(time.strptime(x[:], "%Y-%m-%d %H:%M:%S.%f")) for x in df['timestamp']]
return(df)
def pres_gaze(subdir, eye_df, interval='images'):
'''
input: subject's experiment df and eye track df
output: list of eye data df's
each df is either eye data from full pres block, or from single pres trial (interval='images')
'''
pres_gaze = []
for f in os.listdir(subdir):
if 'pres' in f:
pres_df = | pd.read_csv(subdir+'/'+f) | pandas.read_csv |
import requests
import json
import pandas as pd
def get_espn_info(season, espn_league_id, cookies = None):
r = requests.get('https://fantasy.espn.com/apis/v3/games/ffl/seasons/{}/segments/0/leagues/{}'.format(season, espn_league_id),
params={ 'view': ['mTeam', 'mRoster', 'mSettings']},
cookies = cookies)
if r.status_code in [400, 404]:
raise Exception("League not found! Please check 'espn_league_id'")
elif r.status_code == 401:
raise Exception("This is a private ESPN league and you are not authorized to view this league. Please check or include the 'cookie' parameters.")
elif r.status_code != 200:
print(r.status_code)
raise Exception("Unable to import ESPN League information.")
data = r.json()
teams, roster = get_league_teams(data)
members = get_league_members(data)
teams = teams.merge(members[['display_name', 'owner_id', 'owner_name']],on = 'owner_id', how = 'left')
teams.drop(['location', 'nickname', 'owner_id'], axis = 1, inplace = True)
roster = roster.astype(str).merge(teams[['team_id', 'team_name']], on = 'team_id', how = 'left')
return teams, roster
def get_league_teams(data):
teams = | pd.DataFrame() | pandas.DataFrame |
# %% markdown
# Portfolio Optimization - Risk
# %% add path
if __name__ == '__main__' and __package__ is None:
import sys, os.path
sys.path
# append parent of the directory the current file is in
inputfilename1 = r"C:\Users\<NAME>\Documents\Onedrive\Python scripts\_01 Liam Stock Analysis Project\stock_analysis\Python_Core"
inputfilename2 = r"C:\Users\l.morrow\OneDrive\Python scripts\_01 Liam Stock Analysis Project\stock_analysis\Python_Core"
sys.path.append(inputfilename1)
sys.path.append(inputfilename2)
# %% imports
import numpy as np
import math
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from pandas import ExcelWriter
from pandas import ExcelFile
from scipy.optimize import minimize
import stock_utilities as su
import scipy.optimize as sco
sns.set(style="darkgrid")
# %% fetch stock data
# tickers="AFDIX,FXAIX,JLGRX,MEIKX,PGOYX,HFMVX,FCVIX,FSSNX,WSCGX,CVMIX,DOMOX,FSPSX,ODVYX,MINJX,FGDIX,CMJIX,FFIVX,FCIFX,FFVIX,FDIFX,FIAFX,BPRIX,CBDIX,OIBYX,PDBZX"
# tickers="AFDIX,FXAIX,JLGRX,MEIKX"
tickers = "PRHSX,IAU,VWIGX,GBF,TRBCF,PRSCX"
start_date = pd.to_datetime('1/1/2016', utc=True)
end_date = pd.to_datetime('1/6/2020', utc=True)
stock_df = su.yahoo_stock_fetch(tickers, start_date, end_date)
# %% make df
analysis_df = {}
for t in stock_df.keys():
analysis_df[t] = pd.DataFrame()
analysis_df[t]['Adj Close'] = (stock_df[t]['Adj Close'])
analysis_df[t]['Simple Returns'] = (stock_df[t]['Adj Close'].pct_change(1))
analysis_df[t]['Total ROI %'] = ((stock_df[t]['Adj Close']-stock_df[t]['Adj Close'].iloc[0])/stock_df[t]['Adj Close'].iloc[0])*100
analysis_df[t]['Log Returns'] = np.log(stock_df[t]['Adj Close']/stock_df[t]['Adj Close'].shift(1))
adj_close_df = pd.DataFrame()
for t in stock_df.keys():
adj_close_df[t] = analysis_df[t]['Adj Close']
adj_close_df
#%% covariance matrix class
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
cov_matrix = pd.DataFrame(cov_matrix)
Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))
return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)
def variation_over_time(prices,frequency=252,periods=8,covariance='ledoit_wolf',correlation=True,returns_data=False):
date_delta = end_date - start_date
divided_days = date_delta/periods
times = pd.date_range(start_date, periods=periods, freq=divided_days,normalize=True)
subset_df = {}
counter=0
for i in times:
counter+=1
#reset index and set timezone.
sub_df = prices.tz_localize('UTC', level=0).reset_index()
#spec start date
subset_start_date = pd.to_datetime(i, utc= True)
#specficy a subset
subset_df[counter] = sub_df.loc[(sub_df['Date'] > subset_start_date) & (sub_df['Date'] < end_date)]
for i in subset_df.keys():
subset_df[i].set_index('Date', inplace=True)
if covariance == 'ledoit_wolf':
matrix={}
for i in subset_df.keys():
matrix[i] = covariance_models(subset_df[i],returns_data=returns_data, frequency=frequency).ledoit_wolf()
else:
#sample covariance
matrix={}
for i in subset_df.keys():
matrix[i] = subset_df[i].pct_change().dropna(how="all").cov()*frequency
if correlation == True :
for i in subset_df.keys():
matrix[i] = cov_to_corr(matrix[i])
else:
pass
return matrix,times
def heatmap(x, y, size, scale, times):
# Mapping from column names to integer coordinates
x_labels = [v for v in sorted(x.unique())]
y_labels = [v for v in sorted(y.unique())]
x_to_num = {p[1]:p[0] for p in enumerate(x_labels)}
y_to_num = {p[1]:p[0] for p in enumerate(y_labels)}
size_scale = scale
ax.scatter(
x=x.map(x_to_num), # Use mapping for x
y=y.map(y_to_num), # Use mapping for y
s=size * size_scale, # Vector of square sizes, proportional to size parameter
marker='s' # Use square as scatterplot marker
)
# Show column labels on the axes
ax.set(title='Variance Matrix \n'+ str(times))
ax.set_xticks([x_to_num[v] for v in x_labels])
ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')
ax.set_yticks([y_to_num[v] for v in y_labels])
ax.set_yticklabels(y_labels)
ax.grid(False, 'major')
ax.grid(True, 'minor')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
class covariance_models:
def __init__(self, prices, returns_data=False, frequency=252):
"""
:param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)
:type frequency: int, optional
"""
# Optional import
try:
from sklearn import covariance
self.covariance = covariance
except (ModuleNotFoundError, ImportError):
raise ImportError("Please install scikit-learn via pip")
if not isinstance(prices, pd.DataFrame):
prices = | pd.DataFrame(prices) | pandas.DataFrame |
import os
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from skopt.space import Real
from evalml.pipelines import BinaryClassificationPipeline, ComponentGraph
@pytest.fixture
def test_pipeline():
class TestPipeline(BinaryClassificationPipeline):
component_graph = ['Simple Imputer', 'One Hot Encoder', 'Standard Scaler', 'Logistic Regression Classifier']
hyperparameters = {
"penalty": ["l2"],
"C": Real(.01, 10),
"impute_strategy": ["mean", "median", "most_frequent"],
}
def __init__(self, parameters):
super().__init__(parameters=parameters)
@property
def feature_importance(self):
importance = [1.0, 0.2, 0.0002, 0.0, 0.0, -1.0]
feature_names = range(len(importance))
f_i = list(zip(feature_names, importance))
df = | pd.DataFrame(f_i, columns=["feature", "importance"]) | pandas.DataFrame |
import traceback
import argparse
import re # regular expressions
import gzip
import pandas as pd
'''
Load RNA sequence into memory.
Reads a FASTA.gz file from GeneCode.
Parses the transcript id (TID) from the FASTA defline.
Returns a Pandas dataframe with columnts tid, class, sequence, seqlen.
Typical input files from (https://www.gencodegenes.org/)
- gencode.v38.lncRNA_transcripts.fa.gz
- gencode.v38.pc_transcripts.fa.gz
'''
class GenCodeLoader():
def __init__(self):
self.pattern5=re.compile('.*UTR5:')
self.pattern3=re.compile('.*UTR3:')
self.check_list = None
self.check_utr = False
self.min_size = None
self.max_size = None
def set_label(self,label):
'''
Set one label used for subsequent sequences.
The value gets stored in the 'class' field.
Usually use 1 for protein-coding and 0 for non-coding.
'''
self.label=label
def set_check_list(self,check_list):
'''
Optionally provide a TID include list. Others are excluded.
The parameter, type list, is used with pythin 'in' operator.
'''
self.check_list=check_list
def set_check_utr(self,check_utr):
'''
Optionally require UTR. Equivalent to requiring an ORF.
Include only deflines that specify 5'UTR and 3'UTR positions.
(GenCode does have mRNA transcripts that lack an ORF!)
Set this to false when loading non-coding RNA.
The parameter is type boolean.
'''
self.check_utr=check_utr
def set_check_size(self,min,max):
self.min_size = min
self.max_size = max
def __save_previous(self,one_def,one_seq):
'''
For internal use only.
FASTA sequence records are multi-line starting with a defline.
This is called just before parsing a new defline
to optionally save the previously parsed sequence record.
'''
if one_def is None:
return
if self.check_utr:
if self.pattern5.match(one_def) is None:
return
if self.pattern3.match(one_def) is None:
return
seq_len = len(one_seq)
if self.min_size is not None and seq_len < self.min_size:
return
if self.max_size is not None and seq_len > self.max_size:
return
VERSION = '.'
one_id = one_def[1:].split(VERSION)[0]
if self.check_list is not None:
if one_id not in self.check_list:
return
self.labels.append(self.label)
self.seqs.append(one_seq)
self.lens.append(len(one_seq))
self.ids.append(one_id)
def load_file(self,filename):
'''
Parse the given file and return a data structure.
Given file assumed GenCode FASTA file.
Returns a Pandas dataframe with four fields.
'''
self.labels=[] # usually 1 for protein-coding or 0 for non-coding
self.seqs=[] # usually strings of ACGT
self.lens=[] # sequence length
self.ids=[] # GenCode transcript ID, always starts ENST, excludes version
DEFLINE='>' # start of line with ids in a FASTA FILE
EMPTY=''
one_def = None
one_seq = ''
with gzip.open (filename,'rt') as infile:
for line in infile:
if line[0]==DEFLINE:
self.__save_previous(one_def,one_seq)
one_def=line
one_seq = EMPTY
else:
# Continue loading sequence lines till next defline.
additional = line.rstrip()
one_seq = one_seq + additional
# Don't forget to save the last sequence after end-of-file.
self.__save_previous(one_def,one_seq)
df1=pd.DataFrame(self.ids,columns=['tid'])
df2=pd.DataFrame(self.labels,columns=['class'])
df3=pd.DataFrame(self.seqs,columns=['sequence'])
df4=pd.DataFrame(self.lens,columns=['seqlen'])
df= | pd.concat((df1,df2,df3,df4),axis=1) | pandas.concat |
###what's left: find data path, save raw results (predictions) save efficiency calculations, check imports are correct
import pickle
import sys
path_to_save = sys.argv[1]
import tensorflow as tf
import pandas as pd
import numpy as np
# Keras
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential, Model, load_model
from keras.layers import LSTM, Dense, RepeatVector, Input, BatchNormalization, Bidirectional,multiply, concatenate, Flatten, Activation, dot,Layer
from src.subpackages.ts_process.validate import Rolling_Validation
from src.subpackages.ts_process.scale import WindowMinMaxScaler
from src.subpackages.model.baseline import interval_baseline
from src.subpackages.model.fuzzy_intervals import fuzzy_interval
data = pd.read_csv('processed_dataset_local.csv',parse_dates = True,index_col = "DATE")
###defining sub-models
def seq2seq(time_steps,n_hidden,n_features):
input = Input(shape = (time_steps,n_features))
encoder_stack_h, encoder_last_h, encoder_last_c = LSTM(
n_hidden, activation='elu',
return_state=True, return_sequences=True,dropout = .2, recurrent_dropout = .2)(input)
###does more wild stuff(cross etc. with lower momentum)
#encoder_last_h = BatchNormalization(momentum=0.2)(encoder_last_h)
#encoder_last_c = BatchNormalization(momentum=0.2)(encoder_last_c)
decoder_input = RepeatVector(1)(encoder_last_h)
decoder_stack_h = LSTM(n_hidden, activation='elu',
return_state=False, return_sequences=True,dropout = .2,recurrent_dropout = .2)(
decoder_input, initial_state=[encoder_last_h, encoder_last_c])
attention = dot([decoder_stack_h, encoder_stack_h], axes=[2, 2])
attention = Activation('softmax')(attention)
context = dot([attention, encoder_stack_h], axes=[2,1])
#context = BatchNormalization(momentum=0.2)(context)
decoder_combined_context = concatenate([context, decoder_stack_h])
decoder_combined_context = Flatten()(decoder_combined_context)
#decoder_combined_context = BatchNormalization(momentum = .2)(decoder_combined_context)
#dense_1 = Dense(64)(decoder_combined_context)
out = Dense(2)(decoder_combined_context)
model = Model(inputs = input,outputs = out)
return model
def bidirectional_regression(time_steps=0,n_hidden=0,n_features=0):
input = Input(shape = (time_steps,n_features))
lstm1 = Bidirectional(LSTM(units = n_hidden,return_sequences = False,dropout = .2))(input)
dense1 = Dense(128)(lstm1)
out = Dense(1)(dense1)
model = Model(inputs = input,outputs = out)
return model
###sub-model objects
class EarlyStopByAbsVal(keras.callbacks.Callback):
def __init__(self, min_delta = 0, verbose = 0):
super(keras.callbacks.Callback, self).__init__()
self.min_delta = min_delta
self.verbose = verbose
self.last_loss = 0
def on_epoch_end(self, epoch, logs={}):
epoch_loss = logs['loss']
if abs(epoch_loss-self.last_loss) > self.min_delta:
if self.verbose >0:
print("Epoch %05d: early stopping Threshold" % epoch)
self.model.stop_training = True
else:
self.last_loss = epoch_loss
interval_opt = tf.keras.optimizers.Adam(lr = .001,clipnorm = 1)
interval_callback = EarlyStopByAbsVal(min_delta = 10**(-7))
###Define Global Parameters
start_date = "2000-01-01"
end_date = "2021-06-30"
#get a list of sector tickers
'''
communication services: XLC
consumer discretionary: XLY
consumer staples: XLP
Energy: XLE
Financials: XLF
Healthcare: XLV
Industrials: XLI
Information Technology: XLK
Materials: XLB
Real Estate: IYR
Utilities: XLU
'''
#interval_mod = bidirectional(40,256,120)
fixed_parameter_dict_model = {"regressor_model":bidirectional_regression,
"regressor_window":10,
"regress_model_dict" : {"time_steps":10,"n_hidden":512,"n_features":119},
"regress_compile_dict": {"optimizer":'adam', "loss":'mean_squared_error',"metrics":["mean_absolute_percentage_error"]},
"regress_fit_dict":{"verbose":True, "epochs":125},
"clusters":3,
"cluster_alpha":.045,
"vol_scale":.8,
"interval_model":seq2seq,
"interval_model_dict": {"time_steps":40,"n_hidden":256,"n_features":120},
"interval_window":40,
"interval_fit_dict":{"epochs":150,"batch_size":32,"verbose":True,'callbacks':[interval_callback]},
"interval_compile_dict":{"loss":"mean_squared_error","optimizer":interval_opt},
}
fixed_parameter_dict_baseline = {"alpha":.05}
standardizer = WindowMinMaxScaler()
###experimental global params
length = len(data)
window_length = (1/8)*length
shift = (1/12)*length
train_test_val_split = (1/6,2/3,1/6)
###main
model_result_dict = {}
baseline_result_dict = {}
truth_dict = {}
input_columns = [x for x in data if 'target' not in x]
target_etfs = ['XLY', 'XLP', 'XLE', 'XLF', 'XLV', 'XLI', 'XLK', 'XLB', 'XLU']
for etf in target_etfs:
window_model = WindowGenerator(data,10,1,1,input_columns,[etf+"_target"])
window_baseline = WindowGenerator(data,1,1,1,input_columns,[etf + "_target"])
e_model = Rolling_Validation(window_model.inputs,window_model.targets,window_length,shift,fuzzy_interval,fixed_parameter_dict_model,standardization =standardizer)
e_baseline =Rolling_Validation(window_baseline.inputs,window_baseline.targets,window_length,shift,interval_baseline,fixed_parameter_dict_baseline)
model_result_dict[etf]=e_model.experiment_dict
baseline_result_dict[etf] = e_baseline.experiment_dict
truth_dict[etf] = e_baseline.true_dict
# create a binary pickle file
f_truth = open("/reports/truth_dict.pkl","wb")
pickle.dump(truth_dict,f_truth)
f_truth.close()
f_baseline = open("/reports/baseline_dict","wb")
pickle.dump(baseline_result_dict,f_baseline)
f_baseline.close()
f_model =open("/reports/model_dict","wb")
pickle.dump(model_result_dict,f_model)
f_model.close()
test_index = pd.MultiIndex.from_product([[],[],[]], names=["etf","experiment", "bound"])
truth_index = pd.MultiIndex.from_product([[],[]], names=["etf","experiment"])
model_result = pd.DataFrame(columns = test_index)
baseline_result = pd.DataFrame(columns = test_index)
truth_result = pd.DataFrame(columns = truth_index)
for etf in model_result_dict.keys():
for experiment in model_result_dict[etf].keys():
sub_index = pd.MultiIndex.from_product([[etf],[experiment],['lower','upper']],names = ['etf','experiment','bound'])
truth_index = pd.MultiIndex.from_product([[etf],[experiment]],names = ['etf','experiment'])
model_lower = pd.Series(model_result_dict[etf][experiment][:,0],name = 'lower')
model_upper = pd.Series(model_result_dict[etf][experiment][:,1],name = 'upper')
model_sub_result = pd.concat([model_lower,model_upper],axis =1)
model_sub_result.columns = sub_index
model_result = pd.concat([model_result,model_sub_result],axis =1)
baseline_lower = pd.Series(baseline_result_dict[etf][experiment][:,0],name = 'lower')
baseline_upper = pd.Series(baseline_result_dict[etf][experiment][:,1],name = 'upper')
baseline_sub_result = pd.concat([baseline_lower,baseline_upper],axis =1)
baseline_sub_result.columns = sub_index
baseline_result = | pd.concat([baseline_result,baseline_sub_result],axis =1) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
pd.set_option('display.max_columns', 100)
pd.options.mode.chained_assignment = None
train_path = '../input/forest-cover-type-prediction/train.csv'
test_path = '../input/forest-cover-type-prediction/test.csv'
submit_path = '../input/forest-cover-type-prediction/sampleSubmission.csv'
dtrain = | pd.read_csv(train_path, index_col=0) | pandas.read_csv |
"""
Module for interacting with the NHL's open but undocumented API.
"""
import streamlit as st
import pandas as pd
from pandas.io.json import json_normalize
import requests as rqsts
## data ingestion
def get_seasons(streamlit=False):
""" returns all seasons on record """
seasons_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/seasons')
try:
seasons_response.raise_for_status()
except rqsts.exceptions.HTTPError as e:
if streamlit:
st.write(e)
else:
print(e)
raise e
seasons = seasons_response.content
seasons_df = pd.read_json(seasons)
seasons_df = json_normalize(seasons_df.seasons)
seasons_df.set_index('seasonId', inplace=True)
return seasons_df
def get_current_season():
season_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/seasons/current')
season = season_response.content
season_df = pd.read_json(season)
season_df = json_normalize(season_df.seasons)
season_id = season_df.seasonId
season_start = season_df.regularSeasonStartDate
season_end = season_df.regularSeasonEndDate
return season_id, season_start, season_end
def get_teams(streamlit=False):
"""returns all teams FOR THE CURRENT SEASON"""
teams_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/teams')
try:
teams_response.raise_for_status()
except rqsts.exceptions.HTTPError as e:
if streamlit:
st.write(e)
else:
print(e)
raise e
teams = teams_response.content
teams_df = pd.read_json(teams)
teams_df = json_normalize(teams_df.teams)
return teams_df
def get_schedule(start_date, end_date):
# teams = get_teams()
# st.dataframe(teams)
# output_df = pd.DataFrame()
schedule_response = rqsts.get('https://statsapi.web.nhl.com/api/v1/schedule?startDate={0}&endDate={1}'.format(start_date, end_date))
schedule = schedule_response.content
schedule = pd.read_json(schedule)
schedule = json_normalize(schedule.dates)
output_df = pd.DataFrame()
for game in schedule.games:
game_df = | json_normalize(game) | pandas.io.json.json_normalize |
# Authors: dodoarg <<EMAIL>>
from typing import List, Optional, Union
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_numeric_dtype as is_numeric
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_check_contains_na,
_check_X_matches_training_df,
check_X,
)
from feature_engine.datetime._datetime_constants import (
FEATURES_DEFAULT,
FEATURES_FUNCTIONS,
FEATURES_SUFFIXES,
FEATURES_SUPPORTED,
)
from feature_engine._docstrings.methods import (
_fit_not_learn_docstring,
_fit_transform_docstring,
)
from feature_engine._docstrings.fit_attributes import (
_feature_names_in_docstring,
_n_features_in_docstring,
)
from feature_engine._docstrings.substitute import Substitution
from feature_engine.variable_manipulation import (
_check_input_parameter_variables,
_find_or_check_datetime_variables,
_is_categorical_and_is_datetime,
)
@Substitution(
feature_names_in_=_feature_names_in_docstring,
n_features_in_=_n_features_in_docstring,
fit=_fit_not_learn_docstring,
fit_transform=_fit_transform_docstring,
)
class DatetimeFeatures(BaseEstimator, TransformerMixin):
"""
DatetimeFeatures extracts date and time features from datetime variables, adding
new columns to the dataset. DatetimeFeatures can extract datetime information from
existing datetime or object-like variables or from the dataframe index.
DatetimeFeatures uses `pandas.to_datetime` to convert object variables to datetime
and pandas.dt to extract the features from datetime.
The transformer supports the extraction of the following features:
- "month"
- "quarter"
- "semester"
- "year"
- "week"
- "day_of_week"
- "day_of_month"
- "day_of_year"
- "weekend"
- "month_start"
- "month_end"
- "quarter_start"
- "quarter_end"
- "year_start"
- "year_end"
- "leap_year"
- "days_in_month"
- "hour"
- "minute"
- "second"
More details in the :ref:`User Guide <datetime_features>`.
Parameters
----------
variables: str, list, default=None
List with the variables from which date and time information will be extracted.
If None, the transformer will find and select all datetime variables,
including variables of type object that can be converted to datetime.
If "index", the transformer will extract datetime features from the
index of the dataframe.
features_to_extract: list, default=None
The list of date features to extract. If None, the following features will be
extracted: "month", "year", "day_of_week", "day_of_month", "hour",
"minute" and "second". If "all", all supported features will be extracted.
Alternatively, you can pass a list with the names of the features you want to
extract.
drop_original: bool, default="True"
If True, the original datetime variables will be dropped from the dataframe.
missing_values: string, default='raise'
Indicates if missing values should be ignored or raised. If 'raise' the
transformer will return an error if the the datasets to `fit` or `transform`
contain missing values. If 'ignore', missing data will be ignored when
performing the feature extraction. Missing data is only evaluated in the
variables that will be used to derive the date and time features. If features
are derived from the dataframe index, missing data will be checked in the
index.
dayfirst: bool, default="False"
Specify a date parse order if arg is str or is list-like. If True, parses
dates with the day first, e.g. 10/11/12 is parsed as 2012-11-10. Same as in
`pandas.to_datetime`.
yearfirst: bool, default="False"
Specify a date parse order if arg is str or is list-like.
Same as in `pandas.to_datetime`.
- If True parses dates with the year first, e.g. 10/11/12 is parsed as
2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst is preceded.
utc: bool, default=None
Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime
objects as well). Same as in `pandas.to_datetime`.
Attributes
----------
variables_:
List of variables from which date and time features will be extracted. If None,
features will be extracted from the dataframe index.
features_to_extract_:
The date and time features that will be extracted from each variable or the
index.
{feature_names_in_}
{n_features_in_}
Methods
-------
{fit}
{fit_transform}
transform:
Add the date and time features.
See also
--------
pandas.to_datetime
pandas.dt
"""
def __init__(
self,
variables: Union[None, int, str, List[Union[str, int]]] = None,
features_to_extract: Union[None, str, List[str]] = None,
drop_original: bool = True,
missing_values: str = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: Union[None, bool] = None,
) -> None:
if features_to_extract:
if not (
isinstance(features_to_extract, list) or features_to_extract == "all"
):
raise ValueError(
"features_to_extract must be a list of strings or 'all'. "
f"Got {features_to_extract} instead."
)
elif isinstance(features_to_extract, list) and any(
feat not in FEATURES_SUPPORTED for feat in features_to_extract
):
raise ValueError(
"Some of the requested features are not supported. "
"Supported features are {}.".format(", ".join(FEATURES_SUPPORTED))
)
if not isinstance(drop_original, bool):
raise ValueError(
"drop_original takes only booleans True or False. "
f"Got {drop_original} instead."
)
if missing_values not in ["raise", "ignore"]:
raise ValueError(
"missing_values takes only values 'raise' or 'ignore'. "
f"Got {missing_values} instead."
)
if utc is not None and not isinstance(utc, bool):
raise ValueError("utc takes only booleans or None. " f"Got {utc} instead.")
self.variables = _check_input_parameter_variables(variables)
self.drop_original = drop_original
self.missing_values = missing_values
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self.utc = utc
self.features_to_extract = features_to_extract
def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
This transformer does not learn any parameter.
Finds datetime variables or checks that the variables selected by the user
can be converted to datetime.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The training input samples. Can be the entire dataframe, not just the
variables to transform.
y: pandas Series, default=None
It is not needed in this transformer. You can pass y or None.
"""
# check input dataframe
X = check_X(X)
# special case index
if self.variables == "index":
if not (
is_datetime(X.index)
or (
not | is_numeric(X.index) | pandas.api.types.is_numeric_dtype |
#IMPORT LIBRARIES
import requests
import pandas as pd
import boto3
import re
import os
from datetime import datetime
import selenium
from selenium import webdriver
from bs4 import BeautifulSoup
from secrets import access_key, secret_access_key
#Create User-Agent for requests
headers = { 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" }
def uk_cities_name():
#URL with 20 top cities
url = 'https://www.tripadvisor.co.uk/Restaurants-g186216-United_Kingdom.html'
#result of the URL request
result_cities = requests.get(url, headers=headers )
#SET html.parser and text to result
soup = BeautifulSoup( result_cities.text, 'html.parser' )
#Cities Name = Find the tag DIV with class='geo_name'
allcities = soup.find_all( 'div', class_='geo_name')
#Take the CITIES NAME
allcitiesname = [p.get_text() for p in allcities]
#Replace \n and Restaurant to nothing
allcitiesname = [s.replace('\n', '') for s in allcitiesname]
allcitiesname = [s.replace('Restaurants', '') for s in allcitiesname]
#Take the LINK allcities[0].find('a').get('href')
allcitieslinks = [p.find('a').get('href') for p in allcities]
#Take the City ID
city_id = [i.split('-')[1] for i in allcitieslinks]
#Take the City URL
city_url = [i.split('-')[2] for i in allcitieslinks]
#Create DataFrame with feauters
data = pd.DataFrame([allcitiesname,city_id,city_url]).T
#Alter columns name
data.columns = ['city','city_id','city_url']
#Code VEGAN
data['code_vegan'] = 'zfz10697'
#Add new colum with datetime
data['scrap_datetime'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return data
def colect_data_cities():
#get data from uk_cities_name function
data = uk_cities_name()
#empty data frame
df_details = pd.DataFrame()
for i in range( len ( data ) ):
print(range(len(data)))
print(i)
#URL with 20 top cities
url = 'https://www.tripadvisor.co.uk/Restaurants-'+ data.loc[i, 'city_id']+'-'+ data.loc[i, 'code_vegan']+'-'+ data.loc[i, 'city_url']+''
#result of the URL request
result_cities = requests.get(url, headers=headers )
soup = BeautifulSoup(result_cities.text, 'html.parser')
#Take the City URL
city_url2 = url.split('-')
city_url2 = city_url2[1]
city_url2
#Take the SORT BY
sortby = soup.find( 'div', class_='_1NO-LVmX _1xde6MOz')
sortby = sortby.text
#Get the AMOUNT of the RESTAURANTS
path = 'selenium\chromedriver.exe'
#driver = webdriver.Chrome(executable_path="chromedriver\chromedriver.exe")
driver = webdriver.Chrome(path)
driver.get(url)
#Find class name _1D_QUaKi
total_rest = driver.find_element_by_class_name("_1D_QUaKi")
#Get text without html code.
results_restaurants = total_rest.text
#close the browser screen
driver.quit()
#Get first 5 restaurants
list_item = soup.find( 'div', class_='_1kXteagE')
#Get first 5 restaurants - each item list
each_item = soup.find_all('div', attrs={'data-test':re.compile("^[1-5]_list_item")})
#get the name of the restaurantes
restaurant_name = [r.find('a', class_='_15_ydu6b').get_text() for r in each_item]
#create the restaurants name DataFrame
restaurant_name = pd.DataFrame(restaurant_name)
#rename the columns
restaurant_name.columns = ['Restaurants']
#Create DataFrame with feauters
data2 = | pd.DataFrame([city_url2,sortby,results_restaurants, restaurant_name]) | pandas.DataFrame |
"""Test script that saves results from 26 vehicles currently in master branch of FASTSim as of 17 December 2019 for 3 standard cycles.
From command line, pass True (default if left blank) or False argument to use JIT compilation or not, respectively."""
import pandas as pd
import time
import numpy as np
import re
import os
import sys
import inspect
from pathlib import Path
# local modules
from fastsim import simdrive, vehicle, cycle
def main(use_jitclass=True, err_tol=1e-4):
"""Runs test test for 26 vehicles and 3 cycles.
Test compares cumulative positive and negative energy
values to a benchmark from earlier.
Arguments:
----------
use_jitclass : use numba or not, default True
err_tol : error tolerance
default of 1e-4 was selected to prevent minor errors from showing.
As of 31 December 2020, a recent python update caused errors that
are smaller than this and therefore ok to neglect.
"""
t0 = time.time()
cycles = ['udds', 'hwfet', 'us06']
vehicles = np.arange(1, 27)
print('Instantiating classes.')
print()
veh = vehicle.Vehicle(1)
if use_jitclass:
veh_jit = veh.get_numba_veh()
cyc = cycle.Cycle('udds')
if use_jitclass:
cyc_jit = cyc.get_numba_cyc()
energyAuditErrors = []
iter = 0
for vehno in vehicles:
print('vehno =', vehno)
for cycname in cycles:
if not((vehno == 1) and (cycname == 'udds')):
cyc.set_standard_cycle(cycname)
if use_jitclass:
cyc_jit = cyc.get_numba_cyc()
veh.load_veh(vehno)
if use_jitclass:
veh_jit = veh.get_numba_veh()
if use_jitclass:
sim_drive = simdrive.SimDriveJit(cyc_jit, veh_jit)
sim_drive.sim_drive()
else:
sim_drive = simdrive.SimDriveClassic(cyc, veh)
sim_drive.sim_drive()
sim_drive_post = simdrive.SimDrivePost(sim_drive)
# sim_drive_post.set_battery_wear()
diagno = sim_drive_post.get_diagnostics()
energyAuditErrors.append(sim_drive.energyAuditError)
if iter == 0:
dict_diag = {}
dict_diag['vnum'] = [vehno]
dict_diag['cycle'] = [cycname]
for key in diagno.keys():
dict_diag[key] = [diagno[key]]
iter += 1
else:
dict_diag['vnum'].append(vehno)
dict_diag['cycle'].append(cycname)
for key in diagno.keys():
dict_diag[key].append(diagno[key])
df = | pd.DataFrame.from_dict(dict_diag) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **TD DSA 2021 de <NAME> - rapport de <NAME>**
# ------------------------- -------------------------------------
# # Analyse descriptive
# ## Setup
# In[5]:
get_ipython().system('pip install textblob')
# In[6]:
get_ipython().system('pip install emot')
# In[7]:
get_ipython().system('pip install wordcloud')
# In[8]:
#Temps et fichiers
import os
import warnings
import time
from datetime import timedelta
#Manipulation de données
import pandas as pd
import numpy as np
# Text
from collections import Counter
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.util import ngrams
from textblob import TextBlob
import string
import re
import spacy
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
#Visualisation
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud
#Tracking d'expérience
import mlflow
import mlflow.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# ### Utilisation du package
# In[9]:
#Cette cellule permet d'appeler la version packagée du projet et d'en assurer le reload avant appel des fonctions
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[10]:
from dsa_sentiment.scripts.make_dataset import load_data
from dsa_sentiment.scripts.evaluate import eval_metrics
from dsa_sentiment.scripts.make_dataset import Preprocess_StrLower, Preprocess_transform_target
# ### Configuration de l'experiment MLFlow
# In[11]:
mlflow.tracking.get_tracking_uri()
# ### Chargement des données
# In[12]:
# On Importe les données
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val=pd.read_parquet('/mnt/data/interim/X_val.gzip')
X_test=pd.read_parquet('/mnt/data/interim/X_test.gzip')
#y
y_train=pd.read_parquet('/mnt/data/interim/y_train.gzip')
y_val=pd.read_parquet('/mnt/data/interim/y_val.gzip')
y_test=pd.read_parquet('/mnt/data/interim/y_test.gzip')
# ## EDA
# On commence par nalyser l'équilibre des différentes classes de sentiments
# In[13]:
df = df_train
df.head()
# ### Analyse de l'équilibre du jeu d'entrainement par label
# In[14]:
fig = px.histogram(df, x="sentiment", color="sentiment", title = 'Nombre de tweets par sentiment')
fig.show()
# Il existe un léger déséquilibre dans les classes en faveur des sentiments `neutral`
# ### Analyse des champs lexicaux par label
# Pour la suite des travaux, on créée un corpus contenant la concaténation de tous les tweets d'une certaine tonalité.
# In[15]:
def create_corpus(text_series):
text = text_series.apply(lambda x : x.split())
text = sum(text, [])
return text
# In[16]:
positive_text = create_corpus(df['text'][df['sentiment']=='positive'])
negative_text = create_corpus(df['text'][df['sentiment']=='negative'])
neutral_text = create_corpus(df['text'][df['sentiment']=='neutral'])
# Il devient alors possible de crééer des histogrammes représentant la fréquence de N-grams dans un corpus =donné
# In[17]:
def plot_freq_dist(text_corpus, nb=30, ngram=1, title=''):
'''
Plot the most common words
inputs:
text_corpus : a corpus of words
nb : number of words to plot
title : graph title
returns:
nothing, plots the graph
'''
freq_pos=Counter(ngrams(create_corpus(pd.Series(text_corpus)),ngram))
pos_df = pd.DataFrame({
"words":[' '.join(items) for items in list(freq_pos.keys())],
"Count":list(freq_pos.values())
})
common_pos= pos_df.nlargest(columns="Count", n=30)
fig = px.bar(common_pos, x="words", y="Count", labels={"words": "Words", "Count":"Frequency"}, title=title)
fig.show();
# In[18]:
plot_freq_dist(positive_text, title = 'Most common words associated with positive tweets')
# Le résultat montre la prépondérance des `stopwords`, ces mots d'articulation, qui sont très communs et gènent l'identifiaction de mots clefs propres à un document / ensemble de documents spécifiques.
#
# Il convient donc d'effectuer des opérations de retraitement du texte pour analyse.
# ### Preprocessing
# Parmi les éléments propres aux tweets qui peuvent avoir un impact sur la suite on compte :
#
# - les mots clefs marqués par un `#`
# - les noms d'utilisateurs commençant par un `@`
# - les emoticons et emojis
# - les nombre de mots en MAJUSCULES
# - la répétition de caractères pour marquer l'emphase `!!!!`, `looooong`, ou l'autocensure `f***`
# - les fautes de frappes (mots de moins de 2 caractères)
# Afin de disposer de traitements homogènes, repoductibles et paramétrables, une fonction spécifique est créée. Les différenst paramètres pourront être testés dans les phase de modélistaion ultérieures.
# source [preprocess](https://www.kaggle.com/stoicstatic/twitter-sentiment-analysis-for-beginners)
# In[57]:
def preprocess_text(text_series,
apply_lemmatizer=True,
apply_lowercase=True,
apply_url_standerdisation=True,
apply_user_standerdisation=True,
apply_emoticon_to_words=True,
apply_stopwords_removal=True,
apply_shortwords_removal=True,
apply_non_alphabetical_removal=True,
apply_only_2_consecutive_charac=True
):
'''
Main preprocess function
inputs:
text_series : a pandas Series object with text to preprocess
outputs:
a preprocessed pandas Series object
'''
processedText = []
if apply_lemmatizer:
# Create Lemmatizer and Stemmer.
wordLemm = WordNetLemmatizer()
# Defining regex patterns.
urlPattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
userPattern = '@[^\s]+'
alphaPattern = r"[^(\w|\*|(!){2}|#)]"
sequencePattern = r"(.)\1\1+"
seqReplacePattern = r"\1\1"
for tweet in text_series:
if apply_lowercase:
tweet = tweet.lower()
if apply_url_standerdisation:
# Replace all URls with 'URL'
tweet = re.sub(urlPattern,' URL',tweet)
if apply_user_standerdisation:
# Replace @USERNAME to 'USER'.
tweet = re.sub(userPattern,' USER', tweet)
if apply_emoticon_to_words:
# Replace all emojis.
for emo in EMOTICONS:
#refactor outputs so that we come up with a single word when/if text spliting afterwards
val = "_".join(EMOTICONS[emo].replace(",","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
for emot in UNICODE_EMO:
val = "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
if apply_only_2_consecutive_charac:
# Replace 3 or more consecutive letters by 2 letter.
tweet = re.sub(sequencePattern, seqReplacePattern, tweet)
if apply_non_alphabetical_removal:
# Replace all non alphabets.
tweet = re.sub(alphaPattern, " ", tweet)
tweetwords = ''
for word in tweet.split():
# Checking if the word is a stopword.
if apply_stopwords_removal:
if word in stopwords.words('english'):
word=''
else:
word=word
#if word not in stopwordlist:
if apply_shortwords_removal:
if len(word)<=1:
word=''
else:
word=word
# Lemmatizing the word.
if apply_lemmatizer:
word = wordLemm.lemmatize(word)
else:
word=word
tweetwords += (word+' ')
processedText.append(tweetwords)
return processedText
# In[20]:
positive_text_prepro = preprocess_text(df['text'][df['sentiment']=='positive'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[56]:
pd.Series(positive_text_prepro).head()
# In[21]:
neutral_text_prepro = preprocess_text(df['text'][df['sentiment']=='neutral'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[58]:
| pd.Series(neutral_text_prepro) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Methods to perform coverage analysis.
@author: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from typing import List, Optional
from shapely import geometry as geo
from datetime import datetime, timedelta
from skyfield.api import load, wgs84, EarthSatellite
from ..schemas.point import Point
from ..schemas.satellite import Satellite
from ..schemas.instrument import Instrument, DutyCycleScheme
from ..utils import (
compute_min_altitude,
swath_width_to_field_of_regard,
compute_max_access_time,
compute_orbit_period,
)
def collect_observations(
point: Point,
satellite: Satellite,
instrument: Instrument,
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect single satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellite: The observing satellite
:type satellite: :class:`tatc.schemas.satellite.Satellite`
:param instrument: The instrument used to make observations
:type instrument::`tatc.schemas.instrument.instrument`
:param start: The start of the mission window
:type start::`datetime.datetime`
:param end: The end of the mission window
:type end::`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
:return: An instance of :class:`geopandas.GeoDataFrame` containing all
recorded reduce_observations
:rtype::`geopandas.GeoDataFrame`
"""
# build a topocentric point at the designated geodetic point
topos = wgs84.latlon(point.latitude, point.longitude)
# load the timescale and define starting and ending points
ts = load.timescale()
t0 = ts.from_datetime(start)
t1 = ts.from_datetime(end)
# load the ephemerides
eph = load("de421.bsp")
# convert orbit to tle
orbit = satellite.orbit.to_tle()
# construct a satellite for propagation
sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name)
# compute the initial satellite height (altitude)
satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m
# compute the minimum altitude angle required for observation
min_altitude = compute_min_altitude(
satellite_height,
instrument.field_of_regard
if sample_distance is None
else swath_width_to_field_of_regard(satellite_height, sample_distance),
)
# compute the maximum access time to filter bad data
max_access_time = timedelta(
seconds=compute_max_access_time(satellite_height, min_altitude)
)
# TODO: consider instrument operational intervals
ops_intervals = pd.Series(
[pd.Interval( | pd.Timestamp(start) | pandas.Timestamp |
import os
import pandas as pd
import numpy as np
import rasterio
from mapBiomas_dictionaries import year_band
def reclassified_pixels(year):
band = year_band.get(year)
open_band = wp_raster.read(band)
pixels = np.count_nonzero(open_band)
converted = open_band * (open_band != wp_raster.read(band - 1))
data.append(round(np.count_nonzero(converted) / pixels * 100, 2))
parks = pd.read_csv('../output/ceara_wind_parks.csv')
column_names = ['wp_id', 'comm_year', 'share', 'no_classes', 'max_class',
'2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']
dataset = | pd.DataFrame(columns=column_names) | pandas.DataFrame |
from unittest import TestCase
import pandas as pd
import numpy as np
import pandas_validator as pv
from pandas_validator.core.exceptions import ValidationError
class BaseSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.BaseSeriesValidator(series_type=np.int64)
def test_is_valid_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_should_return_true_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertIsNone(self.validator.validate(series))
def test_should_return_false_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertRaises(ValidationError, self.validator.validate, series)
class IntegerSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.IntegerSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0, 1, 2])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-1, 0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0, 1, 2, 3])
self.assertFalse(self.validator.is_valid(series))
class FloatSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.FloatSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0., 1., 2.])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_integer_series(self):
series = pd.Series([0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-0.1, 0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = | pd.Series([0., 1., 2.1]) | pandas.Series |
# -*- encoding: utf-8 -*-
from json import encoder
import multiprocessing
import time
import json
import yaml
import os
import math
import numpy as np
import pandas as pd
from pandas import DataFrame as df
from itertools import product
from random import random, choice, seed
from typing import Tuple
from deap import creator, base, tools, algorithms
from wtpy import WtBtEngine, EngineType
from wtpy.apps import WtBtAnalyst
def fmtNAN(val, defVal=0):
if math.isnan(val):
return defVal
return val
class ParamInfo:
'''
参数信息类
'''
def __init__(self, name: str, start_val=None, end_val=None, step_val=None, ndigits=1, val_list: list = None):
self.name = name # 参数名
self.start_val = start_val # 起始值
self.end_val = end_val # 结束值
self.step_val = step_val # 变化步长
self.ndigits = ndigits # 小数位
self.val_list = val_list # 指定参数
def gen_array(self):
if self.val_list is not None:
return self.val_list
values = list()
curVal = round(self.start_val, self.ndigits)
while curVal < self.end_val:
values.append(curVal)
curVal += self.step_val
curVal = round(curVal, self.ndigits)
if curVal >= self.end_val:
curVal = self.end_val
break
values.append(round(curVal, self.ndigits))
return values
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
class WtCtaGAOptimizer:
'''
参数优化器\n
主要用于做策略参数优化的
'''
def __init__(self, worker_num: int = 2, MU: int = 80, population_size: int = 100, ngen_size: int = 20,
cx_prb: float = 0.9, mut_prb: float = 0.1):
'''
构造函数\n
@worker_num 工作进程个数,默认为2,可以根据CPU核心数设置,由于计算回测值是从文件里读取,因此进程过多可能会出现冲突\n
@MU 每一代选择的个体数\n
@population_size 种群数\n
@ngen_size 进化代数\n
@cx_prb 交叉概率\n
@mut_prb 变异概率
'''
self.worker_num = worker_num
self.running_worker = 0
self.mutable_params = dict()
self.fixed_params = dict()
self.env_params = dict()
# 遗传算法优化参数设置
self.optimizing_target = "胜率"
self.optimizing_target_func = None # 适应度函数,原则上要求为非负
self.population_size = population_size # 种群数
self.ngen_size = ngen_size # 进化代数,即优化迭代次数,根据population_size大小设定
self.MU = MU # 每一代选择的个体数,可以取个体数的0.8倍
self.lambda_ = self.population_size # 下一代产生的个体数
self.cx_prb = cx_prb # 建议取0.4~0.99之间
self.mut_prb = mut_prb # 建议取0.0001~0.1之间
self.cpp_stra_module = None
self.cache_dict = multiprocessing.Manager().dict() # 缓存中间结果
def add_mutable_param(self, name: str, start_val, end_val, step_val, ndigits=1):
'''
添加可变参数\n
@name 参数名\n
@start_val 起始值\n
@end_val 结束值\n
@step_val 步长\n
@ndigits 小数位
'''
self.mutable_params[name] = ParamInfo(name=name, start_val=start_val, end_val=end_val, step_val=step_val,
ndigits=ndigits)
def add_listed_param(self, name: str, val_list: list):
'''
添加限定范围的可变参数\n
@name 参数名\n
@val_list 参数值列表
'''
self.mutable_params[name] = ParamInfo(name=name, val_list=val_list)
def add_fixed_param(self, name: str, val):
'''
添加固定参数\n
@name 参数名\n
@val 值\n
'''
self.fixed_params[name] = val
def generate_settings(self):
''' 生成优化参数组合 '''
# 参数名列表
name_list = self.mutable_params.keys()
param_list = []
for name in name_list:
paramInfo = self.mutable_params[name]
values = paramInfo.gen_array()
param_list.append(values)
# 使用迭代工具产生参数对组合
products = list(product(*param_list))
# 把参数对组合打包到字典列表里
settings = []
[settings.append(dict(zip(name_list, p))) for p in products]
return settings
def set_optimizing_target(self, target: str):
''' 设置优化目标名称,可从summary中已有数据中选取优化目标 '''
self.optimizing_target = target
def set_optimizing_func(self, calculator, target_name: str = None):
''' 根据summary数据自定义优化目标值 '''
self.optimizing_target_func = calculator
if target_name is None:
target_name = "适应值"
self.set_optimizing_target(target_name)
def mututate_individual(self, individual, indpb):
"""
变异函数
:param individual: 个体,实际为策略参数
:param indpb: 变异概率
:return: 变异后的个体
"""
size = len(individual)
param_list = self.generate_settings()
settings = [list(item.items()) for item in param_list]
for i in range(size):
if random() < indpb:
individual[i] = settings[i]
return individual,
def evaluate_func(self, start_time, end_time, cache_dict: dict, params):
"""
适应度函数
:return:
"""
# 参数传递可能出现异常
# 比如[(k1, 0.1), (k2, 0.1)]可能在编码出新的参数组时变为[[(k1, 0.1), (k2, 0.2)], (k2, 0.3)]的情况
tmp_params = dict()
temp = []
[[temp.append(jj) for jj in ii] if isinstance(ii, list) else temp.append(ii) for ii in params]
names = [itm[0] for itm in temp]
names1 = list({}.fromkeys(names).keys())
if len(names1) < len(names):
indexes = [names.index(ii) for ii in names1]
temp = [temp[i] for i in indexes]
if len(temp) < 1:
print(f"Empty parameters: {params}")
return 0,
for cell in temp:
tmp_params[cell[0]] = cell[1]
# strategy name
strName = [self.name_prefix[:-1]]
[strName.extend([key, tmp_params[key]]) for key in tmp_params.keys()]
strName.extend([start_time, end_time])
strName = [str(item) for item in strName]
strName = "_".join(strName)
is_yaml = True
fname = "logcfg_tpl.yaml"
if not os.path.exists(fname):
is_yaml = True
fname = "logcfg_tpl.json"
f = open(fname, "r")
content = f.read()
f.close()
content = content.replace("$NAME$", strName)
if is_yaml:
content = json.dumps(yaml.full_load(content))
engine = WtBtEngine(eType=EngineType.ET_CTA, logCfg=content, isFile=False)
engine.init(self.env_params["deps_dir"], self.env_params["cfgfile"])
engine.configBacktest(int(start_time), int(end_time))
engine.configBTStorage(mode=self.env_params["storage_type"], path=self.env_params["storage_path"],
storage=self.env_params["storage"])
time_range = (int(start_time), int(end_time))
tmp_params["name"] = strName
tmp_params.update(self.fixed_params)
if self.cpp_stra_module is not None:
tmp_params.pop("name")
engine.setExternalCtaStrategy(strName, self.cpp_stra_module, self.cpp_stra_type, tmp_params)
else:
straInfo = self.strategy_type(**tmp_params)
engine.set_cta_strategy(straInfo)
engine.commitBTConfig()
engine.run_backtest()
engine.release_backtest()
summary = self.__ayalyze_result__(strName, time_range, tmp_params)
if self.optimizing_target_func:
result = self.optimizing_target_func(summary) # tuple类型
else:
result = summary[self.optimizing_target],
# if strName not in cache_dict.keys(): # 缓存结果
# tmp_params.update({self.optimizing_target: result[0]})
# cache_dict[strName] = tmp_params
tmp_params.update({self.optimizing_target: result[0]})
cache_dict[strName] = tmp_params
return result
def set_strategy(self, typeName: type, name_prefix: str):
'''
设置策略\n
@typeName 策略类名\n
@name_prefix 命名前缀,用于自动命名用,一般为格式为"前缀_参数1名_参数1值_参数2名_参数2值"
'''
self.strategy_type = typeName
self.name_prefix = name_prefix
return
def set_cpp_strategy(self, module: str, type_name: type, name_prefix: str):
'''
设置CPP策略\n
@module 模块文件\n
@typeName 策略类名\n
@name_prefix 命名前缀,用于自动命名用,一般为格式为"前缀_参数1名_参数1值_参数2名_参数2值"
'''
self.cpp_stra_module = module
self.cpp_stra_type = type_name
self.name_prefix = name_prefix
return
def config_backtest_env(self, deps_dir: str, cfgfile: str = "configbt.yaml", storage_type: str = "csv",
storage_path: str = None, storage: dict = None):
'''
配置回测环境\n
@deps_dir 依赖文件目录\n
@cfgfile 配置文件名\n
@storage_type 存储类型,csv/bin等\n
@storage_path 存储路径
'''
self.env_params["deps_dir"] = deps_dir
self.env_params["cfgfile"] = cfgfile
self.env_params["storage_type"] = storage_type
self.env_params["storage"] = storage
self.env_params["storage_path"] = storage_path
def config_backtest_time(self, start_time: int, end_time: int):
'''
配置回测时间,可多次调用配置多个回测时间区间\n
@start_time 开始时间,精确到分钟,格式如201909100930\n
@end_time 结束时间,精确到分钟,格式如201909100930
'''
if "time_ranges" not in self.env_params:
self.env_params["time_ranges"] = []
self.env_params["time_ranges"].append([start_time, end_time])
def gen_params(self, markerfile: str = "strategies.json"):
'''
生成回测任务
'''
# name_list = self.mutable_params.keys()
param_list = self.generate_settings()
stra_names = dict()
time_range = self.env_params["time_ranges"]
start_time = time_range[0][0]
end_time = time_range[0][1]
thisGrp = self.fixed_params.copy() # 复制固定参数
for setting in param_list:
straName = self.name_prefix
temp_setting = []
[temp_setting.extend([key, setting[key]]) for key in setting.keys()]
temp_setting.extend([start_time, end_time])
straName += "_".join([str(i) for i in temp_setting])
thisGrp["name"] = straName
thisGrp["start_time"] = start_time
thisGrp["end_time"] = end_time
stra_names[straName] = thisGrp
param_group = {"start_time": start_time, "end_time": end_time}
# 将每一组参数和对应的策略ID落地到文件中,方便后续的分析
f = open(markerfile, "w")
f.write(json.dumps(obj=stra_names, sort_keys=True, indent=4))
f.close()
return param_group
def __ayalyze_result__(self, strName: str, time_range: tuple, params: dict):
folder = "./outputs_bt/%s/" % (strName)
try:
df_closes = pd.read_csv(folder + "closes.csv", engine="python")
df_funds = pd.read_csv(folder + "funds.csv", engine="python")
except Exception as e: # 如果读取csv文件出现异常,则按文本格式读取
df_closes = read_closes(folder + "closes.csv")
df_funds = read_funds(folder + "funds.csv")
df_wins = df_closes[df_closes["profit"] > 0]
df_loses = df_closes[df_closes["profit"] <= 0]
ay_WinnerBarCnts = df_wins["closebarno"] - df_wins["openbarno"]
ay_LoserBarCnts = df_loses["closebarno"] - df_loses["openbarno"]
total_winbarcnts = ay_WinnerBarCnts.sum()
total_losebarcnts = ay_LoserBarCnts.sum()
total_fee = df_funds.iloc[-1]["fee"]
totaltimes = len(df_closes) # 总交易次数
wintimes = len(df_wins) # 盈利次数
losetimes = len(df_loses) # 亏损次数
winamout = df_wins["profit"].sum() # 毛盈利
loseamount = df_loses["profit"].sum() # 毛亏损
trdnetprofit = winamout + loseamount # 交易净盈亏
accnetprofit = trdnetprofit - total_fee # 账户净盈亏
winrate = wintimes / totaltimes if totaltimes > 0 else 0 # 胜率
avgprof = trdnetprofit / totaltimes if totaltimes > 0 else 0 # 单次平均盈亏
avgprof_win = winamout / wintimes if wintimes > 0 else 0 # 单次盈利均值
avgprof_lose = loseamount / losetimes if losetimes > 0 else 0 # 单次亏损均值
winloseratio = abs(avgprof_win / avgprof_lose) if avgprof_lose != 0 else "N/A" # 单次盈亏均值比
max_consecutive_wins = 0 # 最大连续盈利次数
max_consecutive_loses = 0 # 最大连续亏损次数
avg_bars_in_winner = total_winbarcnts / wintimes if wintimes > 0 else "N/A"
avg_bars_in_loser = total_losebarcnts / losetimes if losetimes > 0 else "N/A"
consecutive_wins = 0
consecutive_loses = 0
for idx, row in df_closes.iterrows():
profit = row["profit"]
if profit > 0:
consecutive_wins += 1
consecutive_loses = 0
else:
consecutive_wins = 0
consecutive_loses += 1
max_consecutive_wins = max(max_consecutive_wins, consecutive_wins)
max_consecutive_loses = max(max_consecutive_loses, consecutive_loses)
summary = params.copy()
summary["开始时间"] = time_range[0]
summary["结束时间"] = time_range[1]
summary["总交易次数"] = totaltimes
summary["盈利次数"] = wintimes
summary["亏损次数"] = losetimes
summary["毛盈利"] = float(winamout)
summary["毛亏损"] = float(loseamount)
summary["交易净盈亏"] = float(trdnetprofit)
summary["胜率"] = winrate * 100
summary["单次平均盈亏"] = avgprof
summary["单次盈利均值"] = avgprof_win
summary["单次亏损均值"] = avgprof_lose
summary["单次盈亏均值比"] = winloseratio
summary["最大连续盈利次数"] = max_consecutive_wins
summary["最大连续亏损次数"] = max_consecutive_loses
summary["平均盈利周期"] = avg_bars_in_winner
summary["平均亏损周期"] = avg_bars_in_loser
summary["平均账户收益率"] = accnetprofit / totaltimes if totaltimes > 0 else 0
f = open(folder + "summary.json", mode="w")
f.write(json.dumps(obj=summary, indent=4))
f.close()
return summary
def run_ga_optimizer(self, params: dict = None):
""" 执行GA优化 """
# 遗传算法参数空间
buffer = self.generate_settings()
settings = [list(itm.items()) for itm in buffer]
def generate_parameter():
return choice(settings)
pool = multiprocessing.Pool(self.worker_num) # 多线程设置
toolbox = base.Toolbox()
toolbox.register("individual", tools.initIterate, creator.Individual, generate_parameter)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", self.evaluate_func, params["start_time"], params["end_time"], self.cache_dict)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", self.mututate_individual, indpb=0.05)
toolbox.register("select", tools.selNSGA2)
toolbox.register("map", pool.map) # 多线程优化,可能会报错
# seed(12555888) # 固定随机数种子
pop = toolbox.population(self.population_size)
# hof = tools.ParetoFront() # 非占优最优集
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats.register("avg", np.mean, axis=0)
# stats.register("std", np.std, axis=0)
# stats.register("min", np.min, axis=0)
stats.register("max", np.max, axis=0)
# Run ga optimization
print("*" * 50)
print(f"开始执行遗传算法优化...")
print(f"参数优化空间: {len(settings)}")
print(f"每代族群总数: {self.population_size}")
print(f"优良个体筛选数: {self.MU}")
print(f"迭代次数: {self.ngen_size}")
print(f"交叉几率: {self.cx_prb:.2%}")
print(f"变异几率: {self.mut_prb:.2%}")
begin = time.perf_counter()
_, logbook = algorithms.eaMuPlusLambda(pop, toolbox, self.MU, self.lambda_, self.cx_prb, self.mut_prb,
self.ngen_size, stats, verbose=False, halloffame=hof)
end = time.perf_counter()
print(f"算法优化完成,耗时: {end - begin: .2f} 秒")
print("*" * 50)
# # 处理结果
# optimizing_value = [item['max'][0] for item in logbook]
# optimizing_params = [{item[0]: item[1]} for item in hof[0]]
# optimizing_params.append({f"{self.optimizing_target}": max(optimizing_value)})
return
def go(self, out_marker_file: str = "strategies.json",
out_summary_file: str = "total_summary.csv"):
'''
启动优化器\n
@markerfile 标记文件名,回测完成以后分析会用到
'''
params = self.gen_params(out_marker_file)
self.run_ga_optimizer(params)
# 获取所有的值
results = list(self.cache_dict.values())
header = list(results[0].keys())
data = [list(itm.values()) for itm in results]
df_results = pd.DataFrame(data, columns=header)
df_results = df_results[["name", self.optimizing_target]]
# 开始汇总回测结果
f = open(out_marker_file, "r")
content = f.read()
f.close()
obj_stras = json.loads(content)
total_summary = list()
for straName in obj_stras:
filename = "./outputs_bt/%s/summary.json" % (straName)
if not os.path.exists(filename):
# print("%s不存在,请检查数据" % (filename))
continue
f = open(filename, "r")
content = f.read()
f.close()
obj_summary = json.loads(content)
total_summary.append(obj_summary)
df_summary = df(total_summary)
# 汇总结果
df_summary = | pd.merge(df_summary, df_results, how="inner", on="name") | pandas.merge |
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# library
import os, warnings, random
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
## Seeder
# :seed to make all processes deterministic # type: int
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
## -------------------
## Memory Reducer
# :df pandas dataframe to reduce size # type: pd.DataFrame()
# :verbose # type: bool
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
## -------------------
### set up
SEED = 42
seed_everything(SEED)
LOCAL_TEST = False
#################################################################################
print('Load Data')
train_df = pd.read_csv('data/train_transaction.csv')
test_df = pd.read_csv('data/test_transaction.csv')
test_df['isFraud'] = 0
train_identity = pd.read_csv('data/train_identity.csv')
test_identity = pd.read_csv('data/test_identity.csv')
########################### Base check ############################################
if LOCAL_TEST:
for df2 in [train_df, test_df, train_identity, test_identity]:
df = reduce_mem_usage(df2)
for col in list(df):
if not df[col].equals(df2[col]):
print('Bad transformation', col)
########################### Base Minification ####################################
train_df = reduce_mem_usage(train_df)
test_df = reduce_mem_usage(test_df)
train_identity = reduce_mem_usage(train_identity)
test_identity = reduce_mem_usage(test_identity)
########################### card4, card6, ProductCD#################################
# Converting Strings to ints(or floats if nan in column) using frequency encoding
# We will be able to use these columns as category or as numerical feature
for col in ['card4', 'card6', 'ProductCD']:
print('Encoding', col)
temp_df = pd.concat([train_df[[col]], test_df[[col]]])
col_encoded = temp_df[col].value_counts().to_dict()
train_df[col] = train_df[col].map(col_encoded)
test_df[col] = test_df[col].map(col_encoded)
print(col_encoded)
########################### M columns #########################################
# Converting Strings to ints(or floats if nan in column)
for col in ['M1','M2','M3','M5','M6','M7','M8','M9']:
train_df[col] = train_df[col].map({'T':1, 'F':0})
test_df[col] = test_df[col].map({'T':1, 'F':0})
for col in ['M4']:
print('Encoding', col)
temp_df = | pd.concat([train_df[[col]], test_df[[col]]]) | pandas.concat |
import pandas as pd
import numpy as np
s = pd.Series(['丁一', '王二', '张三'])
print(s)
a = pd.DataFrame([[1, 2], [3, 4], [5, 6]])
print(f'\n{a}')
b = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['date', 'score'], index=['A', 'B', 'C'])
print(f'\n{b}')
c = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import contextlib
import datetime
import hypothesis.extra.numpy as hyp_np
import hypothesis.strategies as hyp_st
import numpy as np
import pandas as pd
from kartothek.core.uuid import gen_uuid_object
try:
from freezegun.api import real_date as date
except ImportError:
from datetime import date
try:
# python 3:
from unittest import mock
except ImportError:
import mock
TIME_TO_FREEZE = datetime.datetime(2000, 1, 1, 1, 1, 1, 1)
TIME_TO_FREEZE_ISO = "2000-01-01T01:01:01.000001"
TIME_TO_FREEZE_ISO_QUOTED = "2000-01-01T01%3A01%3A01.000001"
def get_dataframe_alltypes():
"""
Return a pandas DataFrame of length one with a column for each commonly used data types
"""
# fmt: off
not_nested = get_dataframe_not_nested()
nested_types = pd.DataFrame(
{
"array_int8": pd.Series([np.array([1], dtype=np.int8)], dtype=object),
"array_int16": pd.Series([np.array([1], dtype=np.int16)], dtype=object),
"array_int32": pd.Series([np.array([1], dtype=np.int32)], dtype=object),
"array_int64": pd.Series([np.array([1], dtype=np.int64)], dtype=object),
"array_uint8": pd.Series([np.array([1], dtype=np.uint8)], dtype=object),
"array_uint16": pd.Series([np.array([1], dtype=np.uint16)], dtype=object),
"array_uint32": pd.Series([np.array([1], dtype=np.uint32)], dtype=object),
"array_uint64": pd.Series([np.array([1], dtype=np.uint64)], dtype=object),
"array_float32": pd.Series([np.array([1], dtype=np.float32)], dtype=object),
"array_float64": pd.Series([np.array([1], dtype=np.float64)], dtype=object),
"array_unicode": pd.Series([np.array(["Ö"], dtype=object)], dtype=object),
}
)
return | pd.concat([not_nested, nested_types], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""System transmission plots.
This code creates transmission line and interface plots.
@author: <NAME>, <NAME>
"""
import os
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, InputSheetError, MissingMetaData, UnsupportedAggregation, MissingZoneData)
class MPlot(PlotDataHelper):
"""transmission MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The transmission.py module contains methods that are
related to the transmission network.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.font_defaults = mconfig.parser("font_settings")
def line_util(self, **kwargs):
"""Creates a timeseries line plot of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the y-axis.
The plot will default to showing the 10 highest utilized lines. A Line category
can also be passed instead, using the property field in the Marmot_plot_select.csv
Each scenarios is plotted on a separate Facet plot.
This methods calls _util() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(**kwargs)
return outputs
def line_hist(self, **kwargs):
"""Creates a histogram of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the x-axis, with # lines on the y-axis.
Each bar is equal to a 0.05 utilization rate
The plot will default to showing all lines. A Line category can also be passed
instead using the property field in the Marmot_plot_select.csv
Each scenarios is plotted on a separate Facet plot.
This methods calls _util() and passes the hist=True argument to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(hist=True, **kwargs)
return outputs
def _util(self, hist: bool = False, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates utilization plots, line plot and histograms
This methods is called from line_util() and line_hist()
Args:
hist (bool, optional): If True creates a histogram of utilization.
Defaults to False.
prop (str, optional): Optional PLEXOS line category to display.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(facet=True,
multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"For all lines touching Zone = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.25)
data_table=[]
for n, scenario in enumerate(self.Scenarios):
self.logger.info(f"Scenario = {str(scenario)}")
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.warning("Column to Aggregate by is missing")
continue
try:
zone_lines = zone_lines.xs(zone_input)
zone_lines=zone_lines['line_name'].unique()
except KeyError:
self.logger.warning('No data to plot for scenario')
outputs[zone_input] = MissingZoneData()
continue
flow = self["line_Flow"].get(scenario).copy()
#Limit to only lines touching to this zone
flow = flow[flow.index.get_level_values('line_name').isin(zone_lines)]
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
limits = self["line_Import_Limit"].get(scenario).copy()
limits = limits.droplevel('timestamp').drop_duplicates()
limits.mask(limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
# This checks for a nan in string. If no scenario selected, do nothing.
if pd.notna(prop):
self.logger.info(f"Line category = {str(prop)}")
line_relations = self.meta.lines(scenario).rename(columns={"name":"line_name"}).set_index(["line_name"])
flow=pd.merge(flow,line_relations, left_index=True,
right_index=True)
flow=flow[flow["category"] == prop]
flow=flow.drop('category',axis=1)
flow = pd.merge(flow,limits[0].abs(),on = 'line_name',how='left')
flow['Util']=(flow['0_x'].abs()/flow['0_y']).fillna(0)
#If greater than 1 because exceeds flow limit, report as 1
flow['Util'][flow['Util'] > 1] = 1
annual_util=flow['Util'].groupby(["line_name"]).mean().rename(scenario)
# top annual utilized lines
top_utilization = annual_util.nlargest(10, keep='first')
color_dict = dict(zip(self.Scenarios,self.color_list))
if hist == True:
mplt.histogram(annual_util, color_dict,label=scenario, sub_pos=n)
else:
for line in top_utilization.index.get_level_values(level='line_name').unique():
duration_curve = flow.loc[line].sort_values(by='Util',
ascending=False).reset_index(drop=True)
mplt.lineplot(duration_curve, 'Util' ,label=line, sub_pos=n)
axs[n].set_ylim((0,1.1))
data_table.append(annual_util)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
# add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
if hist == True:
if pd.notna(prop):
prop_name = 'All Lines'
else:
prop_name = prop
plt.ylabel('Number of lines', color='black',
rotation='vertical', labelpad=30)
plt.xlabel(f'Line Utilization: {prop_name}', color='black',
rotation='horizontal', labelpad=30)
else:
if pd.notna(prop):
prop_name ='Top 10 Lines'
else:
prop_name = prop
plt.ylabel(f'Line Utilization: {prop_name}', color='black',
rotation='vertical', labelpad=60)
plt.xlabel('Intervals', color='black',
rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
try:
del annual_util,
except:
continue
Data_Out = pd.concat(data_table)
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def int_flow_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of interchange flows and their import and export limits.
Each interchange is potted on a separate facet plot.
The plot includes every interchange that originates or ends in the aggregation zone.
This can be adjusted by passing a comma separated string of interchanges to the property input.
The code will create either a timeseries or duration curve depending on
if the word 'duration_curve' is in the figure_name.
To make a duration curve, ensure the word 'duration_curve' is found in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for zone_input in self.Zones:
self.logger.info(f"For all interfaces touching Zone = {zone_input}")
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).copy().droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns=['interface_category', 'units'], inplace=True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).copy().droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns=['interface_category', 'units'], inplace=True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop):
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf, level='interface_name') / 1000
single_int.index = single_int.index.droplevel(['interface_category','units'])
single_int.columns = [interf]
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
if pd.notna(start_date_range):
single_int = single_int[start_date_range : end_date_range]
limits = limits[start_date_range : end_date_range]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int, interf,
label=f"{scenario}\n interface flow",
sub_pos=n)
# Only print limits if it doesn't change monthly or if you are plotting a time series.
# Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits, 'export limit',
label='export limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
mplt.lineplot(limits, 'import limit',
label='import limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(f"{interf} not found in results. Have you tagged "
"it with the 'Must Report' property in PLEXOS?")
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
mplt.add_legend()
plt.ylabel('Flow (GW)', color='black', rotation='vertical',
labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def int_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""#TODO: Finish Docstring
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = | pd.DataFrame() | pandas.DataFrame |
# TODO decide whether include MAX PV and MAX ST or the percentage of area usage
import pandas as pd
import os
def create_decentral_overview(components_csv_data):
# defining columns of the sheet including decentralized components
decentral_columns = ["Building", "PV 1", "Max. PV 1", "PV 2", "Max. PV 2", "PV 3", "Max. PV 3", "PV 4", "Max. PV 4",
"PV 5", "Max. PV 5", "Installed PV", "Max. PV", "STC 1", "Max. STC 1", "STC 2", "Max. STC 2",
"STC 3", "Max. STC 3", "STC 4", "Max. STC 4", "STC 5", "Max. STC 5", "Installed STC",
"Max. STC", "Gasheating-System", "ASHP", "GCHP", "Battery-Storage", "Heat Transformer",
"Electric Heating"]
# defining units for decentral components
decentral_columns_units = {"Building": "", "PV 1": "(kW)", "Max. PV 1": "(kW)", "PV 2": "(kW)", "Max. PV 2": "(kW)",
"PV 3": "(kW)", "Max. PV 3": "(kW)", "PV 4": "(kW)", "Max. PV 4": "(kW)",
"PV 5": "(kW)", "Max. PV 5": "(kW)", "Installed PV": "(kW)", "Max. PV": "(kW)",
"STC 1": "(kW)", "Max. STC 1": "(kW)", "STC 2": "(kW)", "Max. STC 2": "(kW)",
"STC 3": "(kW)", "Max. STC 3": "(kW)", "STC 4": "(kW)", "Max. STC 4": "(kW)",
"STC 5": "(kW)", "Max. STC 5": "(kW)", "Installed STC": "(kW)", "Max. STC": "(kW)",
"Gasheating-System": "(kW)", "ASHP": "(kW)", "GCHP": "(kW)", "Battery-Storage": "(kWh)",
"Heat Transformer": "(kW)", "Electric Heating": "(kW)"}
decentral_components = | pd.DataFrame(columns=decentral_columns) | pandas.DataFrame |
from enum import Enum
from random import random
import pandas
from aisoccer.team import *
class Game:
def __init__(self, blue_brain, red_brain, game_length=Constants.GAME_LENGTH, quiet_mode=False, record_game=False):
self.quiet_mode = quiet_mode
self.game_length = game_length
self.teams = [Team(blue_brain, 0), Team(red_brain, 1)]
self.state = PhyState(Constants.FIELD_LENGTH, Constants.FIELD_HEIGHT)
self.ball = None
self.move_df = {}
self.record_game = record_game
self.score = {
'red': 0,
'blue': 0
}
if self.record_game:
self.init_df()
self.start()
def start(self):
self.state.clear()
self.ball = Ball(Constants.BALL_RADIUS, Constants.FIELD_LENGTH / 2, Constants.FIELD_HEIGHT / 2)
for team in self.teams:
for player in team.players:
self.state.add_body(player.body)
team.reset()
self.state.add_body(self.ball.body)
self.ball.body.velocity = np.array([random() - 0.5, random() - 0.5])
def tick(self):
if self.state.ticks >= self.game_length:
if not self.quiet_mode:
print("Game Over!")
return GameResult.end
elif self.is_red_goal():
self.score['red'] += 1
if not self.quiet_mode:
print("GOAL! Red!")
print('Score: Blue {0:2d} / Red {1:2d} (at {2:3.2f}%)'
.format(self.score['blue'], self.score['red'],
self.game_time_complete() * 100))
self.start()
return GameResult.goal_red
elif self.is_blue_goal():
self.score['blue'] += 1
if not self.quiet_mode:
print("GOAL! Blue!")
print('Score: Blue {0:2d} / Red {1:2d} (at {2:3.2f}%)'
.format(self.score['blue'], self.score['red'],
self.game_time_complete() * 100))
self.start()
return GameResult.goal_blue
else:
self.run_brains()
self.limit_velocities()
self.state.tick()
return GameResult.nothing
def is_red_goal(self):
return self.ball.body.position[0] < Constants.GOAL_WIDTH + Constants.BALL_RADIUS
def is_blue_goal(self):
return self.ball.body.position[0] > (Constants.FIELD_LENGTH - 1) - (
Constants.GOAL_WIDTH + Constants.BALL_RADIUS)
def game_time_complete(self):
return float(self.state.ticks) / float(self.game_length)
def run_brains(self):
blue_team = self.teams[0]
red_team = self.teams[1]
blue_players_pos = blue_team.position_matrix()
blue_players_vel = blue_team.velocity_matrix()
red_players_pos = red_team.position_matrix()
red_players_vel = red_team.velocity_matrix()
ball_pos = self.ball.body.position
ball_vel = self.ball.body.velocity
blue_score = self.score['blue']
red_score = self.score['red']
game_time = self.game_time_complete()
blue_brain = blue_team.brain
red_brain = red_team.brain
blue_move = blue_brain.move(blue_players_pos, blue_players_vel,
red_players_pos, red_players_vel,
ball_pos, ball_vel,
blue_score, red_score,
game_time)
blue_team.apply_move(blue_move)
if self.record_game:
self.record_move(blue_players_pos, blue_players_vel,
red_players_pos, red_players_vel,
ball_pos, ball_vel,
blue_score, red_score,
game_time,
blue_brain.last_move)
# TODO: translate red positions and velocities
# so that both brains think that they are playing from left (0,y) to right (MAX_X,y)
red_move = red_brain.move(flip_pos(red_players_pos), flip_vel(red_players_vel),
flip_pos(blue_players_pos), flip_vel(blue_players_vel),
flip_pos(ball_pos), flip_vel(ball_vel),
red_score, blue_score,
game_time)
red_move = flip_acc(red_move)
red_team.apply_move(red_move)
def limit_velocities(self):
ball_velocity = self.ball.body.normal_velocity()
if ball_velocity > Constants.MAX_BALL_VELOCITY:
self.ball.body.velocity = np.multiply(self.ball.body.velocity, Constants.MAX_BALL_VELOCITY / ball_velocity)
for t in self.teams:
for p in t.players:
player_velocity = p.body.normal_velocity()
if player_velocity > Constants.MAX_PLAYER_VELOCITY:
p.body.velocity = np.multiply(p.body.velocity, Constants.MAX_PLAYER_VELOCITY / player_velocity)
def play(self):
while True:
status = self.tick()
if status == GameResult.end:
break
return self.score
def record_move(self, my_players_pos, my_players_vel,
opp_players_pos, opp_players_vel,
ball_pos, ball_vel,
my_score, opp_score,
game_time,
moves):
self.move_df["bp_x"].append(ball_pos[0])
self.move_df["bp_y"].append(ball_pos[1])
self.move_df["bv_x"].append(ball_vel[0])
self.move_df["bv_y"].append(ball_vel[1])
self.move_df["ms"].append(my_score)
self.move_df["os"].append(opp_score)
self.move_df["gt"].append(game_time)
for i in range(5):
self.move_df["mpp_" + str(i) + "_x"].append(my_players_pos[i][0])
self.move_df["mpp_" + str(i) + "_y"].append(my_players_pos[i][1])
self.move_df["mpv_" + str(i) + "_x"].append(my_players_vel[i][0])
self.move_df["mpv_" + str(i) + "_y"].append(my_players_vel[i][1])
self.move_df["opp_" + str(i) + "_x"].append(opp_players_pos[i][0])
self.move_df["opp_" + str(i) + "_y"].append(opp_players_pos[i][1])
self.move_df["opv_" + str(i) + "_x"].append(opp_players_vel[i][0])
self.move_df["opv_" + str(i) + "_y"].append(opp_players_vel[i][1])
self.move_df["m_" + str(i) + "_x"].append(moves[i][0])
self.move_df["m_" + str(i) + "_y"].append(moves[i][1])
def init_df(self):
df = {"bp_x": [],
"bp_y": [],
"bv_x": [],
"bv_y": [],
"ms": [],
"os": [],
"gt": []}
for i in range(5):
df["mpp_" + str(i) + "_x"] = []
df["mpp_" + str(i) + "_y"] = []
df["mpv_" + str(i) + "_x"] = []
df["mpv_" + str(i) + "_y"] = []
df["opp_" + str(i) + "_x"] = []
df["opp_" + str(i) + "_y"] = []
df["opv_" + str(i) + "_x"] = []
df["opv_" + str(i) + "_y"] = []
df["m_" + str(i) + "_x"] = []
df["m_" + str(i) + "_y"] = []
self.move_df = df
def save_game(self, save_file):
pandas_df = | pandas.DataFrame(self.move_df) | pandas.DataFrame |
"""dfenriching
This module illustrate examples that enrich a dataset using pandas.
"""
import pandas as pd
df = pd.read_csv('scooter.csv')
# Take a subset of the data
new = pd.DataFrame(df['start_location_name'].value_counts().head())
new.reset_index(inplace=True)
new.columns=['address', 'count']
print("First entries:\n"
"--------------\n"
"{0}".format(new))
print("")
# Collect only the street address (necessary to geocode)
# It also replaces the '@' with the word 'and'
n = new['address'].str.split(pat=',', n=1, expand=True)
replaced = n[0].str.replace("@", "and")
new['street'] = replaced
print("Adding a well formated 'street' column:\n"
"---------------------------------------\n"
"{0}".format(new))
print("")
# Enriching the dataset combining with other resources (geocodestreet.csv file)
geo = | pd.read_csv('geocodestreet.csv') | pandas.read_csv |
import os
import pandas as pd
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
CACHE_DIR = os.path.join(BASE_DIR, 'cache')
os.makedirs(CACHE_DIR, exist_ok=True)
new_dataframes = []
csv_files = [x for x in sorted(os.listdir(DATA_DIR), reverse=True) if x.endswith(".csv")]
for filename in csv_files:
year = filename.replace(".csv", "")
csv_path = os.path.join(DATA_DIR, filename)
this_df = pd.read_csv(csv_path)
this_df['filename'] = filename
this_df['year'] = year
new_dataframes.append(this_df)
all_dataframes = | pd.concat(new_dataframes) | pandas.concat |
import unittest
import numpy as np # type: ignore
import pandas as pd # type: ignore
from gpxpy.gpx import GPXTrackPoint, GPXTrackSegment # type: ignore
from gpx_data_utils import (
gpx_point_to_array,
gpx_segment_to_array,
gpx_segment_from_array
)
from gpx_stats import convert_path_to_feature, smoothen_coordinates
class TestConvertPathToArray(unittest.TestCase):
def setUp(self):
self.segment_1 = GPXTrackSegment([GPXTrackPoint(longitude=.1, latitude=1, elevation=10),
GPXTrackPoint(longitude=.2, latitude=2, elevation=20),
GPXTrackPoint(longitude=.3, latitude=3, elevation=30)])
self.expected_path_1 = \
np.array([[0., 0., 0.],
[1.0049875621, 0., 10],
[2.00997512422, 0., 20],
[0., 0., 0.]])
def test_segment_empty(self):
with self.assertRaises(AssertionError):
convert_path_to_feature(GPXTrackSegment(), 1)
def test_segment_too_long(self):
with self.assertRaises(AssertionError):
convert_path_to_feature(self.segment_1, 1)
def test_segment_short(self):
path = convert_path_to_feature(self.segment_1, 4)
np.testing.assert_array_almost_equal(path, self.expected_path_1)
class TestSmoothenCoordinatesShort(unittest.TestCase):
def setUp(self):
self.segment_1 = GPXTrackSegment([GPXTrackPoint(longitude=.1, latitude=1, elevation=10),
GPXTrackPoint(longitude=.2, latitude=2, elevation=20),
GPXTrackPoint(longitude=.3, latitude=3, elevation=30),
GPXTrackPoint(longitude=.5, latitude=-1, elevation=0)])
self.expected_1 = GPXTrackSegment([GPXTrackPoint(longitude=.2, latitude=2, elevation=20),
GPXTrackPoint(longitude=1./3, latitude=4./3, elevation=50./3)])
def test_segment_list_empty(self):
smoothen_coordinates([])
def test_segment_empty(self):
smoothen_coordinates([GPXTrackSegment()])
def test_segment_short_1(self):
input = [GPXTrackSegment(self.segment_1.points[0:3])]
smoothen_coordinates(input)
expected_result = [GPXTrackSegment([self.expected_1.points[0]])]
self.assertEqual(len(input), 1)
self.assertEqual(len(input[0].points), 1)
np.testing.assert_array_almost_equal(gpx_point_to_array(input[0].points[0]),
gpx_point_to_array(expected_result[0].points[0]))
def test_segment_short_2(self):
input = [GPXTrackSegment(self.segment_1.points)]
smoothen_coordinates(input)
expected_result = [self.expected_1]
self.assertEqual(len(input), 1)
self.assertEqual(len(input[0].points), len(expected_result[0].points))
np.testing.assert_array_almost_equal(gpx_point_to_array(input[0].points[0]),
gpx_point_to_array(expected_result[0].points[0]))
class TestSmoothenCoordinatesLonger(unittest.TestCase):
def setUp(self):
segment_1_as_array = np.random.normal(scale=5, size=(25, 3))
segment_1 = gpx_segment_from_array(segment_1_as_array)
segment_2_as_array = np.random.normal(scale=2, size=(9, 3))
segment_2 = gpx_segment_from_array(segment_2_as_array)
self.segments = [segment_1, segment_2]
self.segments_as_arrays = [segment_1_as_array, segment_2_as_array]
def test_window_size_3(self):
expected_as_df = [pd.DataFrame(self.segments_as_arrays[i]).rolling(3).mean()[2:] for i in
range(len(self.segments_as_arrays))]
smoothen_coordinates(self.segments, window_size=3)
for i in range(len(self.segments)):
np.testing.assert_array_almost_equal(expected_as_df[i].values,
gpx_segment_to_array(self.segments[i]))
def test_window_size_5(self):
expected_as_df = [ | pd.DataFrame(self.segments_as_arrays[i]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
def spinner_graph(*args, **kwargs):
return dbc.Spinner(dcc.Graph(*args, **kwargs))
def add_quarters(df):
df['Quarter'] = | pd.to_datetime(df['DT_FIM_EXERC']) | pandas.to_datetime |
import pandas as pd
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from evaluation.tasks.auto_task import AutoTask
class CrowSPairsDataset(Dataset):
def __init__(self):
super().__init__()
# TODO: maybe implement using HuggingFace Datasets
# https://huggingface.co/datasets/crows_pairs
# Load CrowS-Pairs dataset from URL
url = "https://raw.githubusercontent.com/nyu-mll/crows-pairs/master/data/crows_pairs_anonymized.csv"
df = | pd.read_csv(url) | pandas.read_csv |
from donbot import Donbot
from lxml import html, cssselect
from getvotes import GetVotes
import numpy as np
import pandas as pd
import texthero as hero
obj = GetVotes(username="skitter30", password="*")
target_user = "Auro"
#City that never sleeps, BoTC, White flag, Newbie 1900, Mini 2040
threads = {
# town
'name': ['city', 'botc', 'white flag', 'newbie 1900', 'mini 2040', \
# scum
'covid', 'downtown train', 'nomination mafia', 'jungle republic', 'newbie 1893', 'newbie 1898', 'crown of misery',
# 'unknown'
'game of thrones'],
# town
'thread': ["https://forum.mafiascum.net/viewtopic.php?f=54&t=82824", \
"https://forum.mafiascum.net/viewtopic.php?f=84&t=83094", \
"https://forum.mafiascum.net/viewtopic.php?f=150&t=81775", \
"https://forum.mafiascum.net/viewtopic.php?f=50&t=77781", \
"https://forum.mafiascum.net/viewtopic.php?f=53&t=77634",
# scum
"https://forum.mafiascum.net/viewtopic.php?f=3&t=82957", \
"https://forum.mafiascum.net/viewtopic.php?f=54&t=78257", \
"https://forum.mafiascum.net/viewtopic.php?f=52&t=78999", \
"https://forum.mafiascum.net/viewtopic.php?f=52&t=78634", \
"https://forum.mafiascum.net/viewtopic.php?f=50&t=77453", \
"https://forum.mafiascum.net/viewtopic.php?f=50&t=77673", \
"https://forum.mafiascum.net/viewtopic.php?f=52&t=78009",
# 'unknown'
"https://forum.mafiascum.net/viewtopic.php?f=3&t=83318"],
'alignment': ['town', 'town', 'town', 'town', 'town',
'scum', 'scum', 'scum', 'scum', 'scum', 'scum', 'scum',
'unknown']
}
threads = pd.DataFrame(threads, columns = ['name', 'thread', 'alignment'])
temp_posts = []
df = pd.DataFrame()
for ind in threads.index:
temp_posts = []
temp_posts.append(obj.getISOs(usernames = [target_user], thread=threads['thread'][ind]))
for posts in temp_posts:
temp_df = | pd.DataFrame([[post, threads['alignment'][ind], threads['name'][ind]] for post in posts]) | pandas.DataFrame |
import os
import subprocess
from math import floor
from textwrap import dedent
import pandas as pd
import numpy as np
from plotnine import *
from qiime2 import (
Artifact,
Metadata
)
from qiime2.plugins.taxa.methods import collapse
from qiime2.plugins.feature_table.methods import rarefy
from scripts.qiime2_helper.metadata_helper import (
load_metadata,
load_env_metadata,
convert_col_dtype
)
from scripts.qiime2_helper.artifact_helper import (
VALID_COLLAPSE_LEVELS,
check_artifact_type,
filter_by_abundance,
rename_taxa
)
from scripts.qiime2_helper.plotnine_helper import (
add_fill_colours_from_users
)
# Custom exception
from exceptions.exception import AXIOME3Error
VEGDIST_OPTIONS = {
"Manhattan": "manhattan",
"Euclidean": "euclidean",
"Canberra": "canberra",
"Bray-Curtis": "bray",
"Binomial": "binomial",
"Kulczynski": "kulczynski",
"Jaccard": "jaccard",
"Gower": "gower",
"altGower": "altGower",
"Morisita": "morisita",
"Horn-Morisita": "horn",
"Chao": "chao",
"Cao": "cao",
"Mahalanobis":"mahalanobis"
}
def collapse_taxa(feature_table_artifact, taxonomy_artifact, sampling_depth=0, collapse_level="asv"):
"""
Collapse feature table to user specified taxa level (ASV by default).
Input:
- QIIME2 artifact of type FeatureData[Taxonomy]
Returns:
- pd.DataFrame
(taxa/ASV as rows, samples as columns, numeric index, appends 'Taxon' column)
"""
collapse_level = collapse_level.lower()
if(collapse_level not in VALID_COLLAPSE_LEVELS):
raise AXIOME3Error("Specified collapse level, {collapse_level}, is NOT valid!".format(collapse_level=collapse_level))
# Rarefy the table to user specified sampling depth
if(sampling_depth < 0):
raise AXIOME3Error("Sampling depth cannot be a negative number!")
# don't rarefy is sampling depth equals 0
if(sampling_depth > 0):
try:
rarefied = rarefy(feature_table_artifact, sampling_depth=sampling_depth)
except ValueError:
raise AXIOME3Error("No samples or features left after rarefying at {}".format(sampling_depth))
feature_table_artifact = rarefied.rarefied_table
# handle ASV case
if(collapse_level == "asv"):
# By default, feature table has samples as rows, and ASV as columns
feature_table_df = feature_table_artifact.view(pd.DataFrame)
# Transpose feature table
feature_table_df_T = feature_table_df.T
# By default, taxonomy has ASV as rows, and metadata as columns
taxonomy_df = taxonomy_artifact.view(pd.DataFrame)
# Combine the two df (joins on index (ASV))
combined_df = feature_table_df_T.join(taxonomy_df, how='inner')
# Drop "Confidence" column and use numeric index
final_df = combined_df.drop(["Confidence"], axis="columns").reset_index(drop=True)
if(final_df.shape[0] == 0 or final_df.shape[1] == 0):
raise AXIOME3Error("No data to process. Please check if 1. input feature table is empty, 2. input taxonomy is empty, 3. input feature table and taxonomy share common ASVs.")
return final_df
try:
table_artifact = collapse(table=feature_table_artifact, taxonomy=taxonomy_artifact, level=VALID_COLLAPSE_LEVELS[collapse_level])
except ValueError:
raise AXIOME3Error("No data to process. Please check if 1. input feature table is empty, 2. input taxonomy is empty, 3. input feature table and taxonomy share common features.")
# By default, it has samples as rows, and taxa as columns
collapsed_df = table_artifact.collapsed_table.view(pd.DataFrame)
# Transpose
collapsed_df_T = collapsed_df.T
# Append "Taxon" column
collapsed_df_T["Taxon"] = collapsed_df_T.index
# Reset index
final_df = collapsed_df_T.reset_index(drop=True)
return final_df
def filter_by_total_count(feature_table_df, sum_axis=1):
"""
Remove samples that have total counts <= 5 (R complains if total count <= 5)
Inputs
- feature_table_df: feature table in pandas DataFrame
- sum_axis: axis to sum over.
Do axis=0 if sample as columns, taxa/ASV as rows (index)
Do axis=1 if sample as rows (index), taxa/ASV as columns
Default option assumes sample as row (index), taxa/ASV as columns
"""
row_sum = feature_table_df.sum(sum_axis)
to_keep = row_sum > 5
filtered_df = feature_table_df.loc[to_keep, ]
return filtered_df
def find_sample_intersection(feature_table_df, abundance_df, sample_metadata_df, environmental_metadata_df):
"""
Find intersection of feature table, sample metadata, and environmental metadata.
Inputs:
- feature_table_df: feature table in pandas DataFrame (samples as row, taxa/ASV as columns)
- abundance_df: feature table in pandas DataFrame (samples as row, taxa/ASV as columns; to overlay as taxa bubbles later)
- sample_metadata_df: sample metadata in pandas DataFrame (samples as row, metadata as columns)
- environmental_metadata_df: environmental metadata in pandas DataFrame (samples as row, metadata as columns)
Assumes sampleID as index
"""
combined_df = pd.concat([feature_table_df, abundance_df, sample_metadata_df, environmental_metadata_df], join="inner", axis=1)
intersection_samples = combined_df.index
if(len(intersection_samples) == 0):
raise AXIOME3Error("Feature table, sample metadata, and environmental metadata do NOT share any samples...")
intersection_feature_table_df = feature_table_df.loc[intersection_samples, ]
intersection_abundance_df = abundance_df.loc[intersection_samples, ]
intersection_sample_metadata_df = sample_metadata_df.loc[intersection_samples, ]
intersection_environmental_metadata_df = environmental_metadata_df.loc[intersection_samples, ]
# summary about samples not used
feature_table_omitted_samples = ','.join([str(sample) for sample in feature_table_df.index if sample not in intersection_samples])
sample_metadata_omitted_samples = ','.join([str(sample) for sample in sample_metadata_df.index if sample not in intersection_samples])
environmental_metadata_omitted_samples = ','.join([str(sample) for sample in environmental_metadata_df.index if sample not in intersection_samples])
sample_summary = dedent("""\
Omitted samples in feature table,{feature_table_omitted_samples}
Omitted samples in sample metadata,{sample_metadata_omitted_samples}
Omitted samples in environmental metadata,{environmental_metadata_omitted_samples}
""".format(
feature_table_omitted_samples=feature_table_omitted_samples,
sample_metadata_omitted_samples=sample_metadata_omitted_samples,
environmental_metadata_omitted_samples=environmental_metadata_omitted_samples
))
return intersection_feature_table_df, intersection_abundance_df, intersection_sample_metadata_df, intersection_environmental_metadata_df, sample_summary
def calculate_dissimilarity_matrix(feature_table, method="Bray-Curtis"):
"""
Calculates dissimilarity matarix using the feature table.
It uses R's vegan package (using rpy2 interface)
Inputs:
- feature_table_df: feature table (rpy2.robjects)
- method: dissimilarity index (see 'vegdist' R documentation for supported methods)
Outputs:
- distance matrix (rpy2.robjects)
"""
vegan = importr('vegan')
if (method not in VEGDIST_OPTIONS):
raise AXIOME3Error("Specified dissmilarity method, {method} is not supported!".format(method=method))
return vegan.vegdist(feature_table, VEGDIST_OPTIONS[method])
def calculate_ordination(dissimilarity_matrix):
"""
Calculates ordination.
It uses R's stats package (using rpy2 interface)
Inputs:
- dissimilarity_matrix: distance matrix of type rpy2.robjects.
Outputs:
- ordination (rpy2.robjects.)
"""
stats = importr('stats')
ordination = stats.cmdscale(dissimilarity_matrix, k=10, eig=True)
return ordination
def calculate_weighted_average(ordination, feature_table):
"""
Calculate weighted average scores of each taxa/ASV onto ordination
"""
vegan = importr('vegan')
points = ordination[ordination.names.index('points')]
wascores = vegan.wascores(points, feature_table)
return wascores
def project_env_metadata_to_ordination(ordination, env_metadata, PC_axis_one, PC_axis_two):
"""
"""
vegan = importr('vegan')
pc_axes = (PC_axis_one, PC_axis_two)
projection = vegan.envfit(ordination, env_metadata, choices=IntVector(pc_axes))
return projection
def combine_projection_arrow_with_r_sqr(projection):
"""
Cbind R2 with arrow matrix
"""
base = importr('base')
projection_vectors = projection[projection.names.index('vectors')]
arrow = projection_vectors[projection_vectors.names.index('arrows')]
r_sqr = projection_vectors[projection_vectors.names.index('r')]
pvals = projection_vectors[projection_vectors.names.index('pvals')]
projection_matrix = base.cbind(arrow, R2=r_sqr, pvals=pvals)
return projection_matrix
def generate_vector_arrow_df(projection_df, R2_threshold, pval_threshold):
"""
Generate vector arrows to overlay on triplot, and optionally filter it by user specified threshold
Inputs:
- projection_df: pandas DataFrame
environmental variables as row, PC dimensions and R2 value as columns
Returns:
- pandas DataFrame
environmental variables as row, PC dimensions as columns (labeled as PC1, PC2, ...)
"""
#if((projection_df['R2'] > R2_threshold).any() == False):
# raise ValueError("No entries left after applying R2 threshold, {}".format(R2_threshold))
filter_criteria = (projection_df['R2'] >= R2_threshold) & (projection_df['pvals'] <= pval_threshold)
filtered_df = projection_df.loc[filter_criteria, ]
vector_arrow_df = filtered_df.drop(columns=['R2', 'pvals'])
vector_arrow_df = vector_arrow_df.mul(np.sqrt(filtered_df['R2']), axis=0)
return vector_arrow_df
def rename_as_PC_columns(df, PC_axis_one=None, PC_axis_two=None):
"""
Rename pandas DataFrame column names as PC1, PC2, ...
"""
# Special case for environmental projection df
# It needs to be hardcoded because of the way df is created (refer to 'project_env_metadata_to_ordination()'')
if(PC_axis_one is not None and PC_axis_two is not None):
new_col_names = ['PC' + str(PC_axis_one), 'PC' + str(PC_axis_two)]
df.columns = new_col_names
return df
num_col = df.shape[1]
if(num_col == 0):
raise ValueError("Specified dataframe has zero columns...")
new_col_names = ['PC' + str(i) for i in range(1, num_col+1)]
df.columns = new_col_names
return df
def normalized_taxa_total_abundance(wascores_df, feature_table_df):
"""
Calculate normalized, total abundance of each taxa
Inputs:
- wascores_df: weighted average scores in pandas DataFrame (taxa as index, coordinates as columns)
- feature_table_df: feature table in pandas DataFrame (sample as index, taxa as columns)
"""
total_abundance = feature_table_df.to_numpy().sum()
taxa_count = feature_table_df.sum(axis=0)
# pandas treats 0/0 as 0 (does not raise ZeroDivisionError)
normalized_taxa_count = taxa_count / total_abundance
wascores_df['abundance'] = normalized_taxa_count
return wascores_df
def filter_by_wascore_threshold(normalized_wascores_df, wa_threshold):
"""
Filter weighted average DataFrame by normalized abundance
"""
if('abundance' not in normalized_wascores_df.columns):
raise AXIOME3Error("normalized taxa count column does not exist")
filtered_df = normalized_wascores_df[normalized_wascores_df['abundance'] > wa_threshold]
return filtered_df
def get_variance_explained(eig_vals):
"""
Calculate proportion explained per PC axis
Inputs:
- eig_vals: eigenvalues per PC axis. pandas Series
"""
num_row = eig_vals.shape[0]
total_variance = eig_vals.sum()
proportion_explained = eig_vals / total_variance
proportion_explained = proportion_explained * 100
new_index_names = ['PC' + str(i) for i in range(1, num_row+1)]
proportion_explained.index = new_index_names
proportion_explained.columns = ['proportion_explained']
return proportion_explained
def prep_triplot_input(sample_metadata_path, env_metadata_path, feature_table_artifact_path,
taxonomy_artifact_path, sampling_depth=0, ordination_collapse_level="asv",
wascores_collapse_level="phylum", dissmilarity_index="Bray-Curtis", R2_threshold=0.1,
pval_threshold=0.05, wa_threshold=0.1, PC_axis_one=1, PC_axis_two=2, output_dir='.'):
# Load sample metadata
sample_metadata_df = load_metadata(sample_metadata_path)
# Load environmental metadata
# and drop rows with missing values (WARN users?)
env_metadata_df = load_env_metadata(env_metadata_path)
env_metadata_df = env_metadata_df.dropna()
# Load feature table and collapse
feature_table_artifact = check_artifact_type(feature_table_artifact_path, "feature_table")
taxonomy_artifact = check_artifact_type(taxonomy_artifact_path, "taxonomy")
ordination_collapsed_df = collapse_taxa(feature_table_artifact, taxonomy_artifact, sampling_depth, ordination_collapse_level)
abundance_collapsed_df = collapse_taxa(feature_table_artifact, taxonomy_artifact, sampling_depth, wascores_collapse_level)
# Rename taxa for wascores collapsed df
original_taxa = | pd.Series(abundance_collapsed_df["Taxon"]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================================= #
# DS_generator.py #
# Author: <NAME> #
# Creation Date: 03/10/2020 #
# ============================================================================================= #
"""
Imports original DECAGON database and translates it into adjaceny matrices and enumeration
dictionaries. First the original dataset is filtered so it has no unlinked nodes creating a
consistent network. Then a fraction of the dataset is chosen, selecting a fixed number of
polypharmacy side effects given by parameter N (defaults to 964). With the reduced network,
the adjacency matrices and the node enumeration dictionaries are created and exported as a
pickle python3 readable file.
Parameters
----------
number of side effects : int, default=964
Number of joint drug side effects to be chosen from the complete dataset. If not given,
the program uses the maximum number of side effects used by the authors of DECAGON.
"""
# ============================================================================================= #
import argparse
import numpy as np
import pandas as pd
import scipy.sparse as sp
import pickle
from joblib import Parallel, delayed
parser = argparse.ArgumentParser(description='Remove outliers from datasets')
parser.add_argument('N', nargs='?',default =964,type=int, help="Number of side effects")
args = parser.parse_args()
N = args.N
# Import databases as pandas dataframes
PPI = pd.read_csv('original_data/bio-decagon-ppi.csv',sep=',')
DTI = pd.read_csv('original_data/bio-decagon-targets-all.csv',sep=',')
DDI = pd.read_csv('original_data/bio-decagon-combo.csv',sep=',')
DSE = pd.read_csv('original_data/bio-decagon-mono.csv',sep=',')
print('\nData loaded\n')
# Original number of interactions
orig_ppi = len(PPI.index)
orig_dti = len(DTI.index)
orig_ddi = len(DDI.index)
orig_dse = len(DSE.index)
# ============================================================================================= #
# REMOVING OUTLIERS
# PPI genes
PPI_genes = pd.unique(np.hstack((PPI['Gene 1'].values,PPI['Gene 2'].values)))
orig_genes_ppi = len(PPI_genes) # Original number of genes
# REDUCE DDI AND DSE DATABASES TO COMMON DRUGS ONLY
# DDI drugs
DDI_drugs = pd.unique(DDI[["STITCH 1", "STITCH 2"]].values.ravel())
orig_drugs_ddi = len(DDI_drugs) # Original number of drugs
orig_se_combo = len(pd.unique(DDI['Polypharmacy Side Effect'].values))
# Drugs with single side effects
DSE_drugs = pd.unique(DSE['STITCH'].values)
orig_drug_dse = len(DSE_drugs) # Original number of drugs
orig_se_mono = len(pd.unique(DSE['Side Effect Name']))
# Calculate the instersection of the DDI and DSE
# (i.e., the drugs in the interaction network that have single side effect)
inter_drugs = np.intersect1d(DDI_drugs,DSE_drugs,assume_unique=True)
# Choose only the entries in DDI that are in the intersection
DDI = DDI[np.logical_and(DDI['STITCH 1'].isin(inter_drugs).values,
DDI['STITCH 2'].isin(inter_drugs).values)]
# Some drugs in DDI that are common to all 3 datasets may only interact with genes that are
# non-common (outsiders). That is why we need to filter a second time using this array.
DDI_drugs = pd.unique(DDI[["STITCH 1", "STITCH 2"]].values.ravel())
DSE = DSE[DSE['STITCH'].isin(DDI_drugs)]
new_drugs_ddi = len(pd.unique(DDI[['STITCH 1','STITCH 2']].values.ravel()))
new_drugs_dse = len(pd.unique(DSE['STITCH'].values))
new_se_combo = len(pd.unique(DDI['Polypharmacy Side Effect'].values))
new_se_mono = len(pd.unique(DSE['Side Effect Name']))
# SELECT ONLY ENTRIES FROM DTI DATABASE THAT ARE PRESENT IN PREVIOUSLY REDUCED DATABASES
orig_genes_dti = len( | pd.unique(DTI['Gene'].values) | pandas.unique |
# Created Date: 12/09/2018
# Modified Date:
#
# Implements the Early Warning Alert Algorithm of Fire Crisis Classification module
# based on the forecasting weather data from FMI. It calculates the Fire Weather Index
# of Canadian Rating System.
# Also, it calculates the Fire Overall Crisis Level (PFRCL_Predicted Fire Crisis Level)
# based on FWI over the 9 days period
#
#----------------------------------------------------------------------------------------------------------
# Inputs: a) ftp files from EFFIS
#
# Outputs: TOP104_METRIC_REPORT which contains the ....
#
# Early Warning Alert Algorithm from Crisis Classification
#----------------------------------------------------------------------------------------------------------
#
import json, time, re
import os, errno, sys
import glob, gzip, pickle, shutil, tempfile, re, tarfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bus.bus_producer import BusProducer
from pathlib import Path
from datetime import datetime, timedelta
from collections import OrderedDict
from CRCL.FireCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report
from CRCL.FireCRisisCLassification.topic104Fire import topic104Fire
from CRCL.FireCRisisCLassification.Auxiliary_functions import Forest_Fire_Weather_Index, Fire_Overall_Crisis_Level
from CRCL.FireCRisisCLassification.Auxiliary_functions import open_netcdf, get_nc_file_contents, geo_idx, get_ftp, calc_Index
from CRCL.FireCRisisCLassification.Create_Queries_FirePilot import extract_forecasts_grid, extract_gribs
from CRCL.FireCRisisCLassification.parse_XML_dict import parse_XML_dict
from ftplib import FTP
from scipy.interpolate import griddata
from scipy.interpolate import Rbf
from scipy import interpolate
from netCDF4 import Dataset, num2date
def CrisisClassificationFire_PreEmerg():
#-----------------------------------------------------------------
ver = 'Ver11_2nd_Period'
# Create a directory to store the output files and TOPICS
#root_path = Path.cwd()
# Create a path
current_dirs_parent = os.getcwd()
root_path = current_dirs_parent + "/" + "CRCL/FireCRisisCLassification" + "/"
now = datetime.now()
directory = root_path + "TOPICS" + ver + "_" + str(now.year) + "_" + str(now.month) + "_" + str(now.day)
os.makedirs(directory, exist_ok=True)
# Start Timing Step 1
start_step1 = time.time()
# Store the time steps
time_duration_step = []
print("\n STEP 1: Fetch data from the ftp files from EFFIS \n")
#-----------------------------------------------------------------------------------
# STEP 1: Fetch data from the ftp files from EFFIS
#
#
#parameters for the get_ftp
url='dissemination.ecmwf.int'
Username='fire'
Password='<PASSWORD>'
# Points of Interest
points = [{'Name': 'Sueca', 'lat': 39.253509, 'long': -0.310381},
{'Name': 'Sollana', 'lat': 39.303946, 'long': -0.379010},
{'Name': 'Silla1', 'lat': 39.340604, 'long': -0.395129},
{'Name': 'Silla2', 'lat': 39.364153, 'long': -0.371332},
{'Name': 'Catarroja', 'lat': 39.371835, 'long': -0.350579},
{'Name':'<NAME>', 'lat':39.355179, 'long':-0.320472},
{'Name':'<NAME>', 'lat':39.386909, 'long': -0.331496}
]
# Center of the points of interest
N = float(len(points))
avglat = 0
avgln = 0
for p in points:
avglat = avglat + p['lat']
avgln = avgln + p['long']
center_points = [ round(avglat/N,5), round(avgln/N,5) ]
file_type = '*fwi.nc.gz'
fieldNames = ['fwi'] # ['danger_risk', 'fwi']
# fieldNames = ['danger_risk','bui','ffmc','dc', 'dmc','isi', 'dsr', 'fwi']
ncep_data = dict()
ncep_data['date_time'] = list()
# get the file name we dll from the ftp
ftp_dict = get_ftp(url, Username, Password)
# PATH variable include / at the end...
path = ftp_dict['PATH'] + str(ftp_dict['Date']) + '_fwi/fc/'
iter = [0] * len(fieldNames)
# Create data frame for all the points and FWI estimations
FWI = pd.DataFrame()
for pnt in range(len(points)):
datetime_x = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
days_x = 1
tempFWI = pd.DataFrame()
fwi_date = []
fwi_val = | pd.DataFrame(columns=['FWI_lin', 'FWI_near', 'FWI_cubic', 'FWI_max', 'FWI_min', 'FWI_std', 'FWI_mean']) | pandas.DataFrame |
"""Tests for `mllaunchpad.resource` module."""
# Stdlib imports
import json
import logging
import os
from collections import OrderedDict
from random import random
from unittest import mock
# Third-party imports
import numpy as np
import pandas as pd
import pytest
# Project imports
from mllaunchpad import resource as r
# Test ModelStore
# fmt: off
@pytest.fixture()
def modelstore_config():
return {
"model_store": {
"location": "./model_store",
},
"model": {
"name": "IrisModel",
"version": '0.0.2',
"module": "tree_model",
"train_options": {},
"predict_options": {},
},
"api": {
"name": "tree"
}
}
# fmt: on
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=False)
@mock.patch("{}.os.makedirs".format(r.__name__))
def test_modelstore_create(makedirs, path_exists, modelstore_config):
ms = r.ModelStore(modelstore_config)
ms._ensure_location()
makedirs.assert_called_once_with(
modelstore_config["model_store"]["location"]
)
path_exists.reset_mock()
makedirs.reset_mock()
path_exists.return_value = True
ms = r.ModelStore(modelstore_config)
ms._ensure_location()
assert not makedirs.called
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=False)
@mock.patch("{}.os.makedirs".format(r.__name__))
@mock.patch("{}.shutil.copy".format(r.__name__))
@mock.patch(
"{}.glob.glob".format(r.__name__), return_value=["old.pkl", "old.json"]
)
def test_modelstore_dump(glob, copy, makedirs, path_exists, modelstore_config):
with mock.patch(
"{}.open".format(r.__name__), mock.mock_open(), create=True
) as mo:
ms = r.ModelStore(modelstore_config)
ms.dump_trained_model(
modelstore_config, {"pseudo_model": 1}, {"pseudo_metrics": 2}
)
model_conf = modelstore_config["model"]
base_name = os.path.join(
modelstore_config["model_store"]["location"],
"{}_{}".format(model_conf["name"], model_conf["version"]),
)
calls = [
mock.call("{}.pkl".format(base_name), "wb"),
mock.call("{}.json".format(base_name), "w", encoding="utf-8"),
]
mo.assert_has_calls(calls, any_order=True)
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=True)
@mock.patch("{}.pickle.dump".format(r.__name__))
@mock.patch("{}.json.dump".format(r.__name__))
def test_modelstore_dump_extra_model_keys(
jsond, pickled, path_exists, modelstore_config
):
modelstore_config["model"]["extraparam"] = 42
modelstore_config["model"]["anotherparam"] = 23
modelstore_config["model"]["created"] = "colliding keys should not occur"
with mock.patch(
"{}.open".format(r.__name__), mock.mock_open(), create=True
) as _:
ms = r.ModelStore(modelstore_config)
ms.dump_trained_model(
modelstore_config, {"pseudo_model": 1}, {"pseudo_metrics": 2}
)
dumped = jsond.call_args[0][0]
print(modelstore_config)
print(dumped)
assert "extraparam" in dumped
assert dumped["extraparam"] == 42
assert "anotherparam" in dumped
assert dumped["anotherparam"] == 23
assert dumped["created"] != "colliding keys should not occur"
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=True)
@mock.patch("{}.pickle.dump".format(r.__name__))
@mock.patch("{}.json.dump".format(r.__name__))
def test_modelstore_train_report(
jsond, pickled, path_exists, modelstore_config
):
with mock.patch(
"{}.open".format(r.__name__), mock.mock_open(), create=True
) as _:
ms = r.ModelStore(modelstore_config)
ms.add_to_train_report("report_key", "report_val")
ms.dump_trained_model(
modelstore_config, {"pseudo_model": 1}, {"pseudo_metrics": 2}
)
dumped = jsond.call_args[0][0]
print(modelstore_config)
print(dumped)
assert "train_report" in dumped
assert "report_key" in dumped["train_report"]
assert dumped["train_report"]["report_key"] == "report_val"
assert "system" in dumped
assert "mllaunchpad_version" in dumped["system"]
assert "platform" in dumped["system"]
assert "packages" in dumped["system"]
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=True)
@mock.patch(
"{}.ModelStore._load_metadata".format(r.__name__),
side_effect=lambda _: {"the_json": round(random() * 1000)},
)
def test_list_models(_load_metadata, path_exists, modelstore_config, caplog):
model_jsons = [
"mymodel_1.0.0.json",
"mymodel_1.1.0.json",
"anothermodel_0.0.1.json",
]
backup_jsons = [
"mymodel_1.0.0_2021-08-01_18-00-00.json",
"mymodel_1.0.0_2021-07-31_12-00-00",
]
def my_glob(pattern):
if "previous" in pattern.lower():
return backup_jsons
else:
return model_jsons
with mock.patch(
"{}.glob.glob".format(r.__name__), side_effect=my_glob,
):
with caplog.at_level(logging.DEBUG):
ms = r.ModelStore(modelstore_config)
models = ms.list_models()
print(models)
assert (
len(models) == 2
) # one model ID named "mymodel" and one named "anothermodel"
assert models["mymodel"]["latest"] == models["mymodel"]["1.1.0"]
assert len(models["mymodel"]["backups"]) == 2
assert models["anothermodel"]["backups"] == []
assert "ignoring" not in caplog.text.lower()
@mock.patch("{}.os.path.exists".format(r.__name__), return_value=True)
@mock.patch(
"{}.ModelStore._load_metadata".format(r.__name__),
side_effect=lambda _: {"the_json": round(random() * 1000)},
)
def test_list_models_ignore_obsolete_backups(
_load_metadata, path_exists, modelstore_config, caplog
):
model_jsons = [
"mymodel_1.0.0.json",
"mymodel_1.1.0.json",
"anothermodel_0.0.1.json",
]
backup_jsons = [
"mymodel_1.0.0_2021-08-01_18-00-00.json",
"OBSOLETE-IGNORED_1.0.0_2021-07-31_12-00-00",
]
def my_glob(pattern):
if "previous" in pattern.lower():
return backup_jsons
else:
return model_jsons
with mock.patch(
"{}.glob.glob".format(r.__name__), side_effect=my_glob,
):
with caplog.at_level(logging.DEBUG):
ms = r.ModelStore(modelstore_config)
models = ms.list_models()
print(models)
assert "ignoring" in caplog.text.lower()
@mock.patch("{}.pickle.load".format(r.__name__), return_value="pickle")
@mock.patch("{}.json.load".format(r.__name__), return_value={"json": 0})
def test_modelstore_load(json, pkl, modelstore_config):
with mock.patch(
"{}.open".format(r.__name__), mock.mock_open(), create=True
) as mo:
ms = r.ModelStore(modelstore_config)
ms.load_trained_model(modelstore_config["model"])
model_conf = modelstore_config["model"]
base_name = os.path.join(
modelstore_config["model_store"]["location"],
"{}_{}".format(model_conf["name"], model_conf["version"]),
)
calls = [
mock.call("{}.pkl".format(base_name), "rb"),
mock.call("{}.json".format(base_name), "r", encoding="utf-8"),
]
mo.assert_has_calls(calls, any_order=True)
@mock.patch(
"{}.ModelStore._load_metadata".format(r.__name__),
return_value={"metrics": {"a": 0}, "metrics_history": {"0123": {"a": 0}}},
)
@mock.patch("{}.ModelStore._dump_metadata".format(r.__name__))
def test_modelstore_update_model_metrics(dump, load, modelstore_config):
new_metrics = {"a": 1}
ms = r.ModelStore(modelstore_config)
ms.update_model_metrics(modelstore_config["model"], new_metrics)
load.assert_called_once()
dump.assert_called_once()
name, contents = dump.call_args[0]
assert contents["metrics"] == new_metrics
hist = contents["metrics_history"]
assert len(hist) == 2
del hist["0123"]
assert hist.popitem()[1] == new_metrics
def test___get_all_classes():
"""Retrieve a type which subclasses the given type"""
config = {"plugins": ["tests.mock_plugin"]}
classes = r._get_all_classes(config, r.DataSource)
assert "food" in classes
classes = r._get_all_classes(config, r.DataSink)
assert "food" in classes
def test_create_data_sources_and_sinks():
conf = {
"plugins": ["tests.mock_plugin"],
"datasources": {
"bla": {"type": "food", "path": "some/path", "tags": "train"},
},
"datasinks": {
"foo": {"type": "food", "path": "some/path"},
"blargh": {
"type": "food",
"path": "some/path",
"tags": "weirdtag",
},
},
}
src, snk = r.create_data_sources_and_sinks(conf, tags=["weirdtag"])
assert "bla" not in src
assert "foo" in snk
assert "blargh" in snk
src, snk = r.create_data_sources_and_sinks(conf)
assert "bla" in src
assert "foo" in snk
assert "blargh" in snk
with pytest.raises(ValueError, match="available"):
conf["datasources"]["bla"]["type"] = "will_not_be_found"
r.create_data_sources_and_sinks(conf)
# Test DataSource caching
# fmt: off
@pytest.fixture()
def datasource_expires_config():
def _res(expires):
return {
"type": "mock",
"expires": expires,
"tags": ["train"],
}
return _res
# fmt: on
class MockDataSource(r.DataSource):
serves = ["mock"]
def get_dataframe(self, params=None, chunksize=None):
df = pd.DataFrame(params)
return df
def get_raw(self, params=None, chunksize=False):
raw = params
return raw
@pytest.mark.parametrize(
"expires, expected_cached", [(0, False), (100000, True), (-1, True)]
)
def test_datasource_expires_df(
expires, expected_cached, datasource_expires_config
):
args = {"a": [1, 2, 3], "b": [3, 4, 5]}
ds = MockDataSource("mock", datasource_expires_config(expires))
# 1st DF read
df1 = ds.get_dataframe(params=args.copy())
pd.testing.assert_frame_equal(df1, pd.DataFrame(args))
assert df1 is not pd.DataFrame(args)
# 2nd DF read
df2 = ds.get_dataframe(params=args.copy())
pd.testing.assert_frame_equal(df2, pd.DataFrame(args))
assert (df2 is df1) == expected_cached
# 3rd DF read
df3 = ds.get_dataframe(params=args.copy())
pd.testing.assert_frame_equal(df3, pd.DataFrame(args))
assert (df3 is df1) == expected_cached
def test_datasource_expires_chunksize_error(datasource_expires_config):
"""chunksize must not be used with expires != 0"""
ds1 = MockDataSource("mock", datasource_expires_config(-1))
with pytest.raises(ValueError, match="incompatible"):
_ = ds1.get_dataframe(chunksize=5)
ds2 = MockDataSource("mock", datasource_expires_config(20000))
with pytest.raises(ValueError, match="incompatible"):
_ = ds2.get_dataframe(chunksize=5)
ds3 = MockDataSource("mock", datasource_expires_config(0))
_ = ds3.get_dataframe(chunksize=5)
ds4 = MockDataSource("mock", datasource_expires_config(20000))
_ = ds4.get_dataframe()
@pytest.mark.parametrize(
"expires, expected_cached", [(0, False), (100000, True), (-1, True)]
)
def test_datasource_expires_raw(
expires, expected_cached, datasource_expires_config
):
args = {"a": [1, 2, 3], "b": [3, 4, 5]}
ds = MockDataSource("mock", datasource_expires_config(expires))
# 1st raw read
raw1 = ds.get_raw(params=args.copy())
assert raw1 == args
# 2nd raw read
raw2 = ds.get_raw(params=args.copy())
assert raw2 == args
assert (raw2 is raw1) == expected_cached
def test_datasource_memoization_df(datasource_expires_config):
args1 = {"a": [1, 2, 3], "b": [3, 4, 5]}
args2_same = {"a": [1, 2, 3], "b": [3, 4, 5]}
args3_different = {"A": [3, 2, 1], "B": [3, 4, 5]}
args4_again_different = {"C": [3, 2, 1], "D": [3, 4, 5]}
cfg = datasource_expires_config(-1) # use cache
cfg["cache_size"] = 2
ds = MockDataSource("mock", cfg)
# 1st DF read (cached as 1st cache element)
df1 = ds.get_dataframe(params=args1.copy())
pd.testing.assert_frame_equal(df1, pd.DataFrame(args1))
assert df1 is not pd.DataFrame(args1) # not from cache
# 2nd DF read (getting from cache)
df2 = ds.get_dataframe(params=args2_same.copy())
pd.testing.assert_frame_equal(df2, pd.DataFrame(args2_same))
assert df2 is df1 # from cache
# 3rd DF read (different params passed, will be cached as 2nd cache element)
df3 = ds.get_dataframe(params=args3_different.copy())
pd.testing.assert_frame_equal(df3, pd.DataFrame(args3_different))
assert df3 is not df2 # not from cache
assert df3 is not df1 # not from cache
# 4rth DF read (original params passed again -- it is still in the cache due to size of 2)
df4 = ds.get_dataframe(params=args1.copy())
pd.testing.assert_frame_equal(df4, | pd.DataFrame(args1) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
from datetime import datetime
url="https://www.dshs.state.tx.us/coronavirus/TexasCOVID19DailyCountyFatalityCountData.xlsx"
#get data from 2020, 2021, and 2022: could be cleaned more
df_2020 = pd.read_excel(url,sheet_name=0, index_col=0,parse_dates=[0])
df_2020 = df_2020[1:]
df_2020=df_2020.T
df_2020=df_2020[["County", "Hays", "Bastrop", "Caldwell", "Travis", "Williamson"]]
df_2021 = pd.read_excel(url,sheet_name=1, index_col=0,parse_dates=[0])
df_2021 = df_2021[1:]
df_2021=df_2021.T
df_2021=df_2021[["County", "Hays", "Bastrop", "Caldwell", "Travis", "Williamson"]]
df_2022 = pd.read_excel(url,sheet_name=2, index_col=0,parse_dates=[0])
df_2022 = df_2022[1:]
df_2022=df_2022.T
df_2022=df_2022[["County", "Hays", "Bastrop", "Caldwell", "Travis", "Williamson"]]
#df_all has all of the data for 5 counties from 2020-2022
df_all=df_2020.append(df_2021)
df_all=df_all.append(df_2022)
dict = {'County' : 'date'}
df_all.rename(columns=dict,
inplace=True)
df_all['date'] = | pd.to_datetime(df_all.date, format='%m/%d/%Y') | pandas.to_datetime |
import streamlit as st
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
import ast
import base64
def local_css(file_name):
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
local_css("style.css")
st.write("""
# Steam Community Market - Advanced Price Helper App
""")
scm_url = st.text_input('Please enter the url address of Steam Community Market Listing', 'https://steamcommunity.com/market/listings/440/The%20Killing%20Tree')
# Scraping and Storing Objects
# Input from user will need to be a url for steam community market
#resp_object = requests.get('https://steamcommunity.com/market/listings/440/The%20Killing%20Tree') #url will be an input from user
resp_object = requests.get(scm_url) #url will be an input from user
soup = BeautifulSoup(resp_object.text,'html.parser')
market_listing_largeimage_url = soup.find("div",{"class":"market_listing_largeimage"}).contents[1]['src'] # item image url
price_history_string = ast.literal_eval(re.findall('(?<=line1=)(.+?\]\])',resp_object.text)[0]) # price history string
item_name = re.findall('(?<=<title>Steam Community Market :: Listings for )(.+?(?=<\/))',resp_object.text)[0] # name of item
# constructing a df with entire price history
times = []
prices = []
solds = []
for row in range(len(price_history_string)):
timestamp = price_history_string[row][0]
median_price_sold = price_history_string[row][1]
number_sold = price_history_string[row][2]
times.append(timestamp)
prices.append(median_price_sold)
solds.append(number_sold)
final_df = pd.DataFrame(list(zip(times,prices,solds)),columns=['timestamp','price_median (USD)','quantity_sold']) # constructing a dataframe with all attributes
final_df['timestamp'] = [x[:14] for x in final_df['timestamp']] # removing +0s
final_df['timestamp'] = | pd.to_datetime(final_df['timestamp'],format='%b %d %Y %H') | pandas.to_datetime |
import pickle
import random
import string
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from scipy import stats
import linearmodels
from linearmodels.shared.exceptions import missing_warning
from linearmodels.shared.hypotheses import (
InapplicableTestStatistic,
InvalidTestStatistic,
WaldTestStatistic,
)
from linearmodels.shared.io import add_star, format_wide
from linearmodels.shared.linalg import has_constant, inv_sqrth
from linearmodels.shared.utility import AttrDict, ensure_unique_column, panel_to_frame
MISSING_PANEL = "Panel" not in dir(pd)
def test_missing_warning():
missing = np.zeros(500, dtype=bool)
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
missing[0] = True
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 1
original = linearmodels.WARN_ON_MISSING
linearmodels.WARN_ON_MISSING = False
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
linearmodels.WARN_ON_MISSING = original
def test_hasconstant():
x = np.random.randn(100, 3)
hc, loc = has_constant(x)
assert bool(hc) is False
assert loc is None
x[:, 0] = 1
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[:, 0] = 2
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[::2, 0] = 0
x[:, 1] = 1
x[1::2, 1] = 0
hc, loc = has_constant(x)
assert hc is True
def test_wald_statistic():
ts = WaldTestStatistic(1.0, "_NULL_", 1, name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_NULL_" in str(ts)
assert ts.stat == 1.0
assert ts.df == 1
assert ts.df_denom is None
assert ts.dist_name == "chi2(1)"
assert isinstance(ts.critical_values, dict)
assert_allclose(1 - stats.chi2.cdf(1.0, 1), ts.pval)
ts = WaldTestStatistic(1.0, "_NULL_", 1, 1000, name="_NAME_")
assert ts.df == 1
assert ts.df_denom == 1000
assert ts.dist_name == "F(1,1000)"
assert_allclose(1 - stats.f.cdf(1.0, 1, 1000), ts.pval)
def test_invalid_test_statistic():
ts = InvalidTestStatistic("_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
def test_inapplicable_test_statistic():
ts = InapplicableTestStatistic(reason="_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
ts = InapplicableTestStatistic()
assert "not applicable" in str(ts)
def test_inv_sqrth():
x = np.random.randn(1000, 10)
xpx = x.T @ x
invsq = inv_sqrth(xpx)
prod = invsq @ xpx @ invsq - np.eye(10)
assert_allclose(1 + prod, np.ones((10, 10)))
def test_ensure_unique_column():
df = pd.DataFrame({"a": [0, 1, 0], "b": [1.0, 0.0, 1.0]})
out = ensure_unique_column("a", df)
assert out == "_a_"
out = ensure_unique_column("c", df)
assert out == "c"
out = ensure_unique_column("a", df, "=")
assert out == "=a="
df["_a_"] = -1
out = ensure_unique_column("a", df)
assert out == "__a__"
def test_attr_dict():
ad = AttrDict()
ad["one"] = "one"
ad[1] = 1
ad[("a", 2)] = ("a", 2)
assert list(ad.keys()) == ["one", 1, ("a", 2)]
assert len(ad) == 3
plk = pickle.dumps(ad)
pad = pickle.loads(plk)
assert list(pad.keys()) == ["one", 1, ("a", 2)]
assert len(pad) == 3
ad2 = ad.copy()
assert list(ad2.keys()) == list(ad.keys())
assert ad.get("one", None) == "one"
assert ad.get("two", False) is False
k, v = ad.popitem()
assert k == "one"
assert v == "one"
items = ad.items()
assert (1, 1) in items
assert (("a", 2), ("a", 2)) in items
assert len(items) == 2
values = ad.values()
assert 1 in values
assert ("a", 2) in values
assert len(values) == 2
ad2 = AttrDict()
ad2[1] = 3
ad2["one"] = "one"
ad2["a"] = "a"
ad.update(ad2)
assert ad[1] == 3
assert "a" in ad
ad.__str__()
with pytest.raises(AttributeError):
ad.__private_dict__ = None
with pytest.raises(AttributeError):
ad.some_other_key
with pytest.raises(KeyError):
ad["__private_dict__"] = None
del ad[1]
assert 1 not in ad.keys()
ad.new_value = "new_value"
assert "new_value" in ad.keys()
assert ad.new_value == ad["new_value"]
for key in ad.keys():
if isinstance(key, str):
assert key in dir(ad)
new_value = ad.pop("new_value")
assert new_value == "new_value"
del ad.one
assert "one" not in ad.keys()
ad.clear()
assert list(ad.keys()) == []
def test_format_wide():
k = 26
inputs = [chr(65 + i) * (20 + i) for i in range(k)]
out = format_wide(inputs, 80)
assert max([len(v) for v in out]) <= 80
out = format_wide(["a"], 80)
assert out == [["a"]]
def test_panel_to_midf():
x = np.random.standard_normal((3, 7, 100))
df = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)))
mi = pd.MultiIndex.from_product([list(range(7)), list(range(100))])
expected = pd.DataFrame(index=mi, columns=[0, 1, 2])
for i in range(3):
expected[i] = x[i].ravel()
expected.index.names = ["major", "minor"]
pd.testing.assert_frame_equal(df, expected)
expected2 = expected.copy()
expected2 = expected2.sort_index(level=[1, 0])
expected2.index = expected2.index.swaplevel(0, 1)
expected2.index.names = ["major", "minor"]
df2 = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)), True)
pd.testing.assert_frame_equal(df2, expected2)
entities = list(
map(
"".join,
[
[random.choice(string.ascii_lowercase) for __ in range(10)]
for _ in range(100)
],
)
)
times = | pd.date_range("1999-12-31", freq="A-DEC", periods=7) | pandas.date_range |
# category: ["region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]("user_id"?)
# 1. category base count features
# 2. category embedding.
from utils import *
import pandas as pd
import gc
train = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"]).drop(["deal_probability", "image", "image_top_1"],axis=1)
test = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"]).drop(["image", "image_top_1"],axis=1)
train_active = pd.read_csv("../input/train_active.csv", parse_dates = ["activation_date"])
test_active = pd.read_csv("../input/test_active.csv", parse_dates = ["activation_date"])
all_df = pd.concat([train, test, train_active, test_active])
del train_active, test_active;gc.collect()
all_df["dayofweek"] = all_df.activation_date.dt.weekday
train["dayofweek"] = train.activation_date.dt.weekday
test["dayofweek"] = test.activation_date.dt.weekday
all_df["one"] = 1
print("Done Reading All Data")
def get_category_df(col):
tmp = pd.DataFrame()
all_df[col] = all_df[col].fillna("NAN")
tmp["{}_count".format(col)] = all_df.groupby(col)["one"].sum()
tmp["{}_unique_user_count".format(col)] = all_df[["user_id", col]].groupby(col).agg(pd.Series.nunique)
tmp["{}_price_median".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.median)
tmp["{}_price_std".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.std)
tmp["{}_price_max".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.max)
tmp["{}_price_min".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.min)
tmp["latest_date"] = all_df[[col, "activation_date"]].groupby(col).max()
tmp["first_date"] = all_df[[col, "activation_date"]].groupby(col).min()
tmp["{}_diff".format(col)] = (tmp["latest_date"] - tmp["first_date"]).dt.days
tmp["{}_average_period".format(col)] = tmp["{}_diff".format(col)] / tmp["{}_count".format(col)]
tmp.drop(["latest_date", "first_date"], axis=1, inplace=True)
return tmp.reset_index()
print("Categorical Features...")
region = get_category_df("region")
city = get_category_df("city")
parent_category_name = get_category_df("parent_category_name")
category_name = get_category_df("category_name")
user_type = get_category_df("user_type")
param_1 = get_category_df("param_1")
param_2 = get_category_df("param_2")
param_3 = get_category_df("param_3")
category = {"region":region, "city":city, "parent_category_name":parent_category_name
,"category_name":category_name,"user_type":user_type, "param_1":param_1
, "param_2":param_2, "param_3":param_3}
cate_col = list(category.keys())
train = train[cate_col]
test = test[cate_col]
for col, d in category.items():
train = pd.merge(train, d, on=col, how="left")
test = pd.merge(test, d, on=col, how="left")
train.drop(cate_col, axis=1, inplace=True)
test.drop(cate_col, axis=1, inplace=True)
to_parquet(train, "../features/fe_categorical_base_features_train.parquet")
to_parquet(test, "../features/fe_categorical_base_features_test.parquet")
# weekday
def get_category_weekday_df(col):
all_df[col] = all_df[col].fillna("NAN")
tmp = pd.DataFrame()
tmp["{}_{}_count".format(*col)] = all_df.groupby(col)["one"].sum()
tmp["{}_{}_unique_user_count".format(*col)] = all_df[["user_id"] + col].groupby(col).agg(pd.Series.nunique)
tmp["{}_{}_price_median".format(*col)] = all_df[["price"] + col].groupby(col).agg(np.median)
tmp["{}_{}_price_std".format(*col)] = all_df[["price"] + col].groupby(col).agg(np.std)
tmp["{}_{}_price_max".format(*col)] = all_df[["price"] + col].groupby(col).agg(np.max)
tmp["{}_{}_price_min".format(*col)] = all_df[["price"] + col].groupby(col).agg(np.min)
tmp = tmp.reset_index()
return tmp
print("Categorical Weekday Features...")
region = get_category_weekday_df(["region", "dayofweek"])
city = get_category_weekday_df(["city", "dayofweek"])
parent_category_name = get_category_weekday_df(["parent_category_name", "dayofweek"])
category_name = get_category_weekday_df(["category_name", "dayofweek"])
user_type = get_category_weekday_df(["user_type", "dayofweek"])
param_1 = get_category_weekday_df(["param_1", "dayofweek"])
param_2 = get_category_weekday_df(["param_2", "dayofweek"])
param_3 = get_category_weekday_df(["param_3", "dayofweek"])
category = {"region":region, "city":city, "parent_category_name":parent_category_name
,"category_name":category_name,"user_type":user_type, "param_1":param_1
, "param_2":param_2, "param_3":param_3}
cate_col = list(category.keys())+["dayofweek"]
train = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"]).drop(["deal_probability", "image", "image_top_1"],axis=1)
test = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"]).drop(["image", "image_top_1"],axis=1)
train["dayofweek"] = train.activation_date.dt.weekday
test["dayofweek"] = test.activation_date.dt.weekday
train = train[cate_col]
test = test[cate_col]
for col, d in category.items():
train = | pd.merge(train, d, on=[col, "dayofweek"], how="left") | pandas.merge |
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from cause.plotter import Plotter
from cause.predictor import ClassificationSet
class Breakdown():
def __init__(self, data, weights, algos, name):
self.__data = data
self.__weights = weights
self.__algos = algos
self.__name = name
# todo validate input:
# data is an np.array with dims (num algos, num weights)
@property
def data(self):
return self.__data
@property
def weights(self):
return self.__weights
@property
def algos(self):
return self.__algos
@property
def name(self):
return self.__name
def save_to_latex(self, outfolder="/tmp", weight=1.):
outfile = "%s/breakdown_%s" % (outfolder, self.name)
index = np.where(self.weights==weight)[0][0] # location for lambda=weight
breakdown_perc = self.data[:,index] * 100. / self.data[:,index].sum()
# write latex table to file
with open(outfile, "w") as f:
for algo in range(self.data.shape[0]):
f.write("&\t%s\t&\t%.2f\\%%\t\t\n" % (
self.data[algo, index], breakdown_perc[algo]))
def plot(self, outfolder="/tmp"):
Plotter.plot_breakdown(self, outfolder)
class Postprocessor():
def __init__(self, dataset):
self.__dataset = dataset
@property
def dataset(self):
return self.__dataset
def breakdown(self):
breakdown = np.empty(shape=(0,0))
for weight in self.dataset.weights:
column = self.dataset.lstats[weight].get_breakdown(self.dataset.algos)
if breakdown.shape[0] == 0:
breakdown = column
else:
breakdown = np.vstack([breakdown, column])
breakdown = np.transpose(breakdown)
return Breakdown(breakdown, self.dataset.weights,
self.dataset.algos, self.dataset.name)
class FeatsPostprocessor(Postprocessor):
def __init__(self, dataset, features):
super().__init__(dataset)
self.__features = features
@property
def features(self):
return self.__features
def save_feature_importances_by_weight(self, outfolder, weight):
lstats = self.dataset.lstats[weight]
clsset = ClassificationSet.sanitize_and_init(
self.features.features, lstats.winners, lstats.costs)
clf = ExtraTreesClassifier()
clf = clf.fit(clsset.X, clsset.y.ravel())
importances = pd.DataFrame(data=clf.feature_importances_.reshape(
(1, len(clf.feature_importances_))
),
columns=self.features.features.columns)
# sort feature names by average importance
sorted_feature_names = [name for _,name in
sorted(zip(importances.mean(axis=0), self.features.features.columns))
][::-1]
importances = importances[sorted_feature_names]
feats = | pd.DataFrame(columns=["order", "value", "name"]) | pandas.DataFrame |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from application import model_builder
def test_validate_types_numeric_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = ["3", "4", "5"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_throws_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["3d", "4d", "5d"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s c", "0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Percentage"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [30.0, 40.0, 50.0]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [0.3, 0.4, 0.5]
df["Some Feature 2"] = ["0.3%", "0.4 %", " 0.5 %"]
df["Some Feature 3"] = ["30", "40", " 50"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Percentage"],
["Some Feature 2", "Percentage"],
["Some Feature 3", "Percentage"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_money_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s$", "$0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Money"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from iexfinance.base import _IEXBase
from iexfinance.utils import _handle_lists, no_pandas
from iexfinance.utils.exceptions import IEXSymbolError, IEXEndpointError
class StockReader(_IEXBase):
"""
Base class for obtaining data from the Stock endpoints of IEX.
"""
# Possible option values (first is default)
_ENDPOINTS = ["chart", "quote", "book", "open-close", "previous",
"company", "stats", "peers", "relevant", "news",
"financials", "earnings", "dividends", "splits", "logo",
"price", "delayed-quote", "effective-spread",
"volume-by-venue", "ohlc"]
def __init__(self, symbols=None, **kwargs):
""" Initialize the class
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Desired symbols for retrieval
"""
self.symbols = list(map(lambda x: x.upper(), _handle_lists(symbols)))
self.n_symbols = len(self.symbols)
self.endpoints = []
super(StockReader, self).__init__(**kwargs)
def get_all(self):
"""
Returns all endpoints, indexed by endpoint title for each symbol
Notes
-----
Only allows JSON format (pandas not supported).
"""
self.optional_params = {}
self.endpoints = self._ENDPOINTS[:10]
json_data = self.fetch(fmt_p=no_pandas)
self.endpoints = self._ENDPOINTS[10:20]
json_data_2 = self.fetch(fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in json_data:
raise IEXSymbolError(symbol)
json_data[symbol].update(json_data_2[symbol])
return json_data[self.symbols[0]] if self.n_symbols == 1 else json_data
@property
def url(self):
return 'stock/market/batch'
@property
def params(self):
temp = {
"symbols": ','.join(self.symbols),
"types": ','.join(self.endpoints)
}
temp.update(self.optional_params)
if "filter_" in temp:
if isinstance(temp["filter_"], list):
temp["filter"] = ",".join(temp.pop("filter_"))
else:
temp["filter"] = temp.pop("filter_")
if "range_" in temp:
temp["range"] = temp.pop("range_")
params = {k: str(v).lower() if v is True or v is False else str(v)
for k, v in temp.items()}
return params
def _get_endpoint(self, endpoint, params={}, fmt_p=None,
fmt_j=None, filter_=None):
result = {}
if filter_:
params.update({"filter": filter_})
self.optional_params = params
self.endpoints = [endpoint]
data = self.fetch(fmt_j=fmt_j, fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in data:
raise IEXSymbolError(symbol)
if endpoint not in data[symbol]:
result[symbol] = []
else:
result[symbol] = data[symbol][endpoint]
return self._output_format_one(result, fmt_p=fmt_p, fmt_j=fmt_j)
def _get_field(self, endpoint, field):
data = getattr(self, "get_%s" % endpoint)(filter_=field)
if self.output_format == 'json':
if self.n_symbols == 1:
data = data[field]
else:
data = {symbol: data[symbol][field] for symbol in self.symbols}
return data
def _output_format_one(self, out, fmt_p=None, fmt_j=None):
data = super(StockReader, self)._output_format(out, fmt_p=fmt_p)
if len(self.symbols) == 1 and self.output_format == 'json':
return data[self.symbols[0]]
return data
def get_endpoints(self, endpoints=[]):
"""
Universal selector method to obtain specific endpoints from the
data set.
Parameters
----------
endpoints: str or list
Desired valid endpoints for retrieval
Notes
-----
Only allows JSON format (pandas not supported).
Raises
------
IEXEndpointError
If an invalid endpoint is specified
IEXSymbolError
If a symbol is invalid
IEXQueryError
If issues arise during query
"""
if isinstance(endpoints, str) and endpoints in self._ENDPOINTS:
endpoints = list(endpoints)
if not endpoints or not set(endpoints).issubset(self._ENDPOINTS):
raise IEXEndpointError("Please provide a valid list of endpoints")
elif len(endpoints) > 10:
raise ValueError("Please input up to 10 valid endpoints")
self.optional_params = {}
self.endpoints = endpoints
json_data = self.fetch(fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in json_data:
raise IEXSymbolError(symbol)
return json_data[self.symbols[0]] if self.n_symbols == 1 else json_data
def get_book(self, **kwargs):
"""
Reference: https://iextrading.com/developer/docs/#book
Returns
-------
dict or pandas.DataFrame
Stocks Book endpoint data
"""
return self._get_endpoint("book", params=kwargs)
def get_chart(self, **kwargs):
"""
Reference: https://iextrading.com/developer/docs/#chart
Parameters
----------
range: str, default '1m', optional
Chart range to return. See docs.
chartReset: boolean, default True, optional
If true, 1d chart will reset at midnight instead of the default
behavior of 9:30am EST.
chartSimplify: boolean, default True, optional
If true, runs polyline simplification using Douglas-Peucker
algorithm. Useful for plotting spotline charts
chartInterval: int, default None, optional
Chart data will return every nth element (where n is chartInterval)
changeFromClose: bool, default False, optional
If true, changeOverTime and marketChangeOverTime will be relative
to previous day close instead of the first value.
chartLast: int, optional
return the last N elements
Returns
-------
list
Stocks Chart endpoint data
"""
def fmt_p(out):
result = {}
for symbol in self.symbols:
d = out.pop(symbol)
df = pd.DataFrame(d)
df.set_index( | pd.DatetimeIndex(df["date"]) | pandas.DatetimeIndex |
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from Phase2Vec import Phase2Vec
from Atom2Vec.Atom2Vec_encoder import Atom2Vec
from AtomicModel import Endtoend
from utils import *
def get_phases(datafile=None, phases=None, mode='classify', maxlength=None):
""" Reads datafile, trains or reads atomic features, vectorises phase fields.
Parameters
----------
datafile: str; filename of database to process
to aggregate compositions into phase fields
default: None
load_phase_vectors: str; filename to read phase fields, or their vectors
default: None
mode: str; mode of use of atomic features - parameter is passed to Atom2Vec and End2End model
default: 'classify' - derive during End2End training from Atom2Vec enviroments
'rank' - re-use embeddings calculated in 'classify' mode
maxlength: int; maximum size of a phase field in the dataset
default: None
"""
return Phase2Vec(dbfile=datafile, load_phases=phases, maxlength=maxlength, mode=mode)
def get_dataset(phases, parameter, t_type='max Tc', T=None, scaler=None):
""" Selects data for training, X.
If threshold T is specified, selects target values, y, from dataframe.
Parameters
----------
phases: Phase2Vec obj
parameter: str; name of the column in phases.dt where phase vectors are stored
values:'onehot' or 'phase_vectors'
t_type: str; name of the column (property), which is used for data classification
default: 'max_Tc'
T: float; threshold value used for binary partitioning of the dataset
scaler: sklearn obj; perform normalisation on X data; default: None
"""
X = np.array([i for i in phases.dt[parameter].values])
print('X shape:', X.shape)
print(X[0])
if scaler:
X = scaler.fit_transform(X)
return X, np.where(phases.dt[t_type].values > T, 1, 0) if T else X
def generate_test(phases, natoms=3, atoms=None, exclude=None, mode='classify'):
""" Generates a list of phase fields not in training set.
Parameters
----------
phases: Phase2Vec obj
natoms: int; number of elements in a phase field
atoms: list; constituent elements of the phase fields;
default: None - use the same elements as in training
exclude: list; phase fields to exclude from the test set;
default: None - exclude phases listed in ICSD
"""
if not exclude:
exclude = pd.read_csv('DATA/icsd_phases.csv')['phases']
if not atoms:
atoms = phases.atoms
test = generate(atoms, natoms, exclude)
test = | pd.DataFrame({'phases': test}) | pandas.DataFrame |
from evalutils.exceptions import ValidationError
from evalutils.io import CSVLoader, FileLoader, ImageLoader
import json
import nibabel as nib
import numpy as np
import os.path
from pathlib import Path
from pandas import DataFrame, MultiIndex
import scipy.ndimage
from scipy.ndimage.interpolation import map_coordinates, zoom
from surface_distance import *
##### paths #####
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
##### metrics #####
def jacobian_determinant(disp):
_, _, H, W, D = disp.shape
gradx = np.array([-0.5, 0, 0.5]).reshape(1, 3, 1, 1)
grady = np.array([-0.5, 0, 0.5]).reshape(1, 1, 3, 1)
gradz = np.array([-0.5, 0, 0.5]).reshape(1, 1, 1, 3)
gradx_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradx, mode='constant', cval=0.0)], axis=1)
grady_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], grady, mode='constant', cval=0.0)], axis=1)
gradz_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradz, mode='constant', cval=0.0)], axis=1)
grad_disp = np.concatenate([gradx_disp, grady_disp, gradz_disp], 0)
jacobian = grad_disp + np.eye(3, 3).reshape(3, 3, 1, 1, 1)
jacobian = jacobian[:, :, 2:-2, 2:-2, 2:-2]
jacdet = jacobian[0, 0, :, :, :] * (jacobian[1, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[1, 2, :, :, :] * jacobian[2, 1, :, :, :]) -\
jacobian[1, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[2, 1, :, :, :]) +\
jacobian[2, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[1, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[1, 1, :, :, :])
return jacdet
def compute_tre(x, y, spacing):
return np.linalg.norm((x - y) * spacing, axis=1)
##### file loader #####
class NiftiLoader(ImageLoader):
@staticmethod
def load_image(fname):
return nib.load(str(fname))
@staticmethod
def hash_image(image):
return hash(image.get_fdata().tostring())
class NumpyLoader(ImageLoader):
@staticmethod
def load_image(fname):
return np.load(str(fname))['arr_0']
@staticmethod
def hash_image(image):
return hash(image.tostring())
class CURIOUSLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines()[5:]:
lms = [float(lm) for lm in line.split(' ')[1:-1]]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
class L2RLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines():
lms = [float(lm) for lm in line.split(',')]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
##### validation errors #####
def raise_missing_file_error(fname):
message = (
f"The displacement field {fname} is missing. "
f"Please provide all required displacement fields."
)
raise ValidationError(message)
def raise_dtype_error(fname, dtype):
message = (
f"The displacement field {fname} has a wrong dtype ('{dtype}'). "
f"All displacement fields should have dtype 'float16'."
)
raise ValidationError(message)
def raise_shape_error(fname, shape, expected_shape):
message = (
f"The displacement field {fname} has a wrong shape ('{shape[0]}x{shape[1]}x{shape[2]}x{shape[3]}'). "
f"The expected shape of displacement fields for this task is {expected_shape[0]}x{expected_shape[1]}x{expected_shape[2]}x{expected_shape[3]}."
)
raise ValidationError(message)
##### eval val #####
class EvalVal():
def __init__(self):
self.ground_truth_path = DEFAULT_GROUND_TRUTH_PATH
self.predictions_path = DEFAULT_INPUT_PATH
self.output_file = DEFAULT_EVALUATION_OUTPUT_FILE_PATH
self.csv_loader = CSVLoader()
self.nifti_loader = NiftiLoader()
self.numpy_loader = NumpyLoader()
self.curious_lms_loader = CURIOUSLmsLoader()
self.l2r_lms_loader = L2RLmsLoader()
self.pairs_task_01 = DataFrame()
self.imgs_task_01 = DataFrame()
self.lms_task_01 = DataFrame()
self.disp_fields_task_01 = DataFrame()
self.cases_task_01 = DataFrame()
self.pairs_task_02 = | DataFrame() | pandas.DataFrame |
import math
import copy
import numpy as np
import pandas as pd
import scipy.interpolate as interp
import scipy.fftpack as fft
from .base import QualityControlBaseAccessor
from .utils import *
#=============================General Accessors==============================#
@pd.api.extensions.register_series_accessor("qc")
class QualityControlSeriesAccessor(QualityControlBaseAccessor):
def describe(self):
out_dict = dict(mean = self._obj.mean(),
std = self._obj.std(),
skew= self._obj.skew(),
kurt= self._obj.kurtosis(),
pct_null = self._obj.isna().sum()/self._obj.size,
stationarity_measure = self.stationarity_measure,
pct_spike_flag = self.spike_mask.sum()/self._obj.size,
pct_hist_flag = self.hist_mask.sum()/self._obj.size)
return | pd.Series(out_dict, name=self._obj.name) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from .example import replace_all_nulls_with_value
@pytest.fixture
def df_none_missing():
""" return a 3x3 dataframe with no missing values """
cols = ['a', 'b', 'c']
data = [[0, 1, 0], [0, 0, 1], [1, 1, 1]]
return pd.DataFrame(data, columns=cols)
@pytest.fixture
def df_missing():
""" return a 3x3 dataframe with a couple of NaNs """
df = df_none_missing()
df.ix[0, 2] = np.nan
df.ix[2, 1] = np.nan
return df
def test_replace_all_nulls_does_nothing_if_no_nulls(df_none_missing):
new_df = replace_all_nulls_with_value(df_none_missing, -1)
assert (df_none_missing.values == new_df.values).all()
assert | pd.notnull(new_df.values) | pandas.notnull |
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right"s index, oddly
expected.index = result.index
# time column appears after left"s columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = pd.merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"incompatible merge keys \[0\] .*, must be the same type"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a": pd.Categorical(["a", "X", "c", "X", "b"]),
}
)
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == "left":
merge_asof(df_null, df, on="a")
else:
merge_asof(df, df_null, on="a")
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = pd.merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import arcpy
import pandas as pd
import ppa_input_params as p
import npmrds_data_conflation as ndc
def get_wtdavg_truckdata(in_df, col_name):
len_cols = ['{}_calc_len'.format(dirn) for dirn in p.directions_tmc]
val_cols = ['{}{}'.format(dirn, col_name) for dirn in p.directions_tmc]
wtd_dict = dict(zip(len_cols, val_cols))
wtd_val_sum = 0
dist_sum = 0
for dirlen, dirval in wtd_dict.items():
dir_val2 = 0 if | pd.isnull(in_df[dirval][0]) | pandas.isnull |
# -*- coding: utf-8 -*-
# @File : plot_utils.py
# @Author : <NAME>
# @Time : 2021/10/29 下午9:56
# @Disc :
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
from xgboost.sklearn import XGBModel
from typing import List
from sklearn.metrics import roc_curve, classification_report, roc_auc_score, confusion_matrix
from src.utils.confusion_matrix_pretty_print import pretty_plot_confusion_matrix
def count_plot(df: pd.DataFrame, col: str, xytext=(0, 0), show_details=True) -> None:
'''
custom count plot
Args:
df:
col:
xytext:
Returns:
'''
ax = sns.countplot(data=df, x=col)
if show_details:
for bar in ax.patches:
ax.annotate('%{:.2f}\n{:.0f}'.format(100*bar.get_height()/len(df),bar.get_height()), (bar.get_x() + bar.get_width() / 2,
bar.get_height()), ha='center', va='center',
size=11, xytext=xytext,
textcoords='offset points')
plt.show()
def plot_feature_importances(model: XGBModel, feature_cols: List[str], show_feature_num=10, figsize=(20, 10), fig_dir=None):
"""
plot feature importance of xgboost model
Args:
model:
feature_cols:
show_feature_num:
figsize:
Returns:
"""
feature_imp = | pd.Series(model.feature_importances_, index=feature_cols) | pandas.Series |
import pandas as pd
import csv
def save_table_dict_csv(fn, table_dict):
fn_csv = fn + '.csv'
with open(fn_csv, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=table_dict.keys(),
lineterminator='\n')
writer.writeheader()
for id in range(0, len(list(table_dict.values())[0])):
tmp_dict = {}
for key, values in table_dict.items():
tmp_dict[key] = values[id]
writer.writerow(tmp_dict)
def save_table_dict(fn, table_dict):
fn_xls = fn + '.xlsx'
df = | pd.DataFrame(table_dict) | pandas.DataFrame |
import calendar
from ..utils import search_quote
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
获取单股票基本信息
Parameters
----------
stock_code : str
股票代码
Returns
-------
Series
单只股票基本信息
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
secid = get_quote_id(stock_code)
if not secid:
return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values())
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', secid),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
获取股票多只基本信息
Parameters
----------
stock_codes : List[str]
股票代码列表
Returns
-------
DataFrame
多只股票基本信息
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
dfs: List[pd.DataFrame] = []
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df = pd.DataFrame(dfs)
df = df.dropna(subset=['股票代码'])
return df
@to_numeric
def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""
Parameters
----------
stock_codes : Union[str, List[str]]
股票代码或股票代码构成的列表
Returns
-------
Union[Series, DataFrame]
- ``Series`` : 包含单只股票基本信息(当 ``stock_codes`` 是字符串时)
- ``DataFrane`` : 包含多只股票基本信息(当 ``stock_codes`` 是字符串列表时)
Raises
------
TypeError
当 ``stock_codes`` 类型不符合要求时
Examples
--------
>>> import efinance as ef
>>> # 获取单只股票信息
>>> ef.stock.get_base_info('600519')
股票代码 600519
股票名称 贵州茅台
市盈率(动) 39.38
市净率 12.54
所处行业 酿酒行业
总市值 2198082348462.0
流通市值 2198082348462.0
板块编号 BK0477
ROE 8.29
净利率 54.1678
净利润 13954462085.610001
毛利率 91.6763
dtype: object
>>> # 获取多只股票信息
>>> ef.stock.get_base_info(['600519','300715'])
股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率
0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765
1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763
"""
if isinstance(stock_codes, str):
return get_base_info_single(stock_codes)
elif hasattr(stock_codes, '__iter__'):
return get_base_info_muliti(stock_codes)
raise TypeError(f'所给的 {stock_codes} 不符合参数要求')
def get_quote_history(stock_codes: Union[str, List[str]],
beg: str = '19000101',
end: str = '20500101',
klt: int = 101,
fqt: int = 1,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
获取股票的 K 线数据
Parameters
----------
stock_codes : Union[str,List[str]]
股票代码、名称 或者 股票代码、名称构成的列表
beg : str, optional
开始日期,默认为 ``'19000101'`` ,表示 1900年1月1日
end : str, optional
结束日期,默认为 ``'20500101'`` ,表示 2050年1月1日
klt : int, optional
行情之间的时间间隔,默认为 ``101`` ,可选示例如下
- ``1`` : 分钟
- ``5`` : 5 分钟
- ``15`` : 15 分钟
- ``30`` : 30 分钟
- ``60`` : 60 分钟
- ``101`` : 日
- ``102`` : 周
- ``103`` : 月
fqt : int, optional
复权方式,默认为 ``1`` ,可选示例如下
- ``0`` : 不复权
- ``1`` : 前复权
- ``2`` : 后复权
Returns
-------
Union[DataFrame, Dict[str, DataFrame]]
股票的 K 线数据
- ``DataFrame`` : 当 ``stock_codes`` 是 ``str`` 时
- ``Dict[str, DataFrame]`` : 当 ``stock_codes`` 是 ``List[str]`` 时
Examples
--------
>>> import efinance as ef
>>> # 获取单只股票日 K 行情数据
>>> ef.stock.get_quote_history('600519')
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
>>> # 获取多只股票历史行情
>>> stock_df = ef.stock.get_quote_history(['600519','300750'])
>>> type(stock_df)
<class 'dict'>
>>> stock_df.keys()
dict_keys(['300750', '600519'])
>>> stock_df['600519']
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
"""
df = get_quote_history_for_stock(
stock_codes,
beg=beg,
end=end,
klt=klt,
fqt=fqt
)
if isinstance(df, pd.DataFrame):
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
elif isinstance(df, dict):
for stock_code in df.keys():
df[stock_code].rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
# NOTE 扩展接口 设定此关键词即返回 DataFrame 而不是 dict
if kwargs.get('return_df'):
df: pd.DataFrame = pd.concat(df, axis=0, ignore_index=True)
return df
@process_dataframe_and_series(remove_columns_and_indexes=['市场编号'])
@to_numeric
def get_realtime_quotes(fs: Union[str, List[str]] = None) -> pd.DataFrame:
"""
获取单个或者多个市场行情的最新状况
Parameters
----------
fs : Union[str, List[str]], optional
行情名称或者多个行情名列表 可选值及示例如下
- ``None`` 沪深京A股市场行情
- ``'沪深A股'`` 沪深A股市场行情
- ``'沪A'`` 沪市A股市场行情
- ``'深A'`` 深市A股市场行情
- ``北A`` 北证A股市场行情
- ``'可转债'`` 沪深可转债市场行情
- ``'期货'`` 期货市场行情
- ``'创业板'`` 创业板市场行情
- ``'美股'`` 美股市场行情
- ``'港股'`` 港股市场行情
- ``'中概股'`` 中国概念股市场行情
- ``'新股'`` 沪深新股市场行情
- ``'科创板'`` 科创板市场行情
- ``'沪股通'`` 沪股通市场行情
- ``'深股通'`` 深股通市场行情
- ``'行业板块'`` 行业板块市场行情
- ``'概念板块'`` 概念板块市场行情
- ``'沪深系列指数'`` 沪深系列指数市场行情
- ``'上证系列指数'`` 上证系列指数市场行情
- ``'深证系列指数'`` 深证系列指数市场行情
- ``'ETF'`` ETF 基金市场行情
- ``'LOF'`` LOF 基金市场行情
Returns
-------
DataFrame
单个或者多个市场行情的最新状况
Raises
------
KeyError
当参数 ``fs`` 中含有不正确的行情类型时引发错误
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_realtime_quotes()
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A
1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A
2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A
3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A
4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A
4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A
4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A
4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A
4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A
>>> ef.stock.get_realtime_quotes(['创业板','港股'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 00859 中昌国际控股 49.02 0.38 0.38 0.26 0.26 0.125 0.08 86.85 -2.83 938000 262860.0 0.255 427510287 427510287 128.00859 None
1 01058 粤海制革 41.05 1.34 1.51 0.9 0.93 0.39 8.34 1.61 249.89 44878000 57662440.0 0.95 720945460 720945460 128.01058 None
2 00713 世界(集团) 27.94 0.87 0.9 0.68 0.68 0.19 1.22 33.28 3.64 9372000 7585400.0 0.68 670785156 670785156 128.00713 None
3 08668 瀛海集团 24.65 0.177 0.179 0.145 0.145 0.035 0.0 10.0 -9.78 20000 3240.0 0.142 212400000 212400000 128.08668 None
4 08413 亚洲杂货 24.44 0.28 0.28 0.25 0.25 0.055 0.01 3.48 -20.76 160000 41300.0 0.225 325360000 325360000 128.08413 None
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
5632 08429 冰雪集团 -16.75 0.174 0.2 0.166 0.2 -0.035 2.48 3.52 -21.58 11895000 2074645.0 0.209 83520000 83520000 128.08429 None
5633 00524 长城天下 -17.56 0.108 0.118 0.103 0.118 -0.023 0.45 15.43 -6.55 5961200 649171.0 0.131 141787800 141787800 128.00524 None
5634 08377 申酉控股 -17.71 0.395 0.46 0.39 0.46 -0.085 0.07 8.06 -5.07 290000 123200.0 0.48 161611035 161611035 128.08377 None
5635 00108 国锐地产 -19.01 1.15 1.42 1.15 1.42 -0.27 0.07 0.78 23.94 2376000 3012080.0 1.42 3679280084 3679280084 128.00108 None
5636 08237 华星控股 -25.0 0.024 0.031 0.023 0.031 -0.008 0.43 8.74 -2.01 15008000 364188.0 0.032 83760000 83760000 128.08237 None
>>> ef.stock.get_realtime_quotes(['ETF'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 513050 中概互联网ETF 4.49 1.444 1.455 1.433 1.452 0.062 6.71 0.92 - 12961671 1870845984.0 1.382 27895816917 27895816917 1.513050 沪A
1 513360 教育ETF 4.38 0.5 0.502 0.486 0.487 0.021 16.89 1.7 - 1104254 54634387.0 0.479 326856952 326856952 1.513360 沪A
2 159766 旅游ETF 3.84 0.974 0.988 0.95 0.95 0.036 14.46 1.97 - 463730 45254947.0 0.938 312304295 312304295 0.159766 深A
3 159865 养殖ETF 3.8 0.819 0.828 0.785 0.791 0.03 12.13 0.89 - 1405871 114254714.0 0.789 949594189 949594189 0.159865 深A
4 516670 畜牧养殖ETF 3.76 0.856 0.864 0.825 0.835 0.031 24.08 0.98 - 292027 24924513.0 0.825 103803953 103803953 1.516670 沪A
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
549 513060 恒生医疗ETF -4.12 0.861 0.905 0.86 0.902 -0.037 47.96 1.57 - 1620502 141454355.0 0.898 290926128 290926128 1.513060 沪A
550 515220 煤炭ETF -4.46 2.226 2.394 2.194 2.378 -0.104 14.39 0.98 - 2178176 487720560.0 2.330 3369247992 3369247992 1.515220 沪A
551 513000 日经225ETF易方达 -4.49 1.212 1.269 1.21 1.269 -0.057 5.02 2.49 - 25819 3152848.0 1.269 62310617 62310617 1.513000 沪A
552 513880 日经225ETF -4.59 1.163 1.224 1.162 1.217 -0.056 16.93 0.94 - 71058 8336846.0 1.219 48811110 48811110 1.513880 沪A
553 513520 日经ETF -4.76 1.2 1.217 1.196 1.217 -0.06 27.7 1.79 - 146520 17645828.0 1.260 63464640 63464640 1.513520 沪A
Notes
-----
无论股票、可转债、期货还是基金。第一列表头始终叫 ``股票代码``
"""
fs_list: List[str] = []
if fs is None:
fs_list.append(FS_DICT['stock'])
if isinstance(fs, str):
fs = [fs]
if isinstance(fs, list):
for f in fs:
if not FS_DICT.get(f):
raise KeyError(f'指定的行情参数 `{fs}` 不正确')
fs_list.append(FS_DICT[f])
# 给空列表时 试用沪深A股行情
if not fs_list:
fs_list.append(FS_DICT['stock'])
fs_str = ','.join(fs_list)
df = get_realtime_quotes_by_fs(fs_str)
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_history_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票历史单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
沪深市场单只股票历史单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_history_bill('600519')
股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅
0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05
1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35
2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91
3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19
4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05
98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06
99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27
100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08
101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05
"""
df = get_history_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_today_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票最新交易日的日内分钟级单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
单只股票最新交易日的日内分钟级单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_today_bill('600519')
股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入
0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0
1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0
2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0
3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0
4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0
.. ... ... ... ... ... ... ...
235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0
236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0
237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
"""
df = get_today_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame:
"""
获取沪深市场多只股票的实时涨幅情况
Parameters
----------
stock_codes : List[str]
多只股票代码列表
Returns
-------
DataFrame
沪深市场、港股、美股多只股票的实时涨幅情况
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_quote(['600519','300750'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型
0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A
1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A
Notes
-----
当需要获取多只沪深 A 股 的实时涨跌情况时,最好使用 ``efinance.stock.get_realtime_quptes``
"""
if isinstance(stock_codes, str):
stock_codes = [stock_codes]
secids: List[str] = [get_quote_id(stock_code)
for stock_code in stock_codes]
columns = EASTMONEY_QUOTE_FIELDS
fields = ",".join(columns.keys())
params = (
('OSVersion', '14.3'),
('appVersion', '6.3.8'),
('fields', fields),
('fltt', '2'),
('plat', 'Iphone'),
('product', 'EFund'),
('secids', ",".join(secids)),
('serverVersion', '6.3.6'),
('version', '6.3.8'),
)
url = 'https://push2.eastmoney.com/api/qt/ulist.np/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
rows = jsonpath(json_response, '$..diff[:]')
if rows is None:
return pd.DataFrame(columns=columns.values()).rename({
'市场编号': '市场类型'
})
df = pd.DataFrame(rows)[columns.keys()].rename(columns=columns)
df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x)))
del df['市场编号']
return df
@to_numeric
def get_top10_stock_holder_info(stock_code: str,
top: int = 4) -> pd.DataFrame:
"""
获取沪深市场指定股票前十大股东信息
Parameters
----------
stock_code : str
股票代码
top : int, optional
最新 top 个前 10 大流通股东公开信息, 默认为 ``4``
Returns
-------
DataFrame
个股持仓占比前 10 的股东的一些信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_top10_stock_holder_info('600519',top = 1)
股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率
0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 --
1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06%
2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11%
3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 --
4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 --
5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00%
6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 --
7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48%
8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 --
9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 --
"""
def gen_fc(stock_code: str) -> str:
"""
Parameters
----------
stock_code : str
股票代码
Returns
-------
str
指定格式的字符串
"""
_type, stock_code = get_quote_id(stock_code).split('.')
_type = int(_type)
# 深市
if _type == 0:
return f'{stock_code}02'
# 沪市
return f'{stock_code}01'
def get_public_dates(stock_code: str) -> List[str]:
"""
获取指定股票公开股东信息的日期
Parameters
----------
stock_code : str
股票代码
Returns
-------
List[str]
公开日期列表
"""
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
data = {"fc": fc}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data'
json_response = requests.post(
url, json=data).json()
dates = jsonpath(json_response, f'$..BaoGaoQi')
if not dates:
return []
return dates
fields = {
'GuDongDaiMa': '股东代码',
'GuDongMingCheng': '股东名称',
'ChiGuShu': '持股数',
'ChiGuBiLi': '持股比例',
'ZengJian': '增减',
'BianDongBiLi': '变动率',
}
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
dates = get_public_dates(stock_code)
dfs: List[pd.DataFrame] = []
empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values()))
for date in dates[:top]:
data = {"fc": fc, "BaoGaoQi": date}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong'
response = requests.post(url, json=data)
response.encoding = 'utf-8'
items: List[dict] = jsonpath(
response.json(), f'$..ShiDaLiuTongGuDongList[:]')
if not items:
continue
df = pd.DataFrame(items)
df.rename(columns=fields, inplace=True)
df.insert(0, '股票代码', [stock_code for _ in range(len(df))])
df.insert(1, '更新日期', [date for _ in range(len(df))])
del df['IsLink']
dfs.append(df)
if len(dfs) == 0:
return empty_df
return pd.concat(dfs, axis=0, ignore_index=True)
def get_all_report_dates() -> pd.DataFrame:
"""
获取沪深市场的全部股票报告期信息
Returns
-------
DataFrame
沪深市场的全部股票报告期信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_all_report_dates()
报告日期 季报名称
0 2021-06-30 2021年 半年报
1 2021-03-31 2021年 一季报
2 2020-12-31 2020年 年报
3 2020-09-30 2020年 三季报
4 2020-06-30 2020年 半年报
5 2020-03-31 2020年 一季报
6 2019-12-31 2019年 年报
7 2019-09-30 2019年 三季报
8 2019-06-30 2019年 半年报
9 2019-03-31 2019年 一季报
10 2018-12-31 2018年 年报
11 2018-09-30 2018年 三季报
12 2018-06-30 2018年 半年报
13 2018-03-31 2018年 一季报
14 2017-12-31 2017年 年报
15 2017-09-30 2017年 三季报
16 2017-06-30 2017年 半年报
17 2017-03-31 2017年 一季报
18 2016-12-31 2016年 年报
19 2016-09-30 2016年 三季报
20 2016-06-30 2016年 半年报
21 2016-03-31 2016年 一季报
22 2015-12-31 2015年 年报
24 2015-06-30 2015年 半年报
25 2015-03-31 2015年 一季报
26 2014-12-31 2014年 年报
27 2014-09-30 2014年 三季报
28 2014-06-30 2014年 半年报
29 2014-03-31 2014年 一季报
30 2013-12-31 2013年 年报
31 2013-09-30 2013年 三季报
32 2013-06-30 2013年 半年报
33 2013-03-31 2013年 一季报
34 2012-12-31 2012年 年报
35 2012-09-30 2012年 三季报
36 2012-06-30 2012年 半年报
37 2012-03-31 2012年 一季报
38 2011-12-31 2011年 年报
39 2011-09-30 2011年 三季报
"""
fields = {
'REPORT_DATE': '报告日期',
'DATATYPE': '季报名称'
}
params = (
('type', 'RPT_LICO_FN_CPD_BBBQ'),
('sty', ','.join(fields.keys())),
('p', '1'),
('ps', '2000'),
)
url = 'https://datacenter.eastmoney.com/securities/api/data/get'
response = requests.get(
url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
pd.DataFrame(columns=fields.values())
df = pd.DataFrame(items)
df = df.rename(columns=fields)
df['报告日期'] = df['报告日期'].apply(lambda x: x.split()[0])
return df
@to_numeric
def get_all_company_performance(date: str = None) -> pd.DataFrame:
"""
获取沪深市场股票某一季度的表现情况
Parameters
----------
date : str, optional
报告发布日期 部分可选示例如下(默认为 ``None``)
- ``None`` : 最新季报
- ``'2021-06-30'`` : 2021 年 Q2 季度报
- ``'2021-03-31'`` : 2021 年 Q1 季度报
Returns
-------
DataFrame
获取沪深市场股票某一季度的表现情况
Examples
---------
>>> import efinance as ef
>>> # 获取最新季度业绩表现
>>> ef.stock.get_all_company_performance()
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 688981 中芯国际 2021-08-28 00:00:00 1.609039e+10 22.253453 20.6593 5.241321e+09 278.100000 307.8042 0.6600 11.949525 5.20 26.665642 1.182556
1 688819 天能股份 2021-08-28 00:00:00 1.625468e+10 9.343279 23.9092 6.719446e+08 -14.890000 -36.8779 0.7100 11.902912 6.15 17.323263 -1.562187
2 688789 宏华数科 2021-08-28 00:00:00 4.555604e+08 56.418441 6.5505 1.076986e+08 49.360000 -7.3013 1.8900 14.926761 13.51 43.011243 1.421272
3 688681 科汇股份 2021-08-28 00:00:00 1.503343e+08 17.706987 121.9407 1.664509e+07 -13.100000 383.3331 0.2100 5.232517 4.84 47.455511 -0.232395
4 688670 金迪克 2021-08-28 00:00:00 3.209423e+07 -63.282413 -93.1788 -2.330505e+07 -242.275001 -240.1554 -0.3500 3.332254 -10.10 85.308531 1.050348
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3720 600131 国网信通 2021-07-16 00:00:00 2.880378e+09 6.787087 69.5794 2.171389e+08 29.570000 296.2051 0.1800 4.063260 4.57 19.137437 -0.798689
3721 600644 乐山电力 2021-07-15 00:00:00 1.257030e+09 18.079648 5.7300 8.379727e+07 -14.300000 25.0007 0.1556 3.112413 5.13 23.645137 0.200906
3722 002261 拓维信息 2021-07-15 00:00:00 8.901777e+08 47.505282 24.0732 6.071063e+07 68.320000 30.0596 0.0550 2.351598 2.37 37.047968 -0.131873
3723 601952 苏垦农发 2021-07-13 00:00:00 4.544138e+09 11.754570 47.8758 3.288132e+08 1.460000 83.1486 0.2400 3.888046 6.05 15.491684 -0.173772
3724 601568 北元集团 2021-07-09 00:00:00 6.031506e+09 32.543303 30.6352 1.167989e+09 61.050000 40.8165 0.3200 3.541533 9.01 27.879243 0.389860
>>> # 获取指定日期的季度业绩表现
>>> ef.stock.get_all_company_performance('2020-03-31')
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 605033 美邦股份 2021-08-25 00:00:00 2.178208e+08 NaN NaN 4.319814e+07 NaN NaN 0.4300 NaN NaN 37.250416 NaN
1 301048 金鹰重工 2021-07-30 00:00:00 9.165528e+07 NaN NaN -2.189989e+07 NaN NaN NaN NaN -1.91 20.227118 NaN
2 001213 中铁特货 2021-07-29 00:00:00 1.343454e+09 NaN NaN -3.753634e+07 NaN NaN -0.0100 NaN NaN -1.400708 NaN
3 605588 冠石科技 2021-07-28 00:00:00 1.960175e+08 NaN NaN 1.906751e+07 NaN NaN 0.3500 NaN NaN 16.324650 NaN
4 688798 艾为电子 2021-07-27 00:00:00 2.469943e+08 NaN NaN 2.707568e+07 NaN NaN 0.3300 NaN 8.16 33.641934 NaN
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4440 603186 华正新材 2020-04-09 00:00:00 4.117502e+08 -6.844813 -23.2633 1.763252e+07 18.870055 -26.3345 0.1400 5.878423 2.35 18.861255 0.094249
4441 002838 道恩股份 2020-04-09 00:00:00 6.191659e+08 -8.019810 -16.5445 6.939886e+07 91.601624 76.7419 0.1700 2.840665 6.20 22.575224 0.186421
4442 600396 金山股份 2020-04-08 00:00:00 2.023133e+09 0.518504 -3.0629 1.878432e+08 114.304022 61.2733 0.1275 1.511012 8.81 21.422393 0.085698
4443 002913 奥士康 2020-04-08 00:00:00 4.898977e+08 -3.883035 -23.2268 2.524717e+07 -47.239162 -58.8136 0.1700 16.666749 1.03 22.470020 0.552624
4444 002007 华兰生物 2020-04-08 00:00:00 6.775414e+08 -2.622289 -36.1714 2.472864e+08 -4.708821 -22.6345 0.1354 4.842456 3.71 61.408522 0.068341
Notes
-----
当输入的日期不正确时,会输出可选的日期列表。
你也可以通过函数 ``efinance.stock.get_all_report_dates`` 来获取可选日期
"""
# TODO 加速
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票简称',
'NOTICE_DATE': '公告日期',
'TOTAL_OPERATE_INCOME': '营业收入',
'YSTZ': '营业收入同比增长',
'YSHZ': '营业收入季度环比',
'PARENT_NETPROFIT': '净利润',
'SJLTZ': '净利润同比增长',
'SJLHZ': '净利润季度环比',
'BASIC_EPS': '每股收益',
'BPS': '每股净资产',
'WEIGHTAVG_ROE': '净资产收益率',
'XSMLL': '销售毛利率',
'MGJYXJJE': '每股经营现金流量'
# 'ISNEW':'是否最新'
}
dates = get_all_report_dates()['报告日期'].to_list()
if date is None:
date = dates[0]
if date not in dates:
rich.print('日期输入有误,可选日期如下:')
rich.print(dates)
return pd.DataFrame(columns=fields.values())
date = f"(REPORTDATE=\'{date}\')"
page = 1
dfs: List[pd.DataFrame] = []
while 1:
params = (
('st', 'NOTICE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', f'{page}'),
('type', 'RPT_LICO_FN_CPD'),
('sty', 'ALL'),
('token', '<KEY>'),
# ! 只选沪深A股
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008")){date}'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
dfs.append(df)
page += 1
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = | pd.concat(dfs, axis=0, ignore_index=True) | pandas.concat |
import os
import csv
import pandas as pd
import numpy as np
import librosa
import tqdm
def load_audio_file(path):
audio_data, sample_rate = librosa.load(path)
return audio_data, sample_rate
def extract_features(audio_data, sample_rate):
sig_mean = np.mean(abs(audio_data))
sig_std = np.std(audio_data)
rmse = librosa.feature.rms(audio_data + 0.0001)[0]
audio_data_harmonic = librosa.effects.hpss(audio_data)[0]
silence = 0
for e in rmse:
if e <= 0.4 * np.mean(rmse):
silence += 1
silence /= float(len(rmse))
# based on the pitch detection algorithm mentioned here:
# http://access.feld.cvut.cz/view.php?cisloclanku=2009060001
cl = 0.45 * sig_mean
center_clipped = []
for s in audio_data:
if s >= cl:
center_clipped.append(s - cl)
elif s <= -cl:
center_clipped.append(s + cl)
elif np.abs(s) < cl:
center_clipped.append(0)
auto_corrs = librosa.core.autocorrelate(np.array(center_clipped))
return {
'sig_mean': sig_mean,
'sig_std': sig_std,
'rmse_mean': np.mean(rmse),
'rmse_std': np.std(rmse),
'silence': silence,
'harmonic': 1e3 * np.mean(audio_data_harmonic),
'auto_corr_max': 1e3 * np.max(auto_corrs)/len(auto_corrs),
'auto_corr_std': np.std(auto_corrs),
}
def featurize_dataframe(df):
features = []
for idx, path in tqdm.tqdm(df['path'].items(), total=df.shape[0]):
audio_data, sample_rate = load_audio_file(os.path.join(os.path.dirname(__file__), '..', path))
features.append(extract_features(audio_data=audio_data, sample_rate=sample_rate))
return df.join( | pd.DataFrame(data=features, index=df.index) | pandas.DataFrame |
import numpy as np
import pandas as pd
from PyEMD import EMD, Visualisation
import scipy
import math
import scipy.io
import scipy.linalg
import sklearn.metrics
import sklearn.neighbors
from sklearn import metrics
from sklearn import svm
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
import ipdb
# Utilities
def normalize(V):
return ( V - min(V.flatten()) ) / ( max(V.flatten()) - min(V.flatten()) )
def sliding_window(T, T_org, seq_len, label_seq_len):
# seq_len is equal to window_size
# T (np.array) has dim: population, seq_len (window length)
TT = T.reshape(-1, 1)
K = TT.shape[0] - seq_len - label_seq_len + 1 # Li, et al., 2021, TRJ part C, pp. 8
TT_org = T_org.reshape(-1, 1)
# TT has dim: n, 1
# assemble the data into 2D
x_set = np.vstack(TT[i : K+i, 0] for i in range(seq_len)).T
y_set = np.vstack(TT_org[i+seq_len : K+seq_len+i, 0] for i in range(label_seq_len)).T
assert x_set.shape[0] == y_set.shape[0]
# return size: n_samp, seq_len
return x_set, y_set
def var_name(var, all_var=locals()):
# get the name of the variable
return [var_name for var_name in all_var if all_var[var_name] is var][0]
def np2csv(A):
# store numpy to local csv file
if type(A) == torch.Tensor:
np.savetxt('./outputs/BDA/'+var_name(A)+'.csv', A.detach().numpy(), delimiter=',')
elif type(A) == np.ndarray:
np.savetxt('./outputs/BDA/'+var_name(A)+'.csv', A, delimiter=',')
# BDA part
def kernel(ker, X1, X2, gamma):
K = None
if not ker or ker == 'primal':
K = X1
elif ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(
np.asarray(X1).T, np.asarray(X2).T)
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(
np.asarray(X1).T, np.asarray(X2).T, gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(
np.asarray(X1).T, None, gamma)
return K
def proxy_a_distance(source_X, target_X):
"""
Compute the Proxy-A-Distance of a source/target representation
"""
nb_source = np.shape(source_X)[0]
nb_target = np.shape(target_X)[0]
train_X = np.vstack((source_X, target_X))
train_Y = np.hstack((np.zeros(nb_source, dtype=int),
np.ones(nb_target, dtype=int)))
clf = svm.LinearSVC(random_state=0)
clf.fit(train_X, train_Y)
y_pred = clf.predict(train_X)
error = metrics.mean_absolute_error(train_Y, y_pred)
dist = 2 * (1 - 2 * error)
return dist
def estimate_mu(_X1, _Y1, _X2, _Y2):
adist_m = proxy_a_distance(_X1, _X2)
C = len(np.unique(_Y1))
epsilon = 1e-3
list_adist_c = []
for i in range(1, C + 1):
ind_i, ind_j = np.where(_Y1 == i), np.where(_Y2 == i)
Xsi = _X1[ind_i[0], :]
Xtj = _X2[ind_j[0], :]
adist_i = proxy_a_distance(Xsi, Xtj)
list_adist_c.append(adist_i)
adist_c = sum(list_adist_c) / C
mu = adist_c / (adist_c + adist_m)
if mu > 1:
mu = 1
if mu < epsilon:
mu = 0
return mu
class BDA:
def __init__(self, kernel_type='primal', dim=30, lamb=1, mu=0.5, gamma=1, T=10, mode='BDA', estimate_mu=False):
'''
Init func
:param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
:param dim: dimension after transfer
:param lamb: lambda value in equation
:param mu: mu. Default is -1, if not specificied, it calculates using A-distance
:param gamma: kernel bandwidth for rbf kernel
:param T: iteration number
:param mode: 'BDA' | 'WBDA'
:param estimate_mu: True | False, if you want to automatically estimate mu instead of manally set it
'''
self.kernel_type = kernel_type
self.dim = dim
self.lamb = lamb
self.mu = mu
self.gamma = gamma
self.T = T
self.mode = mode
self.estimate_mu = estimate_mu
def fit(self, Xs, Ys, Xt, Yt):
'''
Transform and Predict using 1NN as JDA paper did
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt: nt * n_feature, target feature
:param Yt: nt * 1, target label
:return: acc, y_pred, list_acc
'''
#ipdb.set_trace()
list_acc = []
X = np.hstack((Xs.T, Xt.T)) # X.shape: [n_feature, ns+nt]
X /= np.linalg.norm(X, axis=0) # why it's axis=0?
m, n = X.shape
ns, nt = len(Xs), len(Xt)
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
C = len(np.unique(Ys))
H = np.eye(n) - 1 / n * np.ones((n, n))
mu = self.mu
M = 0
Y_tar_pseudo = None
Xs_new = None
for t in range(self.T):
N = 0
M0 = e * e.T * C
if Y_tar_pseudo is not None and len(Y_tar_pseudo) == nt:
for c in range(1, C + 1):
e = np.zeros((n, 1))
Ns = len(Ys[np.where(Ys == c)])
Nt = len(Y_tar_pseudo[np.where(Y_tar_pseudo == c)])
if self.mode == 'WBDA':
Ps = Ns / len(Ys)
Pt = Nt / len(Y_tar_pseudo)
alpha = Pt / Ps
mu = 1
else:
alpha = 1
tt = Ys == c
e[np.where(tt == True)] = 1 / Ns
yy = Y_tar_pseudo == c
ind = np.where(yy == True)
inds = [item + ns for item in ind]
e[tuple(inds)] = -alpha / Nt
e[np.isinf(e)] = 0 # ?
N = N + np.dot(e, e.T)
# In BDA, mu can be set or automatically estimated using A-distance
# In WBDA, we find that setting mu=1 is enough
if self.estimate_mu and self.mode == 'BDA':
if Xs_new is not None:
mu = estimate_mu(Xs_new, Ys, Xt_new, Y_tar_pseudo)
else:
mu = 0
M = (1 - mu) * M0 + mu * N
M /= np.linalg.norm(M, 'fro')
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = np.linalg.multi_dot(
[K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
Z = np.dot(A.T, K)
Z /= np.linalg.norm(Z, axis=0) # why it's axis=0?
Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
'''
clf = sklearn.neighbors.KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
Y_tar_pseudo = clf.predict(Xt_new)
acc = sklearn.metrics.accuracy_score(Yt, Y_tar_pseudo)
list_acc.append(acc)
print('{} iteration [{}/{}]: Acc: {:.4f}'.format(self.mode, t + 1, self.T, acc))
'''
return Xs_new, Xt_new, A #, acc, Y_tar_pseudo, list_acc
class LSTM(nn.Module):
def __init__(self, inp_dim, out_dim, hid_dim, layers):
super(LSTM, self).__init__()
self.out_dim = out_dim
self.lstm = nn.LSTM(inp_dim, hid_dim, layers, dropout=0.3, batch_first=True)
self.fc = nn.Sequential(
nn.ReLU(),
nn.Linear(hid_dim, hid_dim*2),
nn.ReLU(),
nn.Linear(hid_dim*2, out_dim)
) # regression
def forward(self, x):
# input: (batchsize, seq_len, input_dim)
# output: (batchsize, seq_len, hid_dim)
#ipdb.set_trace()
y = self.lstm(x)[0] # y, (h, c) = self.rnn(x)
y = self.fc(y[:, :, :]) # fully connected layer
return y[:, -1, :]
def mape_loss_func(preds, labels):
try:
if preds.device.type == 'cuda':
preds = preds.cpu().detach().numpy()
if labels.device.type == 'cuda':
labels = labels.cpu().detach().numpy()
except:
None
mask = labels > .05
return np.mean(np.fabs(labels[mask]-preds[mask])/labels[mask])
def smape_loss_func(preds, labels):
try:
if preds.device.type == 'cuda':
preds = preds.cpu().detach().numpy()
if labels.device.type == 'cuda':
labels = labels.cpu().detach().numpy()
except:
None
mask= labels > .05
return np.mean(2*np.fabs(labels[mask]-preds[mask])/(np.fabs(labels[mask])+np.fabs(preds[mask])))
def mae_loss_func(preds, labels):
try:
if preds.device.type == 'cuda':
preds = preds.cpu().detach().numpy()
if labels.device.type == 'cuda':
labels = labels.cpu().detach().numpy()
except:
None
mask= labels > .05
return np.fabs((labels[mask]-preds[mask])).mean()
def eliminate_nan(b):
a = np.array(b)
c = a[~np.isnan(a)]
return c
def main(mu):
# load data
weekdays = np.array([np.arange(2+7*i,7+7*i,1) for i in range(4)]).flatten()
weekends = np.array([np.arange(7+7*i,9+7*i,1) for i in range(3)]).flatten()[:-1]
src_domain = np.array( | pd.read_csv('../TCA_traffic/data/siteM4_2168B_20210101_20210131.csv') | pandas.read_csv |
from lxml import etree
import requests
from io import BytesIO
import pandas
from zipfile import ZipFile
popoular_name_url = "https://uscode.house.gov/popularnames/popularnames.htm"
table3_zip_url = "https://uscode.house.gov/table3/table3-xml-bulk.zip"
if __name__ == "__main__":
print("Downloading Popular Name List")
popular_names = requests.get(popoular_name_url)
print("Downloaded popularnames.htm", len(popular_names.content), "bytes")
parser = etree.HTMLParser(huge_tree=True)
tree = etree.parse(BytesIO(popular_names.content), parser)
items = tree.xpath("//div[@class='popular-name-table-entry']")
print("Detected", len(items), "acts")
res = [{
"Name": item.getchildren()[0].text,
"PubL": item.getchildren()[1].getchildren()[0].text,
} for item in items if len(item.getchildren()) > 1 and item.getchildren()[1].text == "Pub. L. "]
print("Detected", len(res), "PubL acts")
pop_df = | pandas.DataFrame.from_records(res) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
class FeatureImportance:
def __init__(self, df, resp):
self.dataframe = df
self.response = resp
self.predictors = pd.Series(self.dataframe.columns)
self._rf_imp = []
self._boosting_imp = []
self._rpe_imp = []
self._kbest_imp = []
self._rf_param = []
self._boosting_param = []
self._rpe_param = []
self._kbest_param = []
def rf(self, n_estimators=500, criterion='gini', max_features='auto'):
""" Returns the importances calculated by a random forest classifier.
To make the method more effective, the result is stored in a private
property, so if it is used with the same parameters again, it will only
have to print the result.
Parameters:
* n_estimators: number of trees in the forest
* criterion: optimization criterion when building the trees.
'gini' (default) for Gini impurity
'entropy' for the information gain
* max_features: number of features to select at each split
"""
if self._rf_param == [n_estimators, criterion, max_features]:
return | pd.DataFrame({'Predictors': self.predictors, 'RF': self._rf_imp}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import mapping as mp
from . import strategy
def get_relative_to_expiry_instrument_weights(dates, root_generics, expiries,
offsets, all_monthly=False,
holidays=None):
"""
Generate instrument weights for each root generic where the position is
rolled entirely in one day based on an offset from the earlier of the
contracts First Notice Date and Last Trade Date.
Parameters
----------
dates: Iterable
Iterable of pandas.Timestamps, dates to generate instrument weights
for.
root_generics: dict
Dictionary with key as root generic and value as list of future
generics, e.g. {"CL": ["CL1", "CL2"]}
expiries: pd.DataFrame
A pd.DataFrame with columns ["contract", "first_notice",
"last_trade"] where "first_notice" and "last_trade" must be
parseable to datetimes with format %Y-%m-%d and "contract" must be
a string in the form YYYYNNC representing the contract name, e.g.
"2007ESU".
offsets: int or dict
Number of business days to roll relative to earlier of the
instruments First Notice and Last Trade date. If int is given use
the same number for all futures, if dict is given keys must cover
all root generics and contain an integer for each.
all_monthly: boolean
Whether to roll each contract individually based on the offset from
the earlier of its First Notice and Last Trade date or to roll all
contracts with the same month code based on the earliest date.
holidays: list
list of timezone aware pd.Timestamps used for holidays when calculating
relative date roll logic.
Returns
-------
A dictionary of DataFrames of instrument weights indexed by root
generic, see mapper.mappings.roller()
Examples
--------
>>> import strategy.rebalance as rebal
>>> import pandas as pd
>>> dts = pd.date_range("2018-01-01", "2018-02-01", freq="B")
>>> rg = {"CL": ["CL1"], "ES": ["ES1"]}
>>> exp = pd.DataFrame(
... [["2018CLF", "2018-01-28", "2018-01-27"],
... ["2018CLG", "2018-02-28", "2018-02-27"],
... ["2018ESF", "2018-01-20", "2018-01-21"],
... ["2018ESG", "2018-02-20", "2018-02-21"]],
... columns=["contract", "first_notice", "last_trade"]
... )
>>> offsets = -5
>>> rebal.get_relative_to_expiry_instrument_weights(dts, rg, exp, offsets)
"""
close_by = _close_by_dates(expiries, all_monthly)
cntrct_close_by_dates = {}
for grp, dts in close_by.groupby("root_generic"):
cntrct_close_by_dates[grp] = dts.loc[:, "close_by"]
wts = {}
for root in root_generics:
gnrcs = root_generics[root]
cols = pd.MultiIndex.from_product([gnrcs, ['front', 'back']])
if not isinstance(offsets, int):
offset = offsets[root]
else:
offset = offsets
idx = [offset, offset + 1]
trans = np.tile(np.array([[1.0, 0.0], [0.0, 1.0]]), len(gnrcs))
transition = pd.DataFrame(trans, index=idx,
columns=cols)
wts[root] = mp.mappings.roller(dates,
cntrct_close_by_dates[root],
mp.mappings.static_transition,
transition=transition,
holidays=holidays)
return wts
def get_relative_to_expiry_rebalance_dates(start_date, end_date, expiries,
offsets, all_monthly=False,
holidays=None):
"""
Rebalance days for trading strategy. These are defined as the offset
given from the earlier of the instruments First Notice and Last Trade
date. If all_monthly=True then then roll all monthly contracts
together based on earliest date for this set of contracts. The
start_date or if this is not a business day the following business day
is also included in the rebalance days.
Parameters
----------
start_date: pandas.Timestamp
Date to generate rebalance dates starting from
end_date: pandas.Timestamp
Date to generate rebalance dates until
expiries: pd.DataFrame
A pd.DataFrame with columns ["contract", "first_notice",
"last_trade"] where "first_notice" and "last_trade" must be
parseable to datetimes with format %Y-%m-%d and "contract" must be
a string in the form YYYYNNC representing the contract name, e.g.
"2007ESU".
offsets: int or dict
Number of business days to roll relative to earlier of the
instruments First Notice and Last Trade date. If int is given use
the same number for all futures, if dict is given keys must cover
all root generics and contain an integer for each.
all_monthly: boolean
Whether to roll each contract individually based on the offset from
the earlier of its First Notice and Last Trade date or to roll all
contracts with the same month code based on the earliest date.
holidays: list
list of timezone aware pd.Timestamps used for holidays when calculating
relative date roll logic.
Returns
-------
pandas.DatetimeIndex
Examples
--------
>>> import strategy.rebalance as rebal
>>> import pandas as pd
>>> sd = pd.Timestamp("2018-01-01")
>>> ed = pd.Timestamp("2018-02-01")
>>> exp = pd.DataFrame(
... [["2018CLF", "2018-01-28", "2018-01-27"],
... ["2018CLG", "2018-02-28", "2018-02-27"],
... ["2018ESF", "2018-01-20", "2018-01-21"],
... ["2018ESG", "2018-02-20", "2018-02-21"]],
... columns=["contract", "first_notice", "last_trade"]
... )
>>> offsets = -5
>>> rebal.get_relative_to_expiry_rebalance_dates(sd, ed, exp, offsets)
"""
if not holidays:
holidays = []
close_by = _close_by_dates(expiries, all_monthly)
gnrc_close_by = close_by.groupby(["root_generic"])
rebal_dates = []
for root, close_by_dates in gnrc_close_by:
if not isinstance(offsets, int):
offset = offsets[root]
else:
offset = offsets
dates = (
close_by_dates.loc[:, "close_by"].values.astype('datetime64[D]')
)
dates = np.busday_offset(dates, offsets=offset, roll='preceding',
holidays=holidays)
rebal_dates.append(dates)
rebal_dates = np.concatenate(rebal_dates)
rebal_dates = pd.DatetimeIndex(rebal_dates).unique().sort_values()
rebal_dates = rebal_dates[rebal_dates >= start_date]
rebal_dates = rebal_dates[rebal_dates <= end_date]
first_date = np.busday_offset(start_date.date(), 0,
roll="following", holidays=holidays)
rebal_dates = rebal_dates.union([first_date])
return rebal_dates
def _close_by_dates(expiries, all_monthly):
# hacky, should refactor such that not using private method
# _validate_expiries
expiries = strategy.Exposures._validate_expiries(expiries)
close_by = expiries.set_index("contract")
close_by.loc[:, "close_by"] = (
close_by[["first_notice", "last_trade"]].min(axis=1)
)
close_by = close_by.sort_values("close_by")
if all_monthly:
close_by = (
close_by.join(close_by.groupby(["year", "month"]).first(),
on=["year", "month"], rsuffix="_earliest_cntrct")
)
close_by = close_by[["root_generic", "close_by_earliest_cntrct"]]
close_by.columns = ["root_generic", "close_by"]
close_by = close_by.loc[:, ["root_generic", "close_by"]]
return close_by
def get_fixed_frequency_rebalance_dates(start_date, end_date, frequency,
offset):
"""
Generate reblance dates according to a fixed frequency, e.g. Wednesday of
every week.
Parameters
----------
start_date: pandas.Timestamp
Date to generate rebalance dates starting from
end_date: pandas.Timestamp
Date to generate rebalance dates until
frequency: string
Fixed frequency for reblance, supports {"weekly", "monthly"}
offset: int or list
Relative offsets based on the frequency. E.g. [0, 1] for weekly
gives the first two days of the week, [-5] for monthly gives the
fifth last day of the month.
Returns
-------
pandas.DatetimeIndex
Examples
--------
>>> import strategy.rebalance as rebal
>>> import pandas as pd
>>> sd = pd.Timestamp("2018-01-01")
>>> ed = pd.Timestamp("2018-02-01")
>>> freq = "weekly"
>>> offsets = 2
>>> rebal.get_fixed_frequency_rebalance_dates(sd, ed, freq, offsets)
"""
if frequency == "monthly":
groups = ["year", "month"]
sd = start_date - pd.offsets.MonthBegin(1)
ed = end_date + | pd.offsets.MonthEnd(1) | pandas.offsets.MonthEnd |
from nlpsummarize import nlp
import pandas as pd
def test_init_1():
"""
Test initialization of the class NLPFrame
"""
initial_df = nlp.NLPFrame({'text_col' : ['Today is a beautiful Monday and I would love getting a coffee. However, startbucks is closed.','It has been an amazing day today!']}, index = [0,1])
assert initial_df.column == 'text_col'
def test_init_2():
"""
Test initialization of the class NLPFrame
"""
initial_df = nlp.NLPFrame({'text_col' : ['Today is a beautiful Monday and I would love getting a coffee. However, startbucks is closed.','It has been an amazing day today!']}, index = [0,1], column = 'non_existing')
assert initial_df.column == 'text_col'
def test_init_3():
"""
Test initialization of the class NLPFrame that doesn't have text column
"""
initial_df = nlp.NLPFrame({'text_col' : [5,6,8]})
assert initial_df.column == None
def test_get_nlp_summary_1():
"""
Tests get_nlp_summary function for NLPFrame without column with text
"""
initial_df = nlp.NLPFrame({'text_col' : [5,6,8]})
try:
initial_df.get_nlp_summary()
assert False
except ValueError:
pass
def test_get_nlp_summary_2():
"""
Tests get_nlp_summary function for NLPFrame with wrong column specified
"""
initial_df = nlp.NLPFrame({'text_col' : ['Today is a beautiful Monday and I would love getting a coffee. However, startbucks is closed.','It has been an amazing day today!']}, index = [0,1], column = 'non_existing')
res = initial_df.get_nlp_summary(column = 'non_existing')
assert res.equals( | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
api.py
Provides the API for papermill
"""
from __future__ import unicode_literals
import os
import IPython
from IPython.display import display as ip_display, Markdown
import pandas as pd
from six import string_types
from .exceptions import PapermillException
from .iorw import load_notebook_node, list_notebook_files
RECORD_OUTPUT_TYPE = 'application/papermill.record+json'
DISPLAY_OUTPUT_TYPE = 'application/papermill.display+json'
def record(name, value):
"""
Record a value in the output notebook when a cell is executed.
The recorded value can be retrieved during later inspection of the
output notebook.
Example
-------
`record` provides a handy way for data to be stored with a notebook to
be used later::
pm.record("hello", "world")
pm.record("number", 123)
pm.record("some_list", [1, 3, 5])
pm.record("some_dict", {"a": 1, "b": 2})
pandas can be used later to recover recorded values by
reading the output notebook into a `dataframe`::
nb = pm.read_notebook('notebook.ipynb')
nb.dataframe
Parameters
----------
name : str
Name of the value to record.
value: str, int, float, list, dict
The value to record.
"""
# IPython.display.display takes a tuple of objects as first parameter
# `http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#IPython.display.display`
data = {RECORD_OUTPUT_TYPE: {name: value}}
ip_display(data, raw=True)
def display(name, obj):
"""
Display an object with the reference `name`.
Parameters
----------
name : str
Name of the output.
obj : object
An object that can be displayed in the notebook.
"""
data, metadata = IPython.core.formatters.format_display_data(obj)
metadata['papermill'] = dict(name=name)
ip_display(data, metadata=metadata, raw=True)
def read_notebook(path):
"""
Returns a Notebook object loaded from the location specified at `path`.
Parameters
----------
path : str
Path to a notebook `.ipynb` file.
Returns
-------
notebook : object
A Notebook object.
"""
if not path.endswith(".ipynb"):
raise PapermillException("Requires an '.ipynb' file extension. Provided path: '%s'", path)
nb = Notebook()
nb.path = path
nb.node = load_notebook_node(path)
return nb
def read_notebooks(path):
"""
Returns a NotebookCollection including the notebooks read from the
directory specified by `path`.
Parameters
----------
path : str
Path to directory containing notebook `.ipynb` files.
Returns
-------
nb_collection : object
A `NotebookCollection` object.
"""
nb_collection = NotebookCollection()
for notebook_path in list_notebook_files(path):
fn = os.path.basename(notebook_path)
nb_collection[fn] = read_notebook(notebook_path)
return nb_collection
class Notebook(object):
"""
Representation of a notebook.
Parameters
----------
node : `nbformat.NotebookNode`
a notebook object
path : str, optional
the path to the notebook
Method
------
display_output
"""
def __init__(self, node=None, path=None):
if path is not None and not node:
raise ValueError('notebook must be defined when path is given')
self.path = path or ''
self.node = node
@property
def filename(self):
"""str: filename found a the specified path"""
return os.path.basename(self.path)
@property
def directory(self):
"""str: directory name at the specified path"""
return os.path.dirname(self.path)
@property
def version(self):
"""str: version of Jupyter notebook spec"""
return _get_papermill_metadata(self.node, 'version', default=None)
@property
def parameters(self):
"""dict: parameters stored in the notebook metadata"""
return _get_papermill_metadata(self.node, 'parameters', default={})
@property
def environment_variables(self):
"""dict: environment variables used by the notebook execution"""
return _get_papermill_metadata(self.node, 'environment_variables', default={})
@property
def data(self):
"""dict: a dictionary of data found in the notebook"""
return _fetch_notebook_data(self.node)
@property
def dataframe(self):
"""pandas dataframe: a dataframe of named parameters and values"""
df = pd.DataFrame(columns=['name', 'value', 'type', 'filename'])
# TODO: refactor to a Dict comprehension or list comprehensions
i = 0
for name in sorted(self.parameters.keys()):
df.loc[i] = name, self.parameters[name], 'parameter', self.filename
i += 1
for name in sorted(self.data.keys()):
df.loc[i] = name, self.data[name], 'record', self.filename
i += 1
return df
@property
def metrics(self):
"""pandas dataframe: dataframe of cell execution counts and times"""
df = | pd.DataFrame(columns=['filename', 'cell', 'value', 'type']) | pandas.DataFrame |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.single
def test_format_type(setup_path):
df = DataFrame({"A": [1, 2]})
with ensure_clean_path(setup_path) as path:
with HDFStore(path) as store:
store.put("a", df, format="fixed")
store.put("b", df, format="table")
assert store.get_storer("a").format_type == "fixed"
assert store.get_storer("b").format_type == "table"
def test_format_kwarg_in_constructor(setup_path):
# GH 13291
msg = "format is not a defined argument for HDFStore"
with tm.ensure_clean(setup_path) as path:
with pytest.raises(ValueError, match=msg):
HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_put(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError, match=msg):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError, match=msg):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(setup_path):
with ensure_clean_store(setup_path) as store:
index = Index([f"I am a very long string index: {i}" for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ [f"I am a very long string index: {i}" for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
msg = "Compression not supported on Fixed format stores"
with pytest.raises(ValueError, match=msg):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
msg = "Compression not supported on Fixed format stores"
with pytest.raises(ValueError, match=msg):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_put_mixed_type(setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
def test_store_index_types(setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# seee GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
def test_column_multiindex(setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
import time
import requests
import numpy as np
import pandas as pd
from tradingfeatures import apiBase
from tradingfeatures.apis.bitfinex.base import bitfinexBase
class bitfinexShortLong(bitfinexBase):
def __init__(self):
super(bitfinexShortLong, self).__init__()
self.name = 'bitfinex_shortlong'
self.address = '/stats1'
self.start = 1364778000
self.limit = 10000
# self.columns = ['timestamp', 'open', 'close', 'high', 'low', 'volume']
def get(self, limit=None, symbol=None, query=None, start=None, end=None, *args, **kwargs):
# def get(self,
# limit: int = None,
# symbol: str = None,
# address: str = None,
# query: dict = None,
# start: int = None,
# end: int = None,
# interval: str = '1h',
# columns: list = None,
# return_r: bool = False,
# sort = -1,
# ):
address = f'{self.address}/pos.size:1h:tBTCUSD:long/hist'
query = {'limit': 10000, 'sort': -1}
r = super(bitfinexShortLong, self).get(
address=address,
query=query,
start=start,
end=end,
return_r=True,
*args, **kwargs
)
data = r.json()
df = pd.DataFrame(data, columns=['timestamp', 'values'])
print('deu')
start, end, out_of_range = self.calc_start(limit, start, end)
if out_of_range:
return self.get_hist(start=start, end=end)
address = address or self.address
address = self.base_address + address
symbol = symbol or 'tBTCUSD'
if query is None:
limit = self.limit if limit is None else limit
start, end = self.ts_to_mts(start), self.ts_to_mts(end) # Conver for Bitfinex
address = address + f'/trade:{interval}:{symbol}/hist'
query = {'limit': limit, 'start': start, 'end': end, 'sort': sort}
r = self.response_handler(address, params=query, timeout=60)
data = r.json()
data.reverse()
df = | pd.DataFrame(data, columns=self.columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from pandas_transformers.transformers import PandasOneHotEncoder, PandasTfidfVectorizer
class TestPandasOneHotEncoder:
"""
Tests for the PandasOneHotEncoder class
"""
@pytest.fixture
def example_train_df(self):
"""Example training dataset."""
return pd.DataFrame({"cat": ["a", "a", "b"], "num": [3, 4, 4]})
@pytest.fixture
def example_test_df(self):
"""Example testing dataset which contains a category not present in the
training dataset (c)."""
return pd.DataFrame({"cat": ["a", "b", "c"], "num": [3, 4, 3]})
@pytest.fixture
def example_test_df_diff_column(self):
"""Example testing dataset which contains a column that is not present in the
other example datasets"""
return pd.DataFrame({"new_col": [3]})
@pytest.fixture
def example_missing_values_df(self):
"""
Example dataset with missing value
"""
return pd.DataFrame({"cat": ["a", "a", None]})
def test_example(self, example_train_df):
""" Tests a simple example. """
transformer = PandasOneHotEncoder()
transformer.fit(example_train_df)
transformed = transformer.transform(example_train_df)
expected = pd.DataFrame(
{
"cat_a": pd.Series([1, 1, 0], dtype=np.uint8),
"cat_b": pd.Series([0, 0, 1], dtype=np.uint8),
"num": [3, 4, 4],
}
)
# The column order shouldnt matter (therefore we sort them)
pd.testing.assert_frame_equal(
transformed.sort_index(axis=1), expected.sort_index(axis=1)
)
def test_min_frequency(self, example_train_df):
"""Example where we use min_frequency=2. This means that any category with less
than two occurences should be ignored.
"""
transformer = PandasOneHotEncoder(min_frequency=2)
transformer.fit(example_train_df)
transformed = transformer.transform(example_train_df)
expected = pd.DataFrame(
{"cat_a": pd.Series([1, 1, 0], dtype=np.uint8), "num": [3, 4, 4]}
)
# The column order shouldnt matter (therefore we sort them)
pd.testing.assert_frame_equal(
transformed.sort_index(axis=1), expected.sort_index(axis=1)
)
def test_unseen_category(self, example_train_df, example_test_df):
"""
Example where we test the onehot encoder in the case where it encounters
new categories during transform (in the test set) and we choose to ignore it
"""
transformer = PandasOneHotEncoder()
transformer.fit(example_train_df)
transformed_test = transformer.transform(example_test_df)
expected = pd.DataFrame(
{
"cat_a": pd.Series([1, 0, 0], dtype=np.uint8),
"cat_b": pd.Series([0, 1, 0], dtype=np.uint8),
"num": [3, 4, 3],
}
)
# The column order shouldnt matter (therefore we sort them)
pd.testing.assert_frame_equal(
transformed_test.sort_index(axis=1), expected.sort_index(axis=1)
)
def test_unseen_category_error(self, example_train_df, example_test_df):
"""
Example where we test the onehot encoder in the case where it encounters
new categories during transform (in the test set) and we choose to raise
an error
"""
transformer = PandasOneHotEncoder(handle_unknown="error")
transformer.fit(example_train_df)
with pytest.raises(ValueError):
transformer.transform(example_test_df)
def test_missing_column(self, example_train_df, example_test_df_diff_column):
"""
Test transformer when test set does not have the required columns.
In that case, it should return a KeyError
"""
transformer = PandasOneHotEncoder()
transformer.fit(example_train_df)
with pytest.raises(KeyError):
transformer.transform(example_test_df_diff_column)
def test_zero_count_categories(self, example_train_df):
"""
Test transformer when categorical columns have categories that do not
occur at all (e.g. in predefined categories)
"""
categ = pd.CategoricalDtype(["a", "b", "c"])
example_train_df_extra_cat = example_train_df.assign(
cat=lambda _df: _df["cat"].astype(categ)
)
transformer = PandasOneHotEncoder()
transformer.fit(example_train_df_extra_cat)
transformed = transformer.transform(example_train_df_extra_cat)
expected = pd.DataFrame(
{
"cat_a": pd.Series([1, 1, 0], dtype=np.uint8),
"cat_b": | pd.Series([0, 0, 1], dtype=np.uint8) | pandas.Series |
#Modules to install via pip pandas,ipynb
import os
import sys
import time
from lib import trace_classification
sys.path.append('../')
import os
import pandas as pd
import numpy as np
import json
#Modules to install via pip pandas,ipynb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from pprint import pprint
import os
#import import_ipynb
import sys
sys.path.append('../')
from pandas.plotting import scatter_matrix
from lib import trace_analysis
from node import *
import sklearn.metrics as sm
import pandas as pd
import matplotlib.pyplot as plt
import os
from node import *
from lib import plots_analysis
from sklearn.metrics import confusion_matrix
from sklearn.cluster import KMeans
import sklearn.metrics as sm
from sklearn.decomposition import PCA
import random
#Modules to install via pip pandas,ipynb
import sys
sys.path.append('../')
from lib import plots_analysis
from sklearn.cluster import KMeans
import pandas as pd
# scipy
import sklearn.metrics as sm
class node(object):
ip = ""
hop= 0
pkts=pd.DataFrame()
# The class "constructor" - It's actually an initializer
def __init__(self,ip,hop,pkts):
self.ip = ip
self.hop=hop
self.pkts=pkts
def make_node(ip,hop,pkts):
node= node(ip,hop,pkts)
return node
#######
#Plotting Graphs
#####
def saveFileFigures(fig,directory,namefile):
directory=directory+"figures/"
if not os.path.exists(directory):
os.makedirs(directory)
print(directory)
fig.savefig(directory+namefile+".pdf") # save the figure to file
#plt.show()
#Prints on a file the big matrix (asked by professor)
def printBigPlot(directory,data,figsize,namefile,colors,cases):
print("Printing Big Plot for "+directory)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
#print(i,j)
ax=axs[i][j]
d=data[i][j].pkts["rtt"]
ax.set_ylabel("Density")
ax.set_title("Node "+ str(data[i][j].ip) )
ax.set_xlabel("Time (ms)")
if not d.empty | len(d)<2 :
d.plot.kde(
ax=ax,
label="Case " +str(cases[i]),
color=colors[i]
)
d.hist(density=True,alpha=0.3,color=colors[i], ax=ax)
ax.legend()
#ax.set_xlim([-500, 8000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Print on a file density by Hop (asked by professor)
def printDensityByHop(directory,dataHop,hops,figsize,namefile,colors,cases):
print("Printing Density by Hop for "+directory)
#dataHop=hopPreparation(data)
fig, axs= plt.subplots(len(dataHop[0]),1, figsize=(15,20),sharey=True, )
#print(len(dataHop),len(dataHop[0]))
for i in range(len(dataHop)):
for j in range(len(dataHop[i])):
#print(i,j)
d=dataHop[i][j].pkts['rtt']
axs[j].set_xlabel("Time (ms)")
axs[j].set_title("Hop "+ str(j+1))
if not d.empty | len(d)<2 :
d.plot.kde(
ax=axs[j],
label=cases[i],color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
#axs[j].set_xlim([-40, 6000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Print on a file density by Case (asked by professor)
def printDensityByCase(directory,data,hops,figsize,namefile,colors,cases):
print("Printing Density by case for "+directory)
#print(len(data),len(data[0]))
#data1=hopPreparation(data)
dataHopT=[*zip(*hops)]
#print(len(data1),len(data1[0]))
#print(len(dataHopT),len(dataHopT[0]))
fig, axs= plt.subplots(len(dataHopT[0]),1, figsize=(15,20),sharey=True, )
for i in range(len(dataHopT)):
for j in range(len(dataHopT[0])):
d=dataHopT[i][j]
axs[j].set_title(""+ cases[i])
axs[j].set_xlabel("Time (ms)")
axs[j].set_ylabel("Density")
if not d.empty | len(d)<2 :
#print(dataHopT[i][j])
#print(colors[i])
d=d["rtt"]
try:
d.plot.kde(
ax=axs[j],
label="Hop "+str(i),
color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
except:pass
plt.tight_layout()
#axs[j].set_xlim([-40, 6000])
saveFileFigures(fig,directory,namefile)
#Print Density of delay without outliers in every node by Case
def densityOfDelayByCaseNoOutliers(directory,data,figsize,namefile,colors,cases):
print("Printing Density of delay without outliers in every node by Case for "+directory)
fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
out=getStdValues(data[i][j].pkts)
if not out.empty :
ax=axs[j]
out["rtt"].plot.kde(
ax=ax,
label=cases[i],
color=colors[i]
)
ax.set_ylabel("Density")
out["rtt"].hist(density=True,alpha=0.3, ax=ax, color=colors[i])
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Time (ms)")
ax.legend()
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Density of outliers in every node by Case
def densityOutliersByCase(directory,data,figsize,namefile,colors,cases):
print("Printing Density of outliers in every node by Case for "+directory)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
out=getOutliers(data[i][j].pkts)
ax=axs[i][j]
ax.set_ylabel("Density")
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Time (ms)")
if not out.empty | len(out)<2 :
out["rtt"].plot.kde(
ax=ax,
label=cases[i],
color=colors[i]
)
out["rtt"].hist(density=True,alpha=0.3, ax=ax, color=colors[i])
ax.legend()
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Distibution of the delay divided by Node in the differents Cases
def densityOfDelayByCase(directory,data,figsize,namefile,colors,cases):
print("Printing Density of delay in every node by Case for "+directory)
fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
d=data[i][j].pkts["rtt"]
axs[j].set_title("Node "+ str(data[i][j].ip))
axs[j].set_xlabel("Time (ms)")
axs[j].set_ylabel("Density")
if not d.empty | len(d)<2 :
try:
d.plot.kde(
ax=axs[j],
label=cases[i],color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
except:
pass
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#RTT Graph
def RTTGraph(directory,data,figsize,namefile,colors,cases):
print("Printing RTT Graph for "+directory)
# fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
# for i in range(len(data)):
# for j in range(len(data[i])):
# axs[j].plot(data[i][j].pkts["seq"],data[i][j].pkts["rtt"],label=cases[i],color=colors[i] )
# axs[j].set_title("Node "+ str(data[i][j].ip))
# axs[j].set_xlabel("Packet Number")
# axs[j].set_ylabel("Time (ms)")
# axs[j].legend()
# plt.tight_layout()
# saveFileFigures(fig,directory,namefile)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
#print(i,j)
ax=axs[i][j]
d=data[i][j].pkts["rtt"]
ax.set_ylabel("Time (ms)")
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Packet Number")
if not d.empty | len(d)<2 :
ax.plot(data[i][j].pkts["seq"],data[i][j].pkts["rtt"],label=cases[i]
#,color=colors[i]
)
ax.legend()
#ax.set_xlim([-500, 8000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Not used anymore
def coojaJsonImporter(dir):
dataList=[]
for file in os.listdir(dir):
print("Importing "+ file)
with open(dir+"/" + file, 'r') as f:
dataList.append(json.load(f))
return dataList
###Function to create nodes, create a list of nodes
###
def createNodes(dict):
nodeList=[]
#dfList(pd.DataFrame(dict))
for ip in dict.keys():
pkts=pd.DataFrame(dict[ip]['pkts'])
hop=64-(int(pkts[0:1]["ttl"]))
pkts = pkts.drop(['ttl'], axis=1)
pkts=pkts.rename(columns={"pkt":"seq"})
#print(type(pkts[0:1]["ttl"]))
#print(pkts[0:1]["ttl"])
n=node(ip,hop,pkts)
nodeList.append(n)
return nodeList
def findMissingPackets(node):
#print(node.pkts["pkt"])
print("Executed")
maxP=-1
for el in node.pkts["seq"]:
if(el>maxP): maxP=int(el)
#print(maxP)
pkt=[None]*(maxP+1)
for i in range(len(node.pkts["seq"])):
index=int(node.pkts["seq"][i])
#print(index)
pkt[index]=node.pkts["rtt"][i]
#pkt[)]=node.pkts["pkt"][i]
return pkt
def getIps(list):
ips=[]
for n in list:
ips.append(n.ip)
return ips
def MLPreparation(data):
# Calculate all the statistics
statistics = {} # <node_id, statistics of the node>
for network in data:
for node in network:
print(node.pkts["rtt"].describe())
def getOutliers(df):
df1=df["rtt"]
std=df1.std()
mean=df1.mean()
a1=df["rtt"]>mean+(2*std)
a2=df["rtt"]<mean-(2*std)
return(df[a1 | a2])
def get_IQR_Outliers(df):
df1 = df["rtt"]
lower = df1.quantile(.25)
upper = df1.quantile(.75)
a1 = df["rtt"]>upper
a2 = df["rtt"]<lower
return(df[a1 | a2])
def getStdValues(df):
df1=df["rtt"]
std=df1.std()
mean=df1.mean()
a1=df["rtt"]<mean+(2*std)
a2=df["rtt"]>mean-(2*std)
return(df[a1 & a2])
def getPings(data):
pings=[]
for i in range(len(data)):
packetN=-1
for j in range(len(data[i])):
if(len(data[i][j].pkts)>packetN): packetN=len(data[i][j].pkts)
pings.append(packetN)
return pings
#Prepare the hop data
def hopPreparation(data):
hoplist=[]
df_a = pd.DataFrame( )
dataHop=[]
listoflists = []
#print("Hop Preparation")
#print(len(data),len(data[0]))
maxHopCase=[]
for i in range(len(data)):
maxHop=-1
for j in range(len(data[i])):
if(data[i][j].hop>maxHop):
maxHop=data[i][j].hop
maxHopCase.append(maxHop)
#print(maxHopCase)
for i in range(len(data)):
sublist = []
for j in range(maxHopCase[i]):
sublist.append((df_a))
dataHop.append(sublist)
#print (listoflists)
for i in range(len(data)):
col=[]
for j in range(len(data[i])):
hop=data[i][j].hop-1
dataHop[i][hop]= pd.concat([dataHop[i][hop],data[i][j].pkts],sort=True)
#print(len(dataHop),len(dataHop[0]))
return dataHop
def getPercentageMissingPackets(node,lenght):
missing=0
#print(len(node.pkts))
missing=lenght-len(node)
#print(lenght,missing)
if(missing!=0):
result=missing/lenght
else: result=0
#print(maxS/missing)
return result*100
def accuracy_score_corrected(correction,labels):
#print(np.array(correction))
labels_alt=[]
sum_labels=0
sum_labels_alt=0
for el in labels:
if (el==0):
labels_alt.append(1)
sum_labels_alt+=1
elif el==1:
labels_alt.append(0)
sum_labels+=1
accuracy=sm.accuracy_score(correction, labels)
accuracy_alt=sm.accuracy_score(correction, labels_alt)
#print(correction)
if (sum_labels>sum_labels_alt):
#print(accuracy)
None
else:
#print(accuracy_alt)
labels=labels_alt
#print(np.array(labels))
confusionMatrix=sm.confusion_matrix(correction, labels)
#pprint(confusionMatrix)
return labels
def ReplaceMissingPackets(node):
#print(node.pkts["pkt"])
print("Executed")
maxP=-1
for el in node.pkts["seq"]:
if(el>maxP): maxP=int(el)
#print(maxP)
pkt=[None]*(maxP+1)
for i in range(len(node.pkts["seq"])):
index=int(node.pkts["seq"][i])
#print(index)
pkt[index]=node.pkts["rtt"][i]
#pkt[)]=node.pkts["pkt"][i]
return pkt
#Import from pings files to a dataframe
def import_nodes_Cooja_2(directory,tracemask,node_defaults):
#print(directory)
#print(tracemask)
files = []
# load all files and extract IPs of nodes
for file in os.listdir(directory):
try:
if file.startswith(tracemask) and file.index("routes"):
continue
except:
files.append(file)
nodes = pd.DataFrame(columns=['node_id', 'rank'])
packets_node = {}
# Load the ICMP traces
for file in files:
packets = pd.read_csv(directory + '/' + file,
sep=' |icmp_seq=|ttl=|time=',
na_filter=True,
header=None,
skiprows=1,
skipfooter=4,
usecols=[3, 5, 7, 9],
names=['node_id', 'seq', 'hop', 'rtt'],
engine='python').dropna().drop_duplicates()
if len(packets) < 1:
# Nodes affected by a black hole did not receive any packet
node_id = file[-24:-4]
if(node_id=="fc00:e968:6179::de52:7100:7411:11:1111"): node_id="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:7411:11:1111"
packets = pd.DataFrame(columns=['node_id', 'seq', 'hop', 'rtt'],
data=[[node_id, 1, node_defaults[node_id], 1]])
nodes.loc[len(nodes)] = [file[-24:-4], node_defaults[node_id]]
packets_node[file[-24:-4]] = packets
else:
#print("qui")
packets['node_id'] = packets.apply(lambda row: row['node_id'][:-1], axis=1)
#print(packets["hop"].head())
#print(nodes)
#nodes.loc[len(nodes)-1] = [packets['node_id'][0], 64-packets['hop'][0]]
#print("ciao"+ str(64-packets['hop'][0]))
#print(nodes.loc[7])
packets = packets.sort_values(by=['node_id', 'seq'], ascending=True, na_position='first')
packets = packets[packets['rtt'] > 1]
packets["hop"]= 64-packets['hop']
packets_node[packets['node_id'][0]] = packets
nodes=nodes.sort_values(by=['rank', 'node_id'])
#tranformation in node
nodeList=[]
for n in packets_node.keys():
#print((packets_node[n]).head())
pkts=packets_node[n].drop(["node_id","hop"],axis=1)
#print(pkts)
hop=int(packets_node[n]["hop"][0])
ip=packets_node[n]["node_id"][0]
#print(hop)
n=node(ip,hop,pkts)
nodeList.append(n)
return nodeList
#calls import nodes cooja_2
def import_Cooja2(df,directory):
data=[]
node_defaults = {
"aaaa::212:7403:3:303": 10,
"aaaa::212:7402:2:202": 10,
"aaaa::212:7404:4:404": 10,
"aaaa::212:7406:6:606": 10,
"aaaa::212:7405:5:505": 10,
"aaaa::212:7407:7:707": 10,
"aaaa::212:7409:9:909": 10,
"aaaa::212:7408:8:808": 10,
"aaaa::212:740a:a:a0a": 10,
"aaaa::212:740b:b:b0b": 10,
"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:740f:f:f0f": 10,
"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:7411:11:1111": 10,
"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:740d:d:d0d": 10,
}
#for row in plots:
#print("Importing ./"+row[0]+"/"+row[1])
#print(directory+df["directory"].values)
for i in range(len(df["directory"].values)):
nodeList=import_nodes_Cooja_2(directory+df["directory"].values[i],df["case"].values[i],node_defaults)
data.append(nodeList)
#print(len(data))
#print(len(data[0]))
return data
def analyze_network(directory, df, pings, window, features_to_drop):
cases = []
casesAccuracy = df["case_accuracy"].values
casesAccuracy2 = df["case_accuracy2"].values
# for row in plots:
# cases.append(row[1])
# casesAccuracy.append(row[2])
# data=import_Cooja2(plots)
cases = df["case"].values
folder = df["directory"].values + directory
data = import_Cooja2(df, directory)
# pings=getPings(data)
# All data collection is in variable node that is a list of list of nodes
# 3 nets input x 9 nodes by net
print("Processing...")
d = {"label": [],
"type": [],
"count": [],
"std": [],
"mean": [],
"var": [],
"hop": [],
"packet loss": [],
"outliers": [],
"node": [],
"window": []
}
# count=[]
labels = []
var = []
# window=100
# stats=pd.DataFrame(columns=columns)
n = pings
for i in range(len(data)):
# window=pings[i]
for j in range(len(data[i])):
# n=pings[i]
# print(n)
for z in range(0, n, int(window)):
# if(z+window>n):break
# print(z,z+window)
# df1 = df1.assign(e=p.Series(np.random.randn(sLength)).values)
node = data[i][j].pkts
name = str(j) + " " + cases[i]
nodeWindow = node[(node["seq"] < z + window) & (node["seq"] >= z)]
nodeWindowP = nodeWindow["rtt"]
d["count"].append(nodeWindowP.count())
# Case without outliers
# Case with outliers
std = 0
if (nodeWindowP.std() > 10):
std = 1
std = nodeWindowP.std()
d["std"].append(std)
mean = nodeWindowP.mean()
# if(mean<1):print(mean)
d["mean"].append(mean)
var = 0
if (nodeWindowP.var() > var): var = nodeWindowP.var()
d["var"].append(var)
d["label"].append(cases[i])
d["hop"].append(data[i][j].hop)
d["type"].append(casesAccuracy[i])
d["outliers"].append(getOutliers(nodeWindow)["rtt"].count())
missing = window - nodeWindow.count()
d["node"].append(data[i][j].ip)
mP = getPercentageMissingPackets(nodeWindow, window)
PL = 0
if (mP > 30):
PL = 1
PL = mP
d["packet loss"].append(mP)
d["window"].append(window)
stats = pd.DataFrame(d)
dataK = stats.drop(features_to_drop, axis=1)
dataK = dataK.fillna(0)
# print(dataK)
correction = []
correction_alt = []
col = np.array(dataK["type"])
dataK = dataK.drop(["type"], axis=1)
# Creating simple array to correct unsupervised learning
# NB as it is unsupervised could happen that the correction are inverted
for i in range(len(col)):
el = d["type"][i]
if el == "normal":
correction.append(1)
correction_alt.append(0)
else:
correction.append(0)
correction_alt.append(1)
dataC = stats["label"]
kmeans = KMeans(n_clusters=2)
kmeans.fit(dataK)
labels = kmeans.predict(dataK)
centroids = kmeans.cluster_centers_
labels = accuracy_score_corrected(correction, labels)
predicted = []
for i in range(len(labels)):
if (labels[i] == 1):
predicted.append("normal")
else:
predicted.append("BH")
# print(len(predicted))
stats["predicted"] = pd.Series(np.array(predicted))
stats["predicted number"] = pd.Series(np.array(labels))
stats["correction number"] = pd.Series(np.array(correction))
stats_csv = stats[[
"label",
"type",
"predicted",
"packet loss",
"outliers",
"std",
"hop",
"node",
"mean"
]]
# stats_csv.to_csv("results_kmeans.csv", sep='\t', encoding='utf-8')
stats.head()
net_results = {
"case": [],
"normal_behaving_nodes_percentage": [],
"predicted": [],
"real": [],
"pings": [],
"window": [],
}
# print(stats["predicted number"])
correction = []
labels = []
for case in range(len(cases)):
subset = stats[stats["label"] == cases[case]]
mean_predicted = str(subset["predicted number"].mean() * 100) # +"% normal"
net_results["case"].append(cases[case])
net_results["normal_behaving_nodes_percentage"].append(mean_predicted)
net_results["pings"].append(pings)
net_results["window"].append(window)
if (float(mean_predicted) < 85):
p = "abnormal"
labels.append(0)
else:
p = "normal"
labels.append(1)
if (casesAccuracy[case] == "BH"):
c = "abnormal"
correction.append(0)
elif (casesAccuracy[case] == "normal"):
c = "normal"
correction.append(1)
net_results["predicted"].append(p)
net_results["real"].append(c)
results = pd.DataFrame(net_results)
return results, stats, correction, labels
def create_stats(directory, df, pings, window):
cases = []
casesAccuracy = df["case_accuracy"].values
casesAccuracy2 = df["case_accuracy2"].values
cases = df["case"].values
folder = df["directory"].values + directory
data = import_Cooja2(df, directory)
print("Processing...")
d = {"experiment": [],
"node_id": [],
"label": [],
"label_2":[],
"loss": [],
"count": [],
"std": [],
"mean": [],
"var": [],
"hop": [],
"min":[],
"max":[],
"outliers": [],
"window": []
}
# count=[]
labels = []
var = []
# window=100
# stats=pd.DataFrame(columns=columns)
n = pings
for i in range(len(data)):
# window=pings[i]
for j in range(len(data[i])):
# n=pings[i]
# print(n)
for z in range(0, n, int(window)):
# if(z+window>n):break
# print(z,z+window)
# df1 = df1.assign(e=p.Series(np.random.randn(sLength)).values)
node = data[i][j].pkts
name = str(j) + " " + cases[i]
nodeWindow = node[(node["seq"] < z + window) & (node["seq"] >= z)]
nodeWindowP = nodeWindow["rtt"]
d["count"].append(nodeWindowP.count())
# Case without outliers
# Case with outliers
std = 0
if (nodeWindowP.std() > 10):
std = 1
std = nodeWindowP.std()
d["std"].append(std)
mean=0
if(nodeWindowP.mean()>mean): mean=nodeWindowP.mean()
# if(mean<1):print(mean)
d["mean"].append(mean)
var = 0
if (nodeWindowP.var() > var): var = nodeWindowP.var()
d["var"].append(var)
d["experiment"].append(cases[i])
d["hop"].append(data[i][j].hop)
if(casesAccuracy[i]=="normal"):
d["label"].append("Normal")
else:
d["label"].append("Attacked")
if (casesAccuracy[i] == "normal"):
d["label_2"].append("Normal")
elif(casesAccuracy[i] == "BH"):
d["label_2"].append("BH")
else:
d["label_2"].append("GH")
d["outliers"].append(getOutliers(nodeWindow)["rtt"].count())
missing = window - nodeWindow.count()
d["node_id"].append(data[i][j].ip)
mP = getPercentageMissingPackets(nodeWindow, window)
d["min"].append(data[i][j].pkts["rtt"].min())
d["max"].append(data[i][j].pkts["rtt"].max())
d["loss"].append(mP)
d["window"].append(window)
stats = | pd.DataFrame(d) | pandas.DataFrame |
import click
import logging
import signal
import time
import code
import os
import re
import subprocess
import pdb
import glob
import IPython
import bpython
import collections
import pandas as pd
from config import user_config
from typing import Optional, List, Dict, Callable, Union
from pprint import pformat
from .base import MenuOption, ControlMode, ControlAction
from .error import RaspadorInputTimeoutError, RaspadorCannotInteractError, RaspadorQuit
from .style import Styled, CustomStyled, CodeStyled, Format
from datetime import datetime
from enum import Enum
class Interaction(MenuOption):
html = 'h'
image = 'i'
python = 'p'
script = 'r'
debugger = 'd'
log = 'l'
@property
def option_text(self) -> str:
if self is Interaction.html:
return '(H)TML current page view'
elif self is Interaction.image:
return '(I)mage of current page'
elif self is Interaction.python:
return '(P)ython interactive shell'
elif self is Interaction.script:
return '(R)un python script'
elif self is Interaction.debugger:
return '(D)ebugger session'
elif self is Interaction.log:
return '(L)og view'
@property
def styled(self) -> Styled:
return CustomStyled(text=self.option_text, style=Format().blue())
class Console(code.InteractiveConsole):
record: List = None
def raw_input(self, prompt=''):
result = super().raw_input(prompt=prompt)
if self.record is None:
self.record = []
self.record.append(result)
return result
class UserExceptionFormatter(logging.Formatter):
style: Format = Format().red().bold()
def formatException(self, exc_info):
return self.style(super(UserExceptionFormatter, self).formatException(exc_info))
def format(self, record):
s = super(UserExceptionFormatter, self).format(record)
if record.exc_text:
s = self.style(s)
return s
class UserInteractor:
driver: Optional[any]
locals: Dict[str, any]
timeout: Optional[int]
interactive: bool
monitor: bool
control_mode: ControlMode
break_on_exceptions: bool
retry: Optional[int]
abbreviated_length: int
_last_script_name: Optional[str]=None
def __init__(self, driver: Optional[any]=None, locals: Dict[str, any]={}, timeout: Optional[int]=30, interactive: bool=True, monitor: bool=False, control_mode: ControlMode=ControlMode.automatic, break_on_exceptions: bool=False, retry: Optional[int]=None, abbreviated_length: int=2048):
self.driver = driver
self.timeout = timeout
self.locals = self.python_locals
self.locals.update(locals)
self.interactive = interactive
self.control_mode = control_mode
self.break_on_exceptions = break_on_exceptions
self.retry = retry
self.abbreviated_length = abbreviated_length
@classmethod
def shell(cls, driver: Optional[any]=None, locals: Dict[str, any]={}):
user = cls(driver=driver, locals=locals)
user.present_python_shell()
@property
def python_locals(self) -> Dict[str, any]:
python_locals = {}
if self.driver is not None:
python_locals['driver'] = self.driver
def view_html():
self.view_html()
python_locals['view_html'] = view_html
return python_locals
def present_prompt(self, prompt: str, response_type: any=str, default_response: Optional[any]=None, prompter: Optional[Callable[[str, any, Optional[any]], any]]=None):
if prompter is None:
def prompter(prompt: str, response_type:any, default_response: Optional[any]) -> any:
return click.prompt(prompt, type=response_type, default=default_response)
if self.interactive:
if self.timeout is None:
response = prompter(prompt, response_type, default_response)
elif self.timeout <= 0:
response = default_response
else:
def handle_alarm(signum, frame):
raise RaspadorInputTimeoutError()
original_handler = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, handle_alarm)
original_time = time.time()
original_alarm = signal.alarm(self.timeout)
try:
print(f'Will continue automaticially after {self.timeout} seconds with reponse [{default_response}]')
response = prompter(prompt, response_type, default_response)
signal.alarm(0)
except RaspadorInputTimeoutError:
print(f' => {default_response} (continuing automaticially after {self.timeout} seconds)')
response = default_response
signal.signal(signal.SIGALRM, original_handler)
if original_alarm:
new_alarm = original_alarm - round(time.time() - original_time)
if new_alarm > 0:
signal.alarm(new_alarm)
else:
signal.alarm(1)
time.sleep(2)
else:
response = default_response
return response
def present_error(self, error: Exception):
handler = logging.StreamHandler()
formatter = UserExceptionFormatter()
handler.setFormatter(formatter)
root = logging.getLogger()
root.addHandler(handler)
logging.exception(error)
root.removeHandler(handler)
def present_message(self, message: Optional[str]=None, prompt: Optional[str]=None, error: Optional[Exception]=None, response_type: any=str, default_response: Optional[any]=None):
response = None
if error is not None:
self.present_error(error)
if message is not None:
print(message)
if prompt is not None:
response = self.present_prompt(prompt=prompt, response_type=response_type, default_response=default_response)
return response
def present_confirmation(self, prompt: str='Continue', default_response: bool=False) -> bool:
def prompter(prompt: str, response_type: any, default_response: bool) -> bool:
return click.confirm(prompt, default=default_response)
return self.present_prompt(prompt=prompt, response_type=bool, default_response=default_response, prompter=prompter)
def present_report(self, report: Union[pd.DataFrame, pd.Series], title: Optional[str]=None, prefix: Optional[str]=None, suffix: Optional[str]=None):
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
prefix = f'{prefix}\n' if prefix else ''
report_text = report.to_string() if not report.empty else 'Empty report.'
suffix = f'\n{suffix}' if suffix else ''
self.present_title(title=title)
self.present_message(f'{prefix}{report_text}{suffix}')
def present_menu(self, options: List[MenuOption], default_option: Optional[MenuOption]=None, message: Optional[str]=None) -> MenuOption:
prompt = '\n'.join([
(o.styled + Format().underline()).styled if o is default_option else o.styled.styled for o in options
] + [message if message is not None else ''])
option_values = [o.value for o in options]
assert default_option is None or default_option in options
assert len(option_values) == len(set(option_values)), str([o for o in options if collections.Counter(option_values)[o.value] > 1])
choice = self.present_message(
prompt=prompt,
response_type=click.Choice(option_values, case_sensitive=False),
default_response=default_option.value if default_option else None
)
option = next(filter(lambda o: o.value == choice, options))
return option
def interact(self, interaction: Interaction) -> bool:
if interaction is Interaction.html:
self.view_html()
elif interaction is Interaction.image:
# log = self.locals['log'] if 'log' in self.locals else pd.DataFrame()
# file_name = f'{log}'
self.save_image()
elif interaction is Interaction.python:
self.present_python_shell()
elif interaction is Interaction.script:
self.run_script()
elif interaction is Interaction.debugger:
self.debug()
elif interaction is Interaction.log:
log = self.locals['log'] if 'log' in self.locals else pd.DataFrame()
self.present_log(log=log)
def view_html(self):
if self.driver is None:
print('No driver')
return
time_text = self.date_file_name()
path = os.path.join('output', 'html', f'{time_text}_{self.safe_file_name(self.driver.current_url)[:200]}.html')
with open(path, 'w') as f:
f.write(self.driver.page_source)
subprocess.call([
*user_config['view_html_command'],
path,
])
def save_image(self, file_name: Optional[str]=None, quiet: bool=False):
def output(message: str):
if not quiet:
log.log(message)
if 'browser' not in self.locals or self.locals['browser'] is None:
output('No browser')
return
browser = self.locals['browser']
if file_name is None:
file_name = self.frame_file_name()
source = browser.current_source
image_path = os.path.join('output', 'image', f'{file_name}.png')
html_path = os.path.join('output', 'html', f'{file_name}.html')
if self.locals['browser'].save_screenshot(path=image_path):
output(f'Image saved to {image_path}')
else:
output('No display')
if source:
with open(html_path, 'w') as f:
f.write(source)
output(f'Source saved to {html_path}')
else:
output('No source')
def python_shell(self) -> str:
if not self.interactive:
raise RaspadorCannotInteractError()
print('Local variables:')
for name, value in self.locals.items():
print(f' {name}:\n {pformat(value)}')
if user_config['python_shell'].lower() == 'default':
console = Console(locals=self.locals)
console.interact(banner='Python shell. Type CTRL-D to exit.')
return '\n'.join(console.record)
elif user_config['python_shell'].lower() == 'ipython':
print('IPython shell. Type CTRL-D to exit.')
console = IPython.terminal.embed.InteractiveShellEmbed()
import raspador
console.mainloop(local_ns=self.locals, module=raspador)
record = ''
for index, line in enumerate(console.history_manager.input_hist_raw):
record += f'{line}\n'
if index in console.history_manager.output_hist:
record += f'# {str(console.history_manager.output_hist[index])}\n'
return record
elif user_config['python_shell'].lower() == 'bpython':
history_path = os.path.join('output', 'python', 'history.py')
try:
os.remove(history_path)
except FileNotFoundError:
pass
bpython.embed(args=['--config=bpython_config'], locals_=self.locals, banner='bpython shell. Type CTRL-D to exit.')
if os.path.isfile(history_path):
with open(history_path, 'r') as f:
record = f.read()
else:
record = ''
return record
else:
raise ValueError('Invalid python_shell', user_config['python_shell'])
def present_python_shell(self):
record = self.python_shell()
if not record.strip():
return
choice = click.prompt('(S)ave, (E)dit, or (D)elete record of interaction?', type=click.Choice(['s', 'e', 'd'], case_sensitive=False), default='d').lower()
if choice in ['s', 'e', 'd']:
path = os.path.join('output', 'python', f'{self.date_file_name()}_interaction.py')
with open(path, 'w') as f:
f.write(record)
if choice == 'e':
subprocess.call([
*user_config['editor_command'],
path,
])
while True:
script_name = click.prompt('To save this interaction as a script enter a script name, or press return to continue.', default='')
if not script_name:
break
script_name = self.safe_file_name(script_name)
script_path = os.path.join('output', 'python', 'scripts', f'{script_name}.py')
if os.path.exists(script_path):
if not click.confirm(f'A script alread exists at \'{script_path}\'\nWould you like to replace it', abort=True):
continue
with open(path, 'r') as f:
with open(script_path, 'w') as g:
g.write(f.read())
print(f'Script written to \'{script_path}\'\nIt will be available for future use as \'{script_name}\'')
break
else:
print(f'{record}\n\nWritten to {path}')
def run_script(self, script_name: Optional[str]=None):
script_paths = glob.glob(os.path.join('output', 'python', 'scripts', '*.py'))
if not script_paths:
print('No scripts found in \'output/python/scripts/\'')
return
script_names = {os.path.splitext(os.path.basename(p))[0]: p for p in script_paths}
if script_name is None and self.interactive:
default_script = self._last_script_name if self._last_script_name in script_names.keys() else None
script_name = click.prompt('Enter a script to execute', type=click.Choice(sorted(script_names.keys())), default=default_script)
if not script_name:
return
self._last_script_name = script_name
with open(script_names[script_name], 'r') as f:
script = f.read()
self.run_code(code=script, file_path=script_names[script_name], description=f'from \'{script_names[script_name]}\'')
def run_code(self, code: str, file_path: str, description: str, confirm: Optional[bool]=True):
if confirm:
message_style = Format().cyan()
prompt = CustomStyled(text=f'Script...\n{"–" * 9}\n', style=message_style) + CodeStyled(text=code) + CustomStyled(text=f'{"–" * 9}\n...Script\n', style=message_style) + f'Run this script ({description})'
if not click.confirm(prompt.styled, default=True):
return
compiled = compile(code, file_path, 'exec')
exec(compiled, self.locals, self.locals)
print(f'Ran script ({description})')
def debug(self):
pdb.set_trace()
def present_title(self, title: str=''):
title = Format().cyan()(f'\n{title}\n{"–" * len(title)}\n') if title else ''
print(title)
def present_log(self, log: pd.DataFrame):
self.present_title(title='Flight Log')
index_style = Format().gray()
maneuver_style = Format().bold()
attempt_style = Format().blue()
error_style = Format().red().bold()
result_style = Format().green().bold()
for index, row in log.iterrows():
mission = f'{row.mission}.' if row.mission else ''
print(f'{index_style(str(index))} {mission}{maneuver_style(row.maneuver)}')
if row.error:
print(error_style(f' = {row.option} – {row.error}'))
elif row.result:
print(result_style(f' = {row.option} : {row.result}'))
else:
print(attempt_style(f' = {row.option}'))
print(f'{len(log)} actions logged')
def date_file_name(self) -> str:
return datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
def safe_file_name(self, name: str) -> str:
return re.sub(r'[^a-zA-Z0-9]', '_', name)
def frame_file_name(self) -> str:
log = self.locals['log'] if 'log' in self.locals else | pd.DataFrame() | pandas.DataFrame |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00'
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00'
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00'
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx[-1]
self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_take(self):
# GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx.take([5])
self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00'
'2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00'
'2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00'
'2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
| tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) | pandas.util.testing.assert_numpy_array_equal |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check agains values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artifical conversion to map the key as integers to the labels
# so ix can work for comparisions
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check agains values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated gettitems on a dup index returing a ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexErrror!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similary for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indicies
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indicies
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignement of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignement it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise becuase the we are non monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': | Series([0, 2, 4]) | pandas.core.api.Series |
import numpy as np
import pandas as pd
import os
import sys
sys.path.append('/home/akagi/github/RIPS_kircheis/RIPS')
import rect_grid
import cable
acsr = [ u'Bittern', u'Bluebird', u'Bluejay', u'Bobolink', u'Bunting',
u'Canary', u'Cardinal', u'Chickadee', u'Chukar', u'Cochin',
u'Condor', u'Coot', u'Curlew', u'Dipper', u'Dorking',
u'Dotterel', u'Dove', u'Drake', u'Eagle', u'Egret',
u'Falcon', u'Finch', u'Flamingo', u'Flicker', u'Grackle',
u'Grosbeak', u'Grouse', u'Guinea', u'Hawk', u'Hen',
u'Ibis', u'Kingbird', u'Kiwi', u'Lapwing', u'Lark',
u'Leghorn', u'Linnet', u'Mallard', u'Martin', u'Merlin',
u'Minorca', u'Oriole', u'Ortolan', u'Osprey', u'Parakeet',
u'Partridge', u'Peacock', u'Pelican', u'Penguin', u'Petrel',
u'Pheasant', u'Pigeon', u'Quail', u'Rail', u'Raven',
u'Redwing', u'Robin', u'Rook', u'Ruddy', u'Sparate',
u'Sparrow', u'Starling', u'Swan', u'Swanate', u'Swift',
u'Tern', u'Turkey', u'Waxwing']
acss = [ u'Avocet', u'Bittern', u'Bluebird',
u'Bluejay', u'Bobolink', u'Brant', u'Bullfinch',
u'Bunting', u'Canary', u'Canvasback', u'Cardinal',
u'Chukar', u'Condor', u'Cormorant', u'Corncrake',
u'Cuckoo', u'Curlew', u'Dipper', u'Diver',
u'Dove', u'Drake', u'Eagle', u'Egret',
u'Falcon', u'Finch', u'Flamingo', u'Flicker',
u'Gannet', u'Goldfinch', u'Grackle', u'Grosbeak',
u'Hawk', u'Hen', u'Heron', u'Hornbill',
u'Ibis', u'Joree', u'Junco', u'Kiwi',
u'Lapwing', u'Lark', u'Linnet', u'Macaw',
u'Mallard', u'Martin', u'Mockingbird', u'Nuthatch',
u'Oriole', u'Ortolan', u'Ostrich', u'Oxbird',
u'Parakeet', u'Parrot', u'Partridge', u'Peacock',
u'Pheasant', u'Phoenix', u'Plover', u'Popinjay',
u'Ptarmigan', u'Puffin', u'Rail', u'Ratite',
u'Redbird', u'Redwing', u'Ringdove', u'Roadrunner',
u'Rook', u'Ruddy', u'Sapsucker', u'Scaup',
u'Scissortail', u'Scoter', u'Seahawk', u'Snowbird',
u'Spoonbill', u'Squab', u'Starling', u'Stilt',
u'Stork', u'Tailorbird', u'Teal', u'Tern',
u'Thrasher', u'Tody', u'Toucan', u'Towhee',
u'Trogon', u'Turacos', u'Turbit', u'Wagtail',
u'Whooper', u'Widgeon', u'Woodcock']
aac = [ u'Arbutus', u'Aster', u'Bluebell', u'Bluebonnet',
u'Canna', u'Carnation', u'Cockscomb', u'Columbine',
u'Coreopsis', u'Cosmos', u'Cowslip', u'Daffodil',
u'Dahlia', u'Daisy', u'Goldenrod', u'Goldentuft',
u'Hawkweed', u'Hawthorn', u'Heuchera', u'Iris',
u'Jessamine', u'Larkspur', u'Laurel', u'Lilac',
u'Lupine', u'Magnolia', u'Marigold', u'Meadowsweet',
u'Mistletoe', u'Narcissus', u'Nasturtium', u'Orchid',
u'Oxlip', u'Pansy', u'Peachbell', u'Peony',
u'Petunia', u'Phlox', u'Poppy', u'Rose',
u'Sneezewort', u'Syringa', u'Trillium', u'Tulip',
u'Valerian', u'Verbena', u'Violet', u'Zinnia']
# Had to remove wood duck because of title() function
# ACSR
# 230
#skylark = cable.cable('Skylark', 'acsr')
acsr_df = pd.DataFrame()
for k in acsr:
cable_i = cable.cable(k, 'acsr')
acsr_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
acss_df = pd.DataFrame()
for k in acss:
cable_i = cable.cable(k, 'acss')
acss_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
aac_df = pd.DataFrame()
for k in aac:
cable_i = cable.cable(k, 'aac')
aac_df[k] = np.asarray([cable_i.I(348, i, 0.61) for i in np.arange(273+0, 273+61)])/(cable_i.I(348, 298, 0.61))
fill_between(acsr_df.index.values, acsr_df.min(axis=1), acsr_df.max(axis=1), color='blue', label='ACSR', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('ACSR cable')
clf()
fill_between(acss_df.index.values, acss_df.min(axis=1), acss_df.max(axis=1), color='orange', label='ACSS', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('ACSS cable')
clf()
fill_between(aac_df.index.values, aac_df.min(axis=1), aac_df.max(axis=1), color='red', label='AAC', alpha=1)
xlabel('Ambient temperature ($^\circ$C)')
ylabel('Fraction of rated capacity')
title('AAC cable')
ylim(0.4, 1.3)
clf()
#####################
acsr_cat = | pd.concat([acsr_df.loc[50], cable_i.models['acsr'].T], axis=1) | pandas.concat |
import csv
import logging
import json
import math
import random
import re
import time
import urllib.request
from pathlib import Path
import sys
import pandas as pd
import get_edgar.common.my_csv as mc
logger = logging.getLogger(__name__)
EDGAR_PREFIX = "https://www.sec.gov/Archives/"
SEC_PREFIX = "https://www.sec.gov"
## Download index
# Generate the output csv paths for the sample year
def dl_index(folder,start_year,end_year,form_types,prefix,ciks=None):
""" Download index to csvs according
to the start year & end year
Arguments:
folder {Path} -- [the Path for the folder to store index csvs]
start_year {int} -- [the start year of the sample period]
end_year {int} -- [the end year of the sample period]
form_types {string} -- [all the form types need to download index]
prefix {string} -- [prefix of the output index csv names]
ciks {Path or tuple or set} -- [csv file containing ciks needed, if applicable]
Returns:
[list of Paths] -- [list of Paths for all the index csvs during the sample period]
"""
years = list(range(start_year, end_year+1))
if folder.exists() == False:
folder.mkdir()
cik_need = input_cik(ciks=ciks)
index_csvs = []
for form_type in form_types:
for year in years:
index_csv = folder / f'index_{prefix}_{form_type}_{year}.csv'
get_index_master(year,form_type,index_csv,cik_filter=cik_need)
index_csvs.append(index_csv)
return index_csvs
def input_cik(ciks=None):
if ciks is not None:
if type(ciks) in (tuple,set):
return ciks
else:
return mc.extract_obs(ciks,'CIK')
else:
return None
# Generate index csv for each year
def get_index_master(year, form_type, out_csv,cik_filter=None):
""" Get index file for a form type during the specified years.
year -> the year to download
form_type -> the name of the form type required, case sensitive
Output:
csv file for required index
"""
if out_csv.exists() == False:
urls = index_url(year)
with open(out_csv,'w', newline='') as out:
writer = csv.writer(out)
labels = ['cik', 'conm', 'form_type', 'filing_date','txt_path', 'html_index']
writer.writerow(labels)
for url in urls:
try:
master = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
logger.error(f'{url} does not exist')
break
lines = str(master, "latin-1").splitlines()
for line in lines[11:]:# skip header, first 11 lines for master.idx
row = append_html_index(line)
if form_type_filter(row, form_type):
if cik_filter is not None:
if row[0] in cik_filter:
writer.writerow(row)
else:
writer.writerow(row)
logger.info(f"{year} {form_type} downloaded and wrote to csv")
logger.info(f'{out_csv} created')
else:
logger.info(f'{out_csv} already exists')
def index_url(year):
""" Generate url of the index file for future downloading.
year - > the year to download
Returns:
url link of the index file
"""
quarters = ['QTR1', 'QTR2', 'QTR3', 'QTR4']
return [f'https://www.sec.gov/Archives/edgar/full-index/{year}/{q}/master.idx' for q in quarters]
def append_html_index(line):
""" Separate a line in an index file and Generate link of the index webpage.
line - > a line in an index file
Returns:
a list of chunks in a line of an index file, including the index webpage
"""
chunks = line.split("|")
chunks[-1] = EDGAR_PREFIX + chunks[-1]
chunks.append(chunks[-1].replace(".txt", "-index.html"))
return chunks
def form_type_filter(chunks, form_type):
""" Find a specific form type in the index file.
chunks - > a seprated line in an index file
form_type - > the name of the form type required, case sensitive
Returns:
True if the line represents a form that fits form type required
False if the line does not
"""
try:
norm_type = re.compile(r'[^\w]')
type_t = re.sub(norm_type,'',chunks[2].strip().lower())
type_m = re.sub(norm_type,'',form_type.lower())
if type_m == type_t:
return True
else:
return False
except:
logger.error('form type need to be a string')
return False
def evt_filter(csv_index,evt_csv,evtdate,mperiods):
"""Keep filings for the specific period after a event
Arguments:
csv_index {Path} -- The Path for the csv file containing all filings
evt_csv {Path} -- The Path for the csv file containing the event dates
evtdate {str} -- The variable name of the event date
mperiods {int} -- The number of months after the event dates
Returns:
Path -- The Path for the resulting csv file
"""
all_index = pd.read_csv(csv_index,parse_dates=['filing_date'])
all_index['cik'] = all_index['cik'].apply(str)
evt = pd.read_csv(evt_csv,parse_dates=[evtdate])
evt['post_evt'] = evt[evtdate] + | pd.DateOffset(months=mperiods) | pandas.DateOffset |
import os
import sdg
import yaml
import pandas as pd
skip_values_in_columns = [
'GeoCode',
'Group',
]
skip_column_names = [
'GeoCode',
'Group',
'Units'
]
translations_should_include = {}
translation_columns = {}
data_pattern = os.path.join('data', '*-*.csv')
data_input = sdg.inputs.InputCsvData(path_pattern=data_pattern)
data_input.execute(None)
for indicator in data_input.indicators:
serieses = data_input.indicators[indicator].get_all_series()
for series in serieses:
disaggregations = series.get_disaggregations()
for column in disaggregations:
if column not in skip_column_names:
translations_should_include[column] = True
translation_columns[column] = column
if column not in skip_values_in_columns:
if disaggregations[column] and not | pd.isna(disaggregations[column]) | pandas.isna |
""" analyze.py - experiment analysis script"""
import music_trees as mt
from collections import OrderedDict
import glob
from pathlib import Path
import random
from itertools import combinations, permutations
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from natsort import natsorted
from scipy.stats import wilcoxon
BASELINE_NAME = 'baseline'
ANALYSES_DIR = mt.ROOT_DIR / 'analyses'
ALL_COLORS = ["#ff595e", "#ffca3a", "#8ac926", "#1982c4", "#eaf6ff",
"#6a4c93", "#ed6a5a", "#f4f1bb", "#9bc1bc", "#5d576b", "#9bc1bc", "#5d576b", ]
random.shuffle(ALL_COLORS)
def significance(df: pd.DataFrame, dv: str, iv: str, cond: str):
"""
returns a DataFrame with p values for each condition between independent variables
"""
# get all possible values for the IV and conditions
all_trials = df[iv].unique()
all_conds = list(natsorted(df[cond].unique()))
pairs = list(permutations(all_trials, 2))
pvals = []
for co in all_conds:
for pair in pairs:
subset = df[df[cond] == co]
s1, s2 = pair
df1 = subset[subset[iv] == s1].copy()
df2 = subset[subset[iv] == s2].copy()
stat, p = wilcoxon(df1['value'].values.astype(
np.float), df2['value'].values.astype(np.float))
pvals.append({
'a': s1,
'b': s2,
cond: co,
'p': p,
'stat': stat,
'significant?': p < 0.01
})
return pd.DataFrame(pvals)
def bar_with_error(df: pd.DataFrame, dv: str, iv: str,
cond: str, title: str = None,
xlabel: str = None, ylabel: str = None) -> plt.figure:
"""
dv --> dependent variable
iv --> independent variable
cond --> conditions (groupings)
"""
plt.rcParams["figure.figsize"] = (7, 4)
# get all possible values for the IV and conditions
all_trials = list(natsorted(df[iv].unique()))
all_conds = list(natsorted(df[cond].unique()))
bar_width = 0.25 * 3 / len(all_trials)
means = OrderedDict((tr, []) for tr in all_trials)
stds = OrderedDict((tr, []) for tr in all_trials)
for trial in all_trials:
for c in all_conds:
# get the list of all scores per episode
values = df[(df[iv] == trial) & (df[cond] == c)
][dv].values.astype(np.float)
means[trial].append(np.mean(values))
stds[trial].append(np.std(values))
# make a bar plot for each condition
bp = np.arange(len(list(means.values())[0]))
bar_pos = OrderedDict((tr, bp + i*bar_width)
for i, tr in enumerate(all_trials))
for idx, tr in enumerate(all_trials):
plt.bar(bar_pos[tr], means[tr], yerr=stds[tr], width=bar_width, capsize=12*bar_width,
color=ALL_COLORS[idx], edgecolor='white', label=tr)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks(ticks=[i + bar_width for i in range(len(all_conds))],
labels=all_conds)
plt.title(title)
plt.legend()
plt.tight_layout()
fig = plt.gcf()
plt.close()
return fig
def boxplot(df: pd.DataFrame, dv: str, iv: str,
cond: str, title: str = None,
xlabel: str = None, ylabel: str = None):
import seaborn as sns
df = df.sort_values(by=cond)
sns.boxplot(data=df, x=cond, y=dv, hue=iv, palette="pastel")
# sns.despine(offset=10, trim=True)
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.tight_layout()
fig = plt.gcf()
plt.close()
return fig
def table(df: pd.DataFrame, dv: str, iv: str,
cond: str, title: str = None,
xlabel: str = None, ylabel: str = None):
# get all possible values for the IV and conditions
all_trials = list(natsorted(df[iv].unique()))
all_conds = list(natsorted(df[cond].unique()))
bar_width = 0.25 * 3 / len(all_trials)
means = OrderedDict((tr, []) for tr in all_trials)
stds = OrderedDict((tr, []) for tr in all_trials)
mnstd = OrderedDict((tr, []) for tr in all_trials)
for trial in all_trials:
for c in all_conds:
# get the list of all scores per episode
values = df[(df[iv] == trial) & (df[cond] == c)
][dv].values.astype(np.float)
means[trial].append(np.mean(values))
stds[trial].append(np.std(values))
mnstd[trial].append(f'{np.mean(values):.4f}±{np.std(values):.4f}')
tbl = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Functions for importing mssql data.
"""
import pandas as pd
import numpy as np
from datetime import datetime
from pdsql.util import create_engine, get_pk_stmt, compare_dfs
try:
from geopandas import GeoDataFrame
from shapely.wkb import loads
from pycrs import parse
except ImportError:
print('Install geopandas for reading geometery columns')
def rd_sql(server, database, table=None, col_names=None, where_in=None, where_op='AND', geo_col=False, from_date=None, to_date=None, date_col=None, rename_cols=None, stmt=None, con=None):
"""
Function to import data from an MSSQL database.
Parameters
----------
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
col_names : list of str
The column names that should be retrieved. e.g.: ['SiteID', 'BandNo', 'RecordNo']
where_in : dict
A dictionary of strings to lists of strings.'. e.g.: {'SnapshotType': ['value1', 'value2']}
where_op : str
If where_in is a dictionary and there are more than one key, then the operator that connects the where statements must be either 'AND' or 'OR'.
geo_col : bool
Is there a geometry column in the table?.
from_date : str
The start date in the form '2010-01-01'.
to_date : str
The end date in the form '2010-01-01'.
date_col : str
The SQL table column that contains the dates.
rename_cols: list of str
List of strings to rename the resulting DataFrame column names.
stmt : str
Custom SQL statement to be directly passed to the database. This will ignore all prior arguments except server and database.
con : SQLAlchemy connectable (engine/connection) or database string URI
The sqlalchemy connection to be passed to pandas.read_sql
Returns
-------
DataFrame
"""
## Create where statements
if stmt is None:
if table is None:
raise ValueError('Must at least provide input for server, database, and table.')
if col_names is not None:
if isinstance(col_names, str):
col_names = [col_names]
col_names1 = ['[' + i.encode('ascii', 'ignore').decode() + ']' for i in col_names]
col_stmt = ', '.join(col_names1)
else:
col_stmt = '*'
where_lst, where_temp = sql_where_stmts(where_in=where_in, where_op=where_op, from_date=from_date, to_date=to_date, date_col=date_col)
if isinstance(where_lst, list):
stmt1 = "SELECT " + col_stmt + " FROM " + table + " where " + " and ".join(where_lst)
else:
stmt1 = "SELECT " + col_stmt + " FROM " + table
elif isinstance(stmt, str):
where_temp = {}
stmt1 = stmt
else:
raise ValueError('stmt must either be an SQL string or None.')
## Create connection to database and execute sql statement
if geo_col & (stmt is None):
df = rd_sql_geo(server=server, database=database, table=table, col_stmt=col_stmt, where_lst=where_lst)
if rename_cols is not None:
rename_cols1 = rename_cols.copy()
rename_cols1.extend(['geometry'])
df.columns = rename_cols1
else:
if con is None:
engine = create_engine('mssql', server, database)
with engine.begin() as conn:
if where_temp:
for key, value in where_temp.items():
df = pd.DataFrame(data=value, columns=[key.lower()])
temp_tab = '#temp_'+key.lower()
df.to_sql(temp_tab, con=conn, if_exists='replace', index=False, chunksize=1000)
df = pd.read_sql(stmt1, con=conn)
else:
if where_temp:
for key, value in where_temp.items():
df = pd.DataFrame(data=value, columns=[key])
temp_tab = '#temp_'+key
df.to_sql(temp_tab, con=con, if_exists='replace', index=False, chunksize=1000)
df = pd.read_sql(stmt1, con=con)
if rename_cols is not None:
df.columns = rename_cols
return df
def rd_sql_ts(server, database, table, groupby_cols, date_col, values_cols, resample_code=None, period=1, fun='mean', val_round=3, where_in=None, where_op='AND', from_date=None, to_date=None, min_count=None, con=None):
"""
Function to specifically read and possibly aggregate time series data stored in MSSQL tables.
Parameters
----------
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
groupby_cols : str or list of str
The columns in the SQL table to grouped and returned with the time series data.
date_col : str
The date column in the table.
values_cols : str or list of str
The column(s) of the value(s) that should be resampled.
resample_code : str or None
The Pandas time series resampling code. e.g. 'D' for day, 'W' for week, 'M' for month, etc.
period : int
The number of resampling periods. e.g. period = 2 and resample = 'D' would be to resample the values over a 2 day period.
fun : str
The resampling function. i.e. mean, sum, count, min, or max. No median yet...
val_round : int
The number of decimals to round the values.
where_in : dict
A dictionary of strings to lists of strings.'. e.g.: {'SnapshotType': ['value1', 'value2']}
where_op : str
If where_in is a dictionary and there are more than one key, then the operator that connects the where statements must be either 'AND' or 'OR'.
from_date : str
The start date in the form '2010-01-01'.
to_date : str
The end date in the form '2010-01-01'.
min_count : int
The minimum number of values required to return groupby_cols. Only works when groupby_cols and vlue_cols are str.
con : SQLAlchemy connectable (engine/connection) or database string URI
The sqlalchemy connection to be passed to pandas.read_sql
Returns
-------
DataFrame
Pandas DataFrame with MultiIndex of groupby_cols and date_col
"""
## Create where statement
where_lst, where_temp = sql_where_stmts(where_in=where_in, where_op=where_op, from_date=from_date, to_date=to_date, date_col=date_col)
## Create ts statement and append earlier where statement
if isinstance(groupby_cols, str):
groupby_cols = [groupby_cols]
col_names1 = ['[' + i.encode('ascii', 'ignore').decode() + ']' for i in groupby_cols]
col_stmt = ', '.join(col_names1)
## Create sql stmt
sql_stmt1 = sql_ts_agg_stmt(table, groupby_cols=groupby_cols, date_col=date_col, values_cols=values_cols, resample_code=resample_code, period=period, fun=fun, val_round=val_round, where_lst=where_lst)
## Create connection to database
if con is None:
con = create_engine('mssql', server, database)
## Make minimum count selection
if (min_count is not None) & isinstance(min_count, int) & (len(groupby_cols) == 1):
cols_count_str = ', '.join([groupby_cols[0], 'count(' + values_cols + ') as count'])
if isinstance(where_lst, list):
stmt1 = "SELECT " + cols_count_str + " FROM " + "(" + sql_stmt1 + ") as agg" + " GROUP BY " + col_stmt + " HAVING count(" + values_cols + ") >= " + str(min_count)
else:
stmt1 = "SELECT " + cols_count_str + " FROM " + table + " GROUP BY " + col_stmt + " HAVING count(" + values_cols + ") >= " + str(min_count)
up_sites = pd.read_sql(stmt1, con)[groupby_cols[0]].tolist()
up_sites = [str(i) for i in up_sites]
if not up_sites:
raise ValueError('min_count filtered out all sites.')
where_in.update({groupby_cols[0]: up_sites})
where_lst, where_temp = sql_where_stmts(where_in, where_op=where_op, from_date=from_date, to_date=to_date, date_col=date_col)
## Create sql stmt
sql_stmt1 = sql_ts_agg_stmt(table, groupby_cols=groupby_cols, date_col=date_col, values_cols=values_cols, resample_code=resample_code, period=period, fun=fun, val_round=val_round, where_lst=where_lst)
## Create connection to database and execute sql statement
df = pd.read_sql(sql_stmt1, con)
## Check to see if any data was found
if df.empty:
raise ValueError('No data was found in the database for the parameters given.')
## set the index
df[date_col] = pd.to_datetime(df[date_col])
groupby_cols.append(date_col)
df1 = df.set_index(groupby_cols).sort_index()
return df1
def to_mssql(df, server, database, table, index=False, dtype=None, schema=None):
"""
Function to append a DataFrame onto an existing mssql table.
Parameters
----------
df : DataFrame
DataFrame to be saved. The DataFrame column/index names must match those on the mssql table exactly.
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
index : bool
Should the index be added as a column?
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should be an SQLAlchemy type.
Returns
-------
None
"""
### Prepare the engine
engine = create_engine('mssql', server, database)
### Save to mssql table
df.to_sql(name=table, con=engine, if_exists='append', chunksize=1000, index=index, dtype=dtype, schema=schema)
def create_table(server, database, table, dtype_dict, primary_keys=None, foreign_keys=None, foreign_table=None, drop_table=False, con=None):
"""
Function to create a table in an mssql database.
Parameters
----------
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
dtype_dict : dict of str
Dictionary of df columns to the associated sql data type. Examples below.
primary_keys : str or list of str
Index columns to define uniqueness in the data structure.
foreign_keys : str or list of str
Columns to link to another table in the same database.
foreign_table: str
The table in the same database with the identical foreign key(s).
drop_table : bool
If the table already exists, should it be dropped?
Returns
-------
None
"""
### Make connection
if con is None:
engine = create_engine('mssql', server, database)
con = engine.connect()
### Primary keys
if isinstance(primary_keys, str):
primary_keys = [primary_keys]
if isinstance(primary_keys, list):
pkey_stmt = ", Primary key (" + ", ".join(primary_keys) + ")"
else:
pkey_stmt = ""
### Foreign keys
if isinstance(foreign_keys, str):
foreign_keys = [foreign_keys]
if isinstance(foreign_keys, list):
fkey_stmt = ", Foreign key (" + ", ".join(foreign_keys) + ") " + "References " + foreign_table + "(" + ", ".join(foreign_keys) + ")"
else:
fkey_stmt = ""
### Initial create table statement
d1 = [str(i) + ' ' + dtype_dict[i] for i in dtype_dict]
d2 = ', '.join(d1)
tab_create_stmt = "IF OBJECT_ID(" + str([str(table)])[1:-1] + ", 'U') IS NULL create table " + table + " (" + d2 + pkey_stmt + fkey_stmt + ")"
trans = con.begin()
try:
### Drop table option or check
if drop_table:
drop_stmt = "IF OBJECT_ID(" + str([str(table)])[1:-1] + ", 'U') IS NOT NULL DROP TABLE " + table
con.execute(drop_stmt)
else:
check_tab_stmt = "IF OBJECT_ID(" + str([str(table)])[1:-1] + ", 'U') IS NOT NULL SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME=" + str([str(table)])[1:-1]
# tab1 = read_sql(check_tab_stmt, conn)
con.execute(check_tab_stmt)
# list1 = [i for i in conn]
# if list1:
# print('Table already exists. Returning the table info.')
# df = pd.DataFrame(list1, columns=['columns', 'dtype'])
# conn.close()
# return df
### Create table in database
con.execute(tab_create_stmt)
trans.commit()
con.close()
except Exception as err:
trans.rollback()
con.close()
raise err
def del_table_rows(server, database, table=None, pk_df=None, stmt=None):
"""
Function to selectively delete rows from an mssql table.
Parameters
----------
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str or None if stmt is a str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
pk_df : DataFrame
A DataFrame of the primary keys of the table for the rows that should be removed.
stmt : str
SQL delete statement. Will override everything except server and database.
Returns
-------
None
Notes
-----
Using the pk_df is the only way to ensure that specific rows will be deleted from composite keys. The column data types and names of pk_df must match the equivelant columns in the SQL table. The procedure creates a temporary table from the pk_df then deletes the rows in the target table based on the temp table. Then finally deletes the temp table.
"""
### Make the delete statement
# del_where_list = sql_where_stmts(**kwargs)
if isinstance(stmt, str):
del_rows_stmt = stmt
elif isinstance(pk_df, pd.DataFrame):
temp_tab = '#temp_del_tab1'
### Check the primary keys
pk_stmt = get_pk_stmt.format(db=database, table=table)
pk = rd_sql(server, database, stmt=pk_stmt).name
if pk.empty:
raise ValueError('SQL table has no primary key. Please set one up.')
if not np.isin(pk, pk_df.columns.tolist()).all():
raise ValueError('The primary keys in the SQL table does not match up with the pk_df')
sel_t1 = "select * from " + temp_tab
cols = pk_df.columns.tolist()
tab_where = [table + '.' + i for i in cols]
t1_where = [temp_tab + '.' + i for i in cols]
where_list = [t1_where[i] + ' = ' + tab_where[i] for i in np.arange(len(cols))]
where_stmt = " where " + " and ".join(where_list)
exists_stmt = "(" + sel_t1 + where_stmt + ")"
del_rows_stmt = "DELETE FROM " + table + " where exists " + exists_stmt
# elif isinstance(del_where_list, list):
# del_rows_stmt = "DELETE FROM " + table + " WHERE " + " AND ".join(del_where_list)
else:
raise ValueError('Please specify pk_df or stmt')
### Delete rows
engine = create_engine('mssql', server, database)
with engine.begin() as conn:
if isinstance(pk_df, pd.DataFrame):
pk_df.to_sql(name=temp_tab, con=conn, if_exists='replace', chunksize=1000)
conn.execute(del_rows_stmt)
def update_table_rows(df, server, database, table, on=None, index=False, append=True):
"""
Function to update rows from an mssql table. SQL table must have a primary key and the primary key must be in the input DataFrame.
Parameters
----------
df : DataFrame
DataFrame with data to be overwritten in SQL table.
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
on : None, str, or list of str
The index by which the update should be applied on. If None, then it uses the existing primary key(s).
index : bool
Does the df have an index that corresponds to the SQL table primary keys?
append : bool
Should new sites be appended to the table?
Returns
-------
None
"""
### Check the primary keys
if on is None:
pk_stmt = get_pk_stmt.format(db=database, table=table)
pk = rd_sql(server, database, stmt=pk_stmt).name.tolist()
if not pk:
raise ValueError('SQL table has no primary key. Please set one up or assign "on" explicitly.')
on = pk
elif isinstance(on, str):
on = [on]
## Check that "on" are in the tables
df_bool = ~np.isin(on, df.columns).all()
if df_bool:
raise ValueError('"on" contains column names that are not in the df')
### Make the update statement
temp_tab = '#temp_up1'
on_tab = [table + '.' + i for i in on]
on_temp = [temp_tab + '.' + i for i in on]
cols = df.columns.tolist()
val_cols = [i for i in cols if not i in on]
tab_list = [table + '.' + i for i in val_cols]
temp_list = [temp_tab + '.' + i for i in val_cols]
temp_list2 = [temp_tab + '.' + i for i in cols]
up_list = [tab_list[i] + ' = ' + temp_list[i] for i in np.arange(len(temp_list))]
on_list = [on_tab[i] + ' = ' + on_temp[i] for i in np.arange(len(on))]
# up_stmt = "update " + table + " set " + ", ".join(up_list) + " from " + table + " inner join " + temp_tab + " on " + " and ".join(on_list)
if append:
up_stmt = "merge " + table + " using " + temp_tab + " on (" + " and ".join(on_list) + ") when matched then update set " + ", ".join(up_list) + " WHEN NOT MATCHED BY TARGET THEN INSERT (" + ", ".join(cols) + ") values (" + ", ".join(temp_list2) + ");"
else:
up_stmt = "merge " + table + " using " + temp_tab + " on (" + " and ".join(on_list) + ") when matched then update set " + ", ".join(up_list) + ";"
### Run SQL code to update rows
engine = create_engine('mssql', server, database)
with engine.begin() as conn:
print('Saving data to temp table...')
df.to_sql(temp_tab, con=conn, if_exists='replace', index=index, chunksize=1000)
print('Updating primary table...')
conn.execute(up_stmt)
def sql_where_stmts(where_in=None, where_op='AND', from_date=None, to_date=None, date_col=None):
"""
Function to take various input parameters and convert them to a list of where statements for SQL.
Parameters
----------
where_in : str or dict
Either a str with an associated where_val list or a dictionary of string keys to list values. If a str, it should represent the table column associated with the 'where' condition.
where_in : dict
A dictionary of strings to lists of strings.'. e.g.: {'SnapshotType': ['value1', 'value2']}
where_op : str of either 'AND' or 'OR'
The binding operator for the where conditions.
from_date : str or None
The start date in the form '2010-01-01'.
to_date : str or None
The end date in the form '2010-01-01'.
date_col : str or None
The SQL table column that contains the dates.
Returns
-------
list of str or None
Returns a list of str where conditions to be passed to an SQL execution function. The function needs to bind it with " where " + " and ".join(where_lst)
"""
### Where stmts
where_stmt = []
temp_where = {}
if isinstance(where_in, dict):
where_in_bool = {k: len(where_in[k]) > 20000 for k in where_in}
for key, value in where_in.items():
if not isinstance(value, list):
raise ValueError('Values in the dict where_in must be lists.')
if where_in_bool[key]:
temp_where.update({key: value})
where_stmt.append("{key} IN (select {key} from {temp_tab})".format(key=key, temp_tab='#temp_'+key.lower()))
else:
where_stmt.append("{key} IN ({values})".format(key=key, values=str(value)[1:-1]))
if isinstance(from_date, str):
from_date1 = pd.to_datetime(from_date, errors='coerce')
if isinstance(from_date1, pd.Timestamp):
from_date2 = str(from_date1)
where_from_date = date_col + " >= " + from_date2.join(['\'', '\''])
else:
where_from_date = ''
else:
where_from_date = ''
if isinstance(to_date, str):
to_date1 = | pd.to_datetime(to_date, errors='coerce') | pandas.to_datetime |
# -*- coding: utf-8 -*-
__author__ = "gao"
import pandas as pd
from AmazingQuant.data_center.mongosconn import MongoConn
from AmazingQuant.constant import DatabaseName, Period, RightsAdjustment
import AmazingQuant.utils.data_transfer as data_transfer
class GetData(object):
def __init__(self):
self.conn = MongoConn()
def get_all_market_data(self, stock_code=[], field=[], start="", end="", period=Period.DAILY.value,
rights_adjustment=RightsAdjustment.NONE.value):
"""
复权因子和skip_pause在这做
:param stock_code:
:param field:
:param start:
:param end:
:param period:
:param rights_adjustment:
:return: 代码-n,字段-n,时间-n, return dataframe 行-代码-timetag(多层索引),列-字段
"""
if period == Period.DAILY.value:
db_name = DatabaseName.MARKET_DATA_DAILY.value
end = data_transfer.date_str_to_int(end)
values = []
colum = {"_id": 0, "timetag": 1}
for i in field:
colum[i] = 1
for stock in stock_code:
self.conn.check_connected()
stock_market_data = self.conn.select_colum(db_name=db_name, table=stock,
value={"timetag": {"$lte": end}},
colum=colum)
stock_market_data_list = list(stock_market_data)
if stock_market_data_list:
df = pd.DataFrame(stock_market_data_list)
values.append(pd.DataFrame(df[field].values, index=df['timetag'], columns=field))
market_data = pd.concat(values, keys=stock_code)
elif period == Period.ONE_MIN.value:
db_name = DatabaseName.MARKET_DATA_ONE_MIN.value
return market_data
def get_market_data(self, market_data, stock_code=[], field=[], start="", end="", count=-1):
"""
从dataframe解析数据成最终的数据格式,count都在在这里做
因为停牌或者其他原因取不到数据的,1 2 3 返回的是-1,其他返回的是pandas的空或者NaN,所以可以使用 >0判断是否取到值
:param market_data:
:param stock_code:
:param field:
:param start:
:param end:
:param count:
:return:
"""
if start != "":
start = data_transfer.date_str_to_int(start)
else:
start = 0
if end != "":
end = data_transfer.date_str_to_int(end)
else:
end = 0
# (1)代码-1,字段-1,时间-1, return float
if len(stock_code) == 1 and len(field) == 1 and (start == end) and count == -1:
try:
return market_data[field[0]].ix[stock_code[0], end]
# 停牌或者其他情情况取不到数据的返回-1
except:
return -1
# (2)代码-n,字段-1,时间-1, return Series
elif len(stock_code) > 1 and len(field) == 1 and (start == end) and count == -1:
result_dict = {}
for stock in stock_code:
try:
result_dict[stock] = market_data[field[0]].ix[stock, end]
except:
result_dict[stock] = -1
return pd.Series(result_dict)
# (3)代码-1,字段-n,时间-1, return Series
elif len(stock_code) == 1 and len(field) > 1 and (start == end) and count == -1:
result_dict = {}
for field_one in field:
try:
result_dict[field_one] = market_data[field_one].ix[stock_code[0], end]
except:
result_dict[field_one] = -1
return pd.Series(result_dict)
# (4)代码-1,字段-1,时间-n, return Series
elif len(stock_code) == 1 and len(field) == 1 and (start != end) and count == -1:
try:
series = market_data[field[0]].ix[stock_code[0]]
except KeyError:
return pd.Series()
series = series[series.index >= start]
series = series[series.index <= end]
return series
# (5)代码-n,字段-1,时间-n, return dataframe 行-timetag,列-代码
elif len(stock_code) > 1 and len(field) == 1 and (start != end) and count == -1:
result_dict = {}
for stock in stock_code:
index = market_data.ix[stock].index
index = index[index <= end]
index = index[index >= start]
result_dict[stock] = market_data[field[0]].ix[stock][index]
return | pd.DataFrame(result_dict) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unbale to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import os
import shutil
import time
from copy import copy
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime
from pathlib import Path
from glob import glob
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
# from libs.utils import *
# from libs.dataset_utils import *
def hyphenated_string_to_list(hyphenString):
return hyphenString.split("-")
class IndexManager:
def __init__(self, path=dirs.index+"main_index.csv", destFolder='auto', verbose=True):
self.path = Path(path)
self.indexExists = False
self.bkpFolderName = "index_backup"
self.imagesDestFolder = destFolder
self.verbose = verbose
self.duplicates_count = 0
self.new_entries_count = 0
self.originalLen = 0
# Get date and time for index and folder name
self.date = datetime.now()
self.validate_path()
def get_index_len(self):
'''
Returns integer representing number of entries in index,
or None, if index doesn't exists.
'''
if self.indexExists:
return self.index.shape[0]
else:
return None
def get_video_path_list(self):
''' Returns list with unique videos in the dataset.
'''
if self.indexExists:
return list(dict.fromkeys(self.index['VideoPath']))
else:
return []
def validate_path(self):
if self.path.suffix == ".csv":
# Check for csv files matching filename in self.path
pathList = list(self.path.parent.glob("*"+str(self.path.stem).strip()+"*.csv"))
if len(pathList) > 0:
try:
# Check if index DataFrame exists and is non-empty
self.index = | pd.read_csv(pathList[0]) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.