prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import json
import os
import pdb
import sys
import time
import h5py
import joblib
import numpy as np
import pandas as pd
from scipy.stats import poisson
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import precision_recall_curve, average_precision_score
import tensorflow as tf
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import seaborn as sns
from basenji import plots
from basenji import trainer
from basenji import dataset
from basenji import rnann
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
"""
saluki_test.py
Test the accuracy of a trained model.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <data_dir>'
parser = OptionParser(usage)
parser.add_option('--head', dest='head_i',
default=0, type='int',
help='Parameters head to test [Default: %default]')
# parser.add_option('--mc', dest='mc_n',
# default=0, type='int',
# help='Monte carlo test iterations [Default: %default]')
parser.add_option('-o', dest='out_dir',
default='test_out',
help='Output directory for test statistics [Default: %default]')
parser.add_option('--save', dest='save',
default=False, action='store_true',
help='Save targets and predictions numpy arrays [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
# parser.add_option('-t', dest='targets_file',
# default=None, type='str',
# help='File specifying target indexes and labels in table format')
parser.add_option('--split', dest='split_label',
default='test',
help='Dataset split label for eg TFR pattern [Default: %default]')
# parser.add_option('--tfr', dest='tfr_pattern',
# default=None,
# help='TFR pattern string appended to data_dir/tfrecords for subsetting [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide parameters, model, and test data HDF5')
else:
params_file = args[0]
model_file = args[1]
data_dir = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
# parse shifts to integers
options.shifts = [int(shift) for shift in options.shifts.split(',')]
#######################################################
# inputs
# read targets
# if options.targets_file is None:
# options.targets_file = '%s/targets.txt' % data_dir
# targets_df = pd.read_csv(options.targets_file, index_col=0, sep='\t')
# read model parameters
with open(params_file) as params_open:
params = json.load(params_open)
params_model = params['model']
params_train = params['train']
# construct eval data
eval_data = dataset.RnaDataset(data_dir,
split_label=options.split_label,
batch_size=params_train['batch_size'],
mode='eval')
# initialize model
seqnn_model = rnann.RnaNN(params_model)
seqnn_model.restore(model_file, options.head_i)
seqnn_model.build_ensemble(options.shifts)
#######################################################
# evaluation
# evaluate
test_loss, test_metric1, test_metric2 = seqnn_model.evaluate(eval_data)
# print summary statistics
print('\nTest Loss: %7.5f' % test_loss)
print('Test PearsonR: %7.5f' % test_metric1.mean())
print('Test R2: %7.5f' % test_metric2.mean())
# write target-level statistics
# targets_acc_df = pd.DataFrame({
# 'index': targets_df.index,
# 'pearsonr': test_metric1,
# 'r2': test_metric2,
# 'identifier': targets_df.identifier,
# 'description': targets_df.description
# })
# write target-level statistics
targets_acc_df = pd.DataFrame({
'index': [0],
'pearsonr': test_metric1,
'r2': test_metric2,
'identifier': ['123'],
'description': ['abc']
})
targets_acc_df.to_csv('%s/acc.txt'%options.out_dir, sep='\t',
index=False, float_format='%.5f')
#######################################################
# predict?
if options.save:
# compute predictions
test_preds = seqnn_model.predict(eval_data).astype('float16')
# read targets
test_targets = eval_data.numpy(return_inputs=False)
if options.save:
# read genes
genes_df = | pd.read_csv('%s/genes.tsv' % data_dir, sep='\t', index_col=0) | pandas.read_csv |
from bs4 import BeautifulSoup
import json
import os
import pandas as pd
import re
import requests
import subprocess
def text_from_pdf(pdf_path, temp_path):
if os.path.exists(temp_path):
os.remove(temp_path)
subprocess.call(["pdftotext", pdf_path, temp_path])
f = open(temp_path, encoding="utf8")
text = f.read()
f.close()
os.remove(temp_path)
return text
base_url = "http://papers.nips.cc"
index_urls = {1987: "https://papers.nips.cc/book/neural-information-processing-systems-1987"}
for i in range(1, 30):
year = i+1987
index_urls[year] = "http://papers.nips.cc/book/advances-in-neural-information-processing-systems-%d-%d" % (i, year)
nips_authors = set()
papers = list()
paper_authors = list()
for year in sorted(index_urls.keys()):
index_url = index_urls[year]
index_html_path = os.path.join("working", "html", str(year)+".html")
if not os.path.exists(index_html_path):
r = requests.get(index_url)
if not os.path.exists(os.path.dirname(index_html_path)):
os.makedirs(os.path.dirname(index_html_path))
with open(index_html_path, "wb") as index_html_file:
index_html_file.write(r.content)
with open(index_html_path, "rb") as f:
html_content = f.read()
soup = BeautifulSoup(html_content, "lxml")
paper_links = [link for link in soup.find_all('a') if link["href"][:7]=="/paper/"]
print("%d Papers Found" % len(paper_links))
temp_path = os.path.join("working", "temp.txt")
for link in paper_links:
paper_title = link.contents[0]
info_link = base_url + link["href"]
pdf_link = info_link + ".pdf"
pdf_name = link["href"][7:] + ".pdf"
pdf_path = os.path.join("working", "pdfs", str(year), pdf_name)
paper_id = re.findall(r"^(\d+)-", pdf_name)[0]
print(year, " ", paper_id) #paper_title.encode('ascii', 'namereplace'))
if not os.path.exists(pdf_path):
pdf = requests.get(pdf_link)
if not os.path.exists(os.path.dirname(pdf_path)):
os.makedirs(os.path.dirname(pdf_path))
pdf_file = open(pdf_path, "wb")
pdf_file.write(pdf.content)
pdf_file.close()
paper_info_html_path = os.path.join("working", "html", str(year), str(paper_id)+".html")
if not os.path.exists(paper_info_html_path):
r = requests.get(info_link)
if not os.path.exists(os.path.dirname(paper_info_html_path)):
os.makedirs(os.path.dirname(paper_info_html_path))
with open(paper_info_html_path, "wb") as f:
f.write(r.content)
with open(paper_info_html_path, "rb") as f:
html_content = f.read()
paper_soup = BeautifulSoup(html_content, "lxml")
try:
abstract = paper_soup.find('p', attrs={"class": "abstract"}).contents[0]
except:
print("Abstract not found %s" % paper_title.encode("ascii", "replace"))
abstract = ""
authors = [(re.findall(r"-(\d+)$", author.contents[0]["href"])[0],
author.contents[0].contents[0])
for author in paper_soup.find_all('li', attrs={"class": "author"})]
for author in authors:
nips_authors.add(author)
paper_authors.append([len(paper_authors)+1, paper_id, author[0]])
event_types = [h.contents[0][23:] for h in paper_soup.find_all('h3') if h.contents[0][:22]=="Conference Event Type:"]
if len(event_types) != 1:
#print(event_types)
#print([h.contents for h in paper_soup.find_all('h3')].__str__().encode("ascii", "replace"))
#raise Exception("Bad Event Data")
event_type = ""
else:
event_type = event_types[0]
with open(pdf_path, "rb") as f:
if f.read(15)==b"<!DOCTYPE html>":
print("PDF MISSING")
continue
paper_text = text_from_pdf(pdf_path, temp_path)
papers.append([paper_id, year, paper_title, event_type, pdf_name, abstract, paper_text])
pd.DataFrame(list(nips_authors), columns=["id","name"]).sort_values(by="id").to_csv("output/authors.csv", index=False)
| pd.DataFrame(papers, columns=["id", "year", "title", "event_type", "pdf_name", "abstract", "paper_text"]) | pandas.DataFrame |
#!/usr/bin/env python3
#Author: <NAME>
#Contact: <EMAIL>
from __future__ import print_function
from . import SigProfilerMatrixGenerator as matGen
import os
import SigProfilerMatrixGenerator as sig
import re
import sys
import pandas as pd
import datetime
from SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn
import uuid
import shutil
import time
import numpy as np
import platform
import itertools
import statsmodels
import matplotlib as plt
from pathlib import Path
import sigProfilerPlotting as sigPlt
import scipy
def perm(n, seq):
'''
Generates a list of all available permutations of n-mers.
Parameters:
n -> length of the desired permutation string
seq -> list of all possible string values
Returns:
permus -> list of all available permutations
'''
permus = []
for p in itertools.product(seq, repeat=n):
permus.append("".join(p))
return(permus)
def SigProfilerMatrixGeneratorFunc (project, genome, vcfFiles, exome=False, bed_file=None, chrom_based=False, plot=False, tsb_stat=False, seqInfo=False, cushion=100, gs=False):
'''
Allows for the import of the sigProfilerMatrixGenerator.py function. Returns a dictionary
with each context serving as the first level of keys.
Parameters:
project -> unique name given to the current samples
genome -> reference genome
vcfFiles -> path where the input vcf files are located.
exome -> flag to use only the exome or not
bed_file -> BED file that contains a list of ranges to be used in generating the matrices
chrom_based -> flag to create the matrices on a per chromosome basis
plot -> flag to generate the plots for each context
tsb_stat -> performs a transcriptional strand bias test for the 24, 384, and 6144 contexts. The output is
saved into the output/TSB directory
gs -> flag that performs a gene strand bias test
Returns:
matrices -> dictionary (nested) of the matrices for each context
example:
matrices = {'96': {'PD1001a':{'A[A>C]A':23,
'A[A>G]A':10,...},
'PD1202a':{'A[A>C]A':23,
'A[A>G]A':10,...},...},
'192':{'PD1001a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},
'PD1202a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},...},...}
'''
# Instantiates all of the required variables and references
if gs:
print("The Gene Strand Bias is not yet supported! Continuing with the matrix generation.")
gs = False
functionFlag = True
bed = False
bed_ranges = None
limited_indel = True
exome = exome
plot = plot
# Instantiates the final output matrix
matrices = {'96':None, '1536':None, '384':None, '6144':None, 'DINUC':None, '6':None, '24':None, 'INDEL':None}
# Provides a chromosome conversion from NCBI notation
ncbi_chrom = {'NC_000067.6':'1', 'NC_000068.7':'2', 'NC_000069.6':'3', 'NC_000070.6':'4',
'NC_000071.6':'5', 'NC_000072.6':'6', 'NC_000073.6':'7', 'NC_000074.6':'8',
'NC_000075.6':'9', 'NC_000076.6':'10', 'NC_000077.6':'11', 'NC_000078.6':'12',
'NC_000079.6':'13', 'NC_000080.6':'14', 'NC_000081.6':'15', 'NC_000082.6':'16',
'NC_000083.6':'17', 'NC_000084.6':'18', 'NC_000085.6':'19', 'NC_000086.7':'X',
'NC_000087.7':'Y'}
# Provides the reference file conversion from binary to base information
tsb_ref = {0:['N','A'], 1:['N','C'], 2:['N','G'], 3:['N','T'],
4:['T','A'], 5:['T','C'], 6:['T','G'], 7:['T','T'],
8:['U','A'], 9:['U','C'], 10:['U','G'], 11:['U','T'],
12:['B','A'], 13:['B','C'], 14:['B','G'], 15:['B','T'],
16:['N','N'], 17:['T','N'], 18:['U','N'], 19:['B','N']}
bias_sort = {'T':0,'U':1,'N':3,'B':2, 'Q':4}
tsb = ['T','U','N','B']
tsb_I = ['T','U','N','B','Q']
bases = ['A','C','G','T']
mutation_types = ['CC>AA','CC>AG','CC>AT','CC>GA','CC>GG','CC>GT','CC>TA','CC>TG','CC>TT',
'CT>AA','CT>AC','CT>AG','CT>GA','CT>GC','CT>GG','CT>TA','CT>TC','CT>TG',
'TC>AA','TC>AG','TC>AT','TC>CA','TC>CG','TC>CT','TC>GA','TC>GG','TC>GT',
'TT>AA','TT>AC','TT>AG','TT>CA','TT>CC','TT>CG','TT>GA','TT>GC','TT>GG']
mutation_types_non_tsb = ['AC>CA','AC>CG','AC>CT','AC>GA','AC>GG','AC>GT','AC>TA','AC>TG','AC>TT',
'AT>CA','AT>CC','AT>CG','AT>GA','AT>GC','AT>TA',
'CG>AT','CG>GC','CG>GT','CG>TA','CG>TC','CG>TT',
'GC>AA','GC>AG','GC>AT','GC>CA','GC>CG','GC>TA',
'TA>AT','TA>CG','TA>CT','TA>GC','TA>GG','TA>GT',
'TG>AA','TG>AC','TG>AT','TG>CA','TG>CC','TG>CT','TG>GA','TG>GC','TG>GT']
indels_seq_types = [ # Single-sequences
'C', 'T',
# Di-sequences
'AC','AT','CA','CC','CG','CT','GC','TA','TC','TT',
# Tri-sequences
'ACC', 'ACT', 'ATC', 'ATT', 'CAC', 'CAT', 'CCA', 'CCC', 'CCG', 'CCT', 'CGC', 'CGT', 'CTA', 'CTC', 'CTG', 'CTT',
'GCC', 'GCT', 'GTC', 'GTT', 'TAC', 'TAT', 'TCA', 'TCC', 'TCG', 'TCT', 'TGC', 'TGT', 'TTA', 'TTC', 'TTG', 'TTT',
# Tetra-sequences
'AACC', 'AACT', 'AATC', 'AATT', 'ACAC', 'ACAT', 'ACCA', 'ACCC', 'ACCG', 'ACCT', 'ACGC', 'ACGT', 'ACTA', 'ACTC', 'ACTG', 'ACTT', 'AGCC', 'AGCT', 'AGTC',
'AGTT', 'ATAC', 'ATAT', 'ATCA', 'ATCC', 'ATCG', 'ATCT', 'ATGC', 'ATGT', 'ATTA', 'ATTC', 'ATTG', 'ATTT', 'CAAC', 'CAAT', 'CACA', 'CACC', 'CACG', 'CACT',
'CAGC', 'CAGT', 'CATA', 'CATC', 'CATG', 'CATT', 'CCAA', 'CCAC', 'CCAG', 'CCAT', 'CCCA', 'CCCC', 'CCCG', 'CCCT', 'CCGA', 'CCGC', 'CCGG', 'CCGT', 'CCTA',
'CCTC', 'CCTG', 'CCTT', 'CGAC', 'CGAT', 'CGCA', 'CGCC', 'CGCG', 'CGCT', 'CGGC', 'CGTA', 'CGTC', 'CGTG', 'CGTT', 'CTAA', 'CTAC', 'CTAG', 'CTAT', 'CTCA',
'CTCC', 'CTCG', 'CTCT', 'CTGA', 'CTGC', 'CTGG', 'CTGT', 'CTTA', 'CTTC', 'CTTG', 'CTTT', 'GACC', 'GATC', 'GCAC', 'GCCA', 'GCCC', 'GCCG', 'GCCT', 'GCGC',
'GCTA', 'GCTC', 'GCTG', 'GCTT', 'GGCC', 'GGTC', 'GTAC', 'GTCA', 'GTCC', 'GTCG', 'GTCT', 'GTGC', 'GTTA', 'GTTC', 'GTTG', 'GTTT', 'TAAC', 'TACA', 'TACC',
'TACG', 'TACT', 'TAGC', 'TATA', 'TATC', 'TATG', 'TATT', 'TCAA', 'TCAC', 'TCAG', 'TCAT', 'TCCA', 'TCCC', 'TCCG', 'TCCT', 'TCGA', 'TCGC', 'TCGG', 'TCGT',
'TCTA', 'TCTC', 'TCTG', 'TCTT', 'TGAC', 'TGCA', 'TGCC', 'TGCG', 'TGCT', 'TGTA', 'TGTC', 'TGTG', 'TGTT', 'TTAA', 'TTAC', 'TTAG', 'TTAT', 'TTCA', 'TTCC',
'TTCG', 'TTCT', 'TTGA', 'TTGC', 'TTGG', 'TTGT', 'TTTA', 'TTTC', 'TTTG', 'TTTT',
# Penta-sequences
'AACCC', 'AACCT', 'AACTC', 'AACTT', 'AATCC', 'AATCT', 'AATTC', 'AATTT', 'ACACC', 'ACACT', 'ACATC', 'ACATT', 'ACCAC', 'ACCAT', 'ACCCA', 'ACCCC', 'ACCCG',
'ACCCT', 'ACCGC', 'ACCGT', 'ACCTA', 'ACCTC', 'ACCTG', 'ACCTT', 'ACGCC', 'ACGCT', 'ACGTC', 'ACGTT', 'ACTAC', 'ACTAT', 'ACTCA', 'ACTCC', 'ACTCG', 'ACTCT',
'ACTGC', 'ACTGT', 'ACTTA', 'ACTTC', 'ACTTG', 'ACTTT', 'AGCCC', 'AGCCT', 'AGCTC', 'AGCTT', 'AGTCC', 'AGTCT', 'AGTTC', 'AGTTT', 'ATACC', 'ATACT', 'ATATC',
'ATATT', 'ATCAC', 'ATCAT', 'ATCCA', 'ATCCC', 'ATCCG', 'ATCCT', 'ATCGC', 'ATCGT', 'ATCTA', 'ATCTC', 'ATCTG', 'ATCTT', 'ATGCC', 'ATGCT', 'ATGTC', 'ATGTT',
'ATTAC', 'ATTAT', 'ATTCA', 'ATTCC', 'ATTCG', 'ATTCT', 'ATTGC', 'ATTGT', 'ATTTA', 'ATTTC', 'ATTTG', 'ATTTT', 'CAACC', 'CAACT', 'CAATC', 'CAATT', 'CACAC',
'CACAT', 'CACCA', 'CACCC', 'CACCG', 'CACCT', 'CACGC', 'CACGT', 'CACTA', 'CACTC', 'CACTG', 'CACTT', 'CAGCC', 'CAGCT', 'CAGTC', 'CAGTT', 'CATAC', 'CATAT',
'CATCA', 'CATCC', 'CATCG', 'CATCT', 'CATGC', 'CATGT', 'CATTA', 'CATTC', 'CATTG', 'CATTT', 'CCAAC', 'CCAAT', 'CCACA', 'CCACC', 'CCACG', 'CCACT', 'CCAGC',
'CCAGT', 'CCATA', 'CCATC', 'CCATG', 'CCATT', 'CCCAA', 'CCCAC', 'CCCAG', 'CCCAT', 'CCCCA', 'CCCCC', 'CCCCG', 'CCCCT', 'CCCGA', 'CCCGC', 'CCCGG', 'CCCGT',
'CCCTA', 'CCCTC', 'CCCTG', 'CCCTT', 'CCGAC', 'CCGAT', 'CCGCA', 'CCGCC', 'CCGCG', 'CCGCT', 'CCGGC', 'CCGGT', 'CCGTA', 'CCGTC', 'CCGTG', 'CCGTT', 'CCTAA',
'CCTAC', 'CCTAG', 'CCTAT', 'CCTCA', 'CCTCC', 'CCTCG', 'CCTCT', 'CCTGA', 'CCTGC', 'CCTGG', 'CCTGT', 'CCTTA', 'CCTTC', 'CCTTG', 'CCTTT', 'CGACC', 'CGACT',
'CGATC', 'CGATT', 'CGCAC', 'CGCAT', 'CGCCA', 'CGCCC', 'CGCCG', 'CGCCT', 'CGCGC', 'CGCGT', 'CGCTA', 'CGCTC', 'CGCTG', 'CGCTT', 'CGGCC', 'CGGCT', 'CGGTC',
'CGGTT', 'CGTAC', 'CGTAT', 'CGTCA', 'CGTCC', 'CGTCG', 'CGTCT', 'CGTGC', 'CGTGT', 'CGTTA', 'CGTTC', 'CGTTG', 'CGTTT', 'CTAAC', 'CTAAT', 'CTACA', 'CTACC',
'CTACG', 'CTACT', 'CTAGC', 'CTAGT', 'CTATA', 'CTATC', 'CTATG', 'CTATT', 'CTCAA', 'CTCAC', 'CTCAG', 'CTCAT', 'CTCCA', 'CTCCC', 'CTCCG', 'CTCCT', 'CTCGA',
'CTCGC', 'CTCGG', 'CTCGT', 'CTCTA', 'CTCTC', 'CTCTG', 'CTCTT', 'CTGAC', 'CTGAT', 'CTGCA', 'CTGCC', 'CTGCG', 'CTGCT', 'CTGGC', 'CTGGT', 'CTGTA', 'CTGTC',
'CTGTG', 'CTGTT', 'CTTAA', 'CTTAC', 'CTTAG', 'CTTAT', 'CTTCA', 'CTTCC', 'CTTCG', 'CTTCT', 'CTTGA', 'CTTGC', 'CTTGG', 'CTTGT', 'CTTTA', 'CTTTC', 'CTTTG',
'CTTTT', 'GACCC', 'GACCT', 'GACTC', 'GACTT', 'GATCC', 'GATCT', 'GATTC', 'GATTT', 'GCACC', 'GCACT', 'GCATC', 'GCATT', 'GCCAC', 'GCCAT', 'GCCCA', 'GCCCC',
'GCCCG', 'GCCCT', 'GCCGC', 'GCCGT', 'GCCTA', 'GCCTC', 'GCCTG', 'GCCTT', 'GCGCC', 'GCGCT', 'GCGTC', 'GCGTT', 'GCTAC', 'GCTAT', 'GCTCA', 'GCTCC', 'GCTCG',
'GCTCT', 'GCTGC', 'GCTGT', 'GCTTA', 'GCTTC', 'GCTTG', 'GCTTT', 'GGCCC', 'GGCCT', 'GGCTC', 'GGCTT', 'GGTCC', 'GGTCT', 'GGTTC', 'GGTTT', 'GTACC', 'GTACT',
'GTATC', 'GTATT', 'GTCAC', 'GTCAT', 'GTCCA', 'GTCCC', 'GTCCG', 'GTCCT', 'GTCGC', 'GTCGT', 'GTCTA', 'GTCTC', 'GTCTG', 'GTCTT', 'GTGCC', 'GTGCT', 'GTGTC',
'GTGTT', 'GTTAC', 'GTTAT', 'GTTCA', 'GTTCC', 'GTTCG', 'GTTCT', 'GTTGC', 'GTTGT', 'GTTTA', 'GTTTC', 'GTTTG', 'GTTTT', 'TAACC', 'TAACT', 'TAATC', 'TAATT',
'TACAC', 'TACAT', 'TACCA', 'TACCC', 'TACCG', 'TACCT', 'TACGC', 'TACGT', 'TACTA', 'TACTC', 'TACTG', 'TACTT', 'TAGCC', 'TAGCT', 'TAGTC', 'TAGTT', 'TATAC',
'TATAT', 'TATCA', 'TATCC', 'TATCG', 'TATCT', 'TATGC', 'TATGT', 'TATTA', 'TATTC', 'TATTG', 'TATTT', 'TCAAC', 'TCAAT', 'TCACA', 'TCACC', 'TCACG', 'TCACT',
'TCAGC', 'TCAGT', 'TCATA', 'TCATC', 'TCATG', 'TCATT', 'TCCAA', 'TCCAC', 'TCCAG', 'TCCAT', 'TCCCA', 'TCCCC', 'TCCCG', 'TCCCT', 'TCCGA', 'TCCGC', 'TCCGG',
'TCCGT', 'TCCTA', 'TCCTC', 'TCCTG', 'TCCTT', 'TCGAC', 'TCGAT', 'TCGCA', 'TCGCC', 'TCGCG', 'TCGCT', 'TCGGC', 'TCGGT', 'TCGTA', 'TCGTC', 'TCGTG', 'TCGTT',
'TCTAA', 'TCTAC', 'TCTAG', 'TCTAT', 'TCTCA', 'TCTCC', 'TCTCG', 'TCTCT', 'TCTGA', 'TCTGC', 'TCTGG', 'TCTGT', 'TCTTA', 'TCTTC', 'TCTTG', 'TCTTT', 'TGACC',
'TGACT', 'TGATC', 'TGATT', 'TGCAC', 'TGCAT', 'TGCCA', 'TGCCC', 'TGCCG', 'TGCCT', 'TGCGC', 'TGCGT', 'TGCTA', 'TGCTC', 'TGCTG', 'TGCTT', 'TGGCC', 'TGGCT',
'TGGTC', 'TGGTT', 'TGTAC', 'TGTAT', 'TGTCA', 'TGTCC', 'TGTCG', 'TGTCT', 'TGTGC', 'TGTGT', 'TGTTA', 'TGTTC', 'TGTTG', 'TGTTT', 'TTAAC', 'TTAAT', 'TTACA',
'TTACC', 'TTACG', 'TTACT', 'TTAGC', 'TTAGT', 'TTATA', 'TTATC', 'TTATG', 'TTATT', 'TTCAA', 'TTCAC', 'TTCAG', 'TTCAT', 'TTCCA', 'TTCCC', 'TTCCG', 'TTCCT',
'TTCGA', 'TTCGC', 'TTCGG', 'TTCGT', 'TTCTA', 'TTCTC', 'TTCTG', 'TTCTT', 'TTGAC', 'TTGAT', 'TTGCA', 'TTGCC', 'TTGCG', 'TTGCT', 'TTGGC', 'TTGGT', 'TTGTA',
'TTGTC', 'TTGTG', 'TTGTT', 'TTTAA', 'TTTAC', 'TTTAG', 'TTTAT', 'TTTCA', 'TTTCC', 'TTTCG', 'TTTCT', 'TTTGA', 'TTTGC', 'TTTGG', 'TTTGT', 'TTTTA', 'TTTTC',
'TTTTG', 'TTTTT']
# Pre-fills the mutation types variable
size = 5
mut_types_initial = perm(size, "ACGT")
mut_types = []
for tsbs in tsb:
for mut in mut_types_initial:
current_base = mut[int(size/2)]
if current_base == 'C' or current_base == 'T':
for base in bases:
if base != current_base:
mut_types.append(tsbs+":"+mut[0:int(size/2)] + "[" + current_base+">"+ base+"]"+mut[int(size/2)+1:])
# Organizes all of the mutation types for DINUCs
mutation_types_tsb_context = []
for base in bases:
for mut in mutation_types:
for base2 in bases:
for base3 in tsb:
mutation_types_tsb_context.append(''.join([base3,":",base,"[",mut,"]",base2]))
for base in bases:
for mut in mutation_types_non_tsb:
for base2 in bases:
mutation_types_tsb_context.append(''.join(['Q:', base, "[", mut, "]", base2]))
indel_types_tsb = []
indel_types_simple = []
indel_complete = []
indel_cat = ['Del', 'Ins']
indel_types = ['1:Del:C:0', '1:Del:C:1', '1:Del:C:2', '1:Del:C:3', '1:Del:C:4', '1:Del:C:5',
'1:Del:T:0', '1:Del:T:1', '1:Del:T:2', '1:Del:T:3', '1:Del:T:4', '1:Del:T:5',
'1:Ins:C:0', '1:Ins:C:1', '1:Ins:C:2', '1:Ins:C:3', '1:Ins:C:4', '1:Ins:C:5',
'1:Ins:T:0', '1:Ins:T:1', '1:Ins:T:2', '1:Ins:T:3', '1:Ins:T:4', '1:Ins:T:5',
# >1bp INDELS
'2:Del:R:0', '2:Del:R:1', '2:Del:R:2', '2:Del:R:3', '2:Del:R:4', '2:Del:R:5',
'3:Del:R:0', '3:Del:R:1', '3:Del:R:2', '3:Del:R:3', '3:Del:R:4', '3:Del:R:5',
'4:Del:R:0', '4:Del:R:1', '4:Del:R:2', '4:Del:R:3', '4:Del:R:4', '4:Del:R:5',
'5:Del:R:0', '5:Del:R:1', '5:Del:R:2', '5:Del:R:3', '5:Del:R:4', '5:Del:R:5',
'2:Ins:R:0', '2:Ins:R:1', '2:Ins:R:2', '2:Ins:R:3', '2:Ins:R:4', '2:Ins:R:5',
'3:Ins:R:0', '3:Ins:R:1', '3:Ins:R:2', '3:Ins:R:3', '3:Ins:R:4', '3:Ins:R:5',
'4:Ins:R:0', '4:Ins:R:1', '4:Ins:R:2', '4:Ins:R:3', '4:Ins:R:4', '4:Ins:R:5',
'5:Ins:R:0', '5:Ins:R:1', '5:Ins:R:2', '5:Ins:R:3', '5:Ins:R:4', '5:Ins:R:5',
#MicroHomology INDELS
'2:Del:M:1', '3:Del:M:1', '3:Del:M:2', '4:Del:M:1', '4:Del:M:2', '4:Del:M:3',
'5:Del:M:1', '5:Del:M:2', '5:Del:M:3', '5:Del:M:4', '5:Del:M:5', '2:Ins:M:1',
'3:Ins:M:1', '3:Ins:M:2', '4:Ins:M:1', '4:Ins:M:2', '4:Ins:M:3', '5:Ins:M:1',
'5:Ins:M:2', '5:Ins:M:3', '5:Ins:M:4', '5:Ins:M:5', 'complex', 'non_matching']
for indels in indel_types[:-13]:
for tsbs in tsb_I:
indel_types_tsb.append(tsbs + ":" + indels)
for indels in indels_seq_types:
repeat = str(len(indels))
for id_cat in indel_cat:
for l in range(0, 6, 1):
indel_complete.append(":".join([repeat, id_cat, indels, str(l)]))
for id_cat in indel_cat:
for i in range(0, 6, 1):
indel_complete.append(":".join(['5',id_cat, '5',str(i)]))
indel_types_simple = indel_types[:24]
indel_types_simple.append('long_Del')
indel_types_simple.append('long_Ins')
indel_types_simple.append('MH')
indel_types_simple.append('complex')
# Instantiates the initial contexts to generate matrices for
contexts = ['6144']
# Organizes all of the reference directories for later reference:
ref_dir, tail = os.path.split(os.path.dirname(os.path.abspath(__file__)))
chrom_path =ref_dir + '/references/chromosomes/tsb/' + genome + "/"
transcript_path = ref_dir + '/references/chromosomes/transcripts/' + genome + "/"
# Terminates the code if the genome reference files have not been created/installed
if not os.path.exists(chrom_path):
print("The specified genome: " + genome + " has not been installed\nRun the following command to install the genome:\n\tpython sigProfilerMatrixGenerator/install.py -g " + genome)
sys.exit()
# Organizes all of the input and output directories:
if vcfFiles[-1] != "/":
vcfFiles += "/"
vcf_path = vcfFiles + "input/"
vcf_path_original = vcf_path
if not os.path.exists(vcf_path) or len(os.listdir(vcf_path)) < 1:
os.makedirs(vcf_path, exist_ok=True)
input_files = os.listdir(vcfFiles)
if os.path.exists(vcfFiles + "input/"):
input_files.remove("input")
if os.path.exists(vcfFiles + "logs/"):
input_files.remove("logs")
if ".DS_Store" in input_files:
input_files.remove(".DS_Store")
if "__init__.py" in input_files:
input_files.remove("__init__.py")
if "__pycache__" in input_files:
input_files.remove("__pycache__")
if os.path.exists(vcfFiles + "output/"):
input_files.remove("output")
for files in input_files:
shutil.copy(vcfFiles + files, vcf_path + files)
output_matrix = vcfFiles + "output/"
if not os.path.exists(output_matrix):
os.makedirs(output_matrix)
# Organizes the error and log files
time_stamp = datetime.date.today()
output_log_path = vcfFiles + "logs/"
if not os.path.exists(output_log_path):
os.makedirs(output_log_path)
error_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".err"
log_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".out"
if os.path.exists(error_file):
os.remove(error_file)
if os.path.exists(log_file):
os.remove(log_file)
sys.stderr = open(error_file, 'w')
log_out = open(log_file, 'w')
log_out.write("THIS FILE CONTAINS THE METADATA ABOUT SYSTEM AND RUNTIME\n\n\n")
log_out.write("-------System Info-------\n")
log_out.write("Operating System Name: "+ platform.uname()[0]+"\n"+"Nodename: "+ platform.uname()[1]+"\n"+"Release: "+ platform.uname()[2]+"\n"+"Version: "+ platform.uname()[3]+"\n")
log_out.write("\n-------Python and Package Versions------- \n")
log_out.write("Python Version: "+str(platform.sys.version_info.major)+"."+str(platform.sys.version_info.minor)+"."+str(platform.sys.version_info.micro)+"\n")
log_out.write("SigProfilerMatrixGenerator Version: "+sig.__version__+"\n")
log_out.write("SigProfilerPlotting version: "+sigPlt.__version__+"\n")
log_out.write("matplotlib version: "+plt.__version__+"\n")
log_out.write("statsmodels version: "+statsmodels.__version__+"\n")
log_out.write("scipy version: "+scipy.__version__+"\n")
log_out.write("pandas version: "+pd.__version__+"\n")
log_out.write("numpy version: "+np.__version__+"\n")
log_out.write("\n-------Vital Parameters Used for the execution -------\n")
log_out.write("Project: {}\nGenome: {}\nInput File Path: {}\nexome: {}\nbed_file: {}\nchrom_based: {}\nplot: {}\ntsb_stat: {}\nseqInfo: {}\n".format(project, genome, vcfFiles, str(exome), str(bed_file), str(chrom_based), str(plot), str(tsb_stat), str(seqInfo)))
log_out.write("\n-------Date and Time Data------- \n")
tic = datetime.datetime.now()
log_out.write("Date and Clock time when the execution started: "+str(tic)+"\n\n\n")
log_out.write("-------Runtime Checkpoints------- \n")
log_out.close()
# Gathers all of the vcf files:
vcf_files_temp = os.listdir(vcf_path)
vcf_files = []
first_extenstion = True
for file in vcf_files_temp:
# Skips hidden files
if file[0:3] == '.DS' or file[0:2] == '__':
pass
else:
vcf_files.append(file)
# Creates a temporary folder for sorting and generating the matrices
file_name = vcf_files[0].split(".")
file_extension = file_name[-1]
unique_folder = project + "_"+ str(uuid.uuid4())
output_path = output_matrix + "temp/" + unique_folder + "/"
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
skipped_muts = 0
# Converts the input files to standard text in the temporary folder
if file_extension == 'genome':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path)
else:
if file_extension == 'txt':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'vcf':
snv, indel, skipped, samples = convertIn.convertVCF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'maf':
snv, indel, skipped, samples = convertIn.convertMAF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'tsv':
snv, indel, skipped, samples = convertIn.convertICGC(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
else:
print("File format not supported")
skipped_muts += skipped
# Instantiates variables for final output statistics
analyzed_muts = [0, 0, 0]
sample_count_high = 0
# Begins matrix generation for all possible contexts
for i in range(0, 2, 1):
if i == 0 and snv:
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
output_path_snv = output_path + "SNV/"
vcf_files = os.listdir(output_path_snv)
vcf_path = output_path_snv
print("Starting matrix generation for SNVs and DINUCs...", end='', flush=True)
start = time.time()
# Skips SNVs if none are present
elif i == 0 and not snv:
continue
elif i == 1 and indel:
mutation_ID = {}
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['simple'] = pd.DataFrame(0, index=indel_types_simple, columns=samples)
mutation_ID['tsb'] = pd.DataFrame(0, index=indel_types_tsb, columns=samples)
mutation_ID['complete'] = pd.DataFrame(0, index=indel_complete, columns=samples)
contexts = ['INDEL']
output_path_indel = output_path + "INDEL/"
vcf_files = os.listdir(output_path_indel)
vcf_path = output_path_indel
print("Starting matrix generation for INDELs...", end='', flush=True)
start = time.time()
# Skips INDELs if none are present and deletes the temp folder
elif i ==1 and not indel:
shutil.rmtree(output_matrix + "temp/")
continue
# Removes hidden files generated in macos
if ".DS_Store" in vcf_files:
vcf_files.remove(".DS_Store")
# Generates the bed regions if a bed file was provided
if bed_file != None:
bed = True
bed_file_path = bed_file
bed_ranges = matGen.BED_filtering(bed_file_path)
else:
bed_file_path = None
# Sorts files based on chromosome, sample, and start position
if not chrom_based:
chrom_start = None
if i != 1:
for file in vcf_files:
chrom = file.split("_")[0]
with open(vcf_path + file) as f:
lines = [line.strip().split() for line in f]
lines = sorted(lines, key = lambda x: (x[0], int(x[2])))
context = '6144'
mutation_pd, skipped_mut, total, total_DINUC, mutation_dinuc_pd_all = matGen.catalogue_generator_single (lines, chrom, mutation_pd, mutation_dinuc_pd_all, mutation_types_tsb_context, vcf_path, vcf_path_original, vcf_files, bed_file_path, chrom_path, project, output_matrix, context, exome, genome, ncbi_chrom, functionFlag, bed, bed_ranges, chrom_based, plot, tsb_ref, transcript_path, tsb_stat, seqInfo, gs, log_file)
if chrom_based and not exome and not bed:
matrices = matGen.matrix_generator (context, output_matrix, project, samples, bias_sort, mutation_pd, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat)
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
dinuc_mat = matGen.matrix_generator_DINUC (output_matrix, samples, bias_sort, mutation_dinuc_pd_all, mutation_types_tsb_context, project, exome, bed, chrom, plot)
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
skipped_muts += skipped_mut
analyzed_muts[0] += total
analyzed_muts[1] += total_DINUC
sample_count_high = len(samples)
if exome:
with open(vcf_path + "exome_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
# mutation_pd['6144'], samples2 = matGen.exome_check(mutation_pd['6144'], genome, vcf_path + "exome_temp.txt", output_matrix, project, "SNV", cushion)
mutation_pd['6144'], samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat, mutation_pd['6144'], genome, vcf_path + "exome_temp.txt", output_matrix, project, "SNV", cushion)
if bed:
with open(vcf_path + "bed_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "bed_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
mutation_pd['6144'], samples2 = matGen.panel_check(chrom_based, samples, bias_sort, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat, mutation_pd['6144'], genome, vcf_path + "bed_temp.txt", output_matrix, bed_file_path, project, "SNV", cushion)
if not chrom_based:
if not mutation_pd['6144'].empty:
matrices = matGen.matrix_generator (context, output_matrix, project, samples, bias_sort, mutation_pd, exome, mut_types, bed, chrom_start, functionFlag, plot, tsb_stat)
if analyzed_muts[1] > 0:
if exome:
with open(vcf_path + "exome_temp_context_tsb_DINUC.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp_context_tsb_DINUC.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
mutation_dinuc_pd_all, samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, mutation_types_tsb_context, bed, chrom, functionFlag, plot, tsb_stat, mutation_dinuc_pd_all, genome, vcf_path + "exome_temp_context_tsb_DINUC.txt", output_matrix, project, "DBS", cushion)
if bed:
with open(vcf_path + "bed_temp_context_tsb_DINUC.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "bed_temp_context_tsb_DINUC.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
mutation_dinuc_pd_all, samples2 = matGen.panel_check(chrom_based, samples, bias_sort, exome, mutation_types_tsb_context, bed, chrom, functionFlag, plot, tsb_stat, mutation_dinuc_pd_all, genome, vcf_path + "bed_temp_context_tsb_DINUC.txt", output_matrix, bed_file_path, project, "DBS", cushion)
if not chrom_based:
if not mutation_dinuc_pd_all.empty:
dinuc_mat = matGen.matrix_generator_DINUC (output_matrix, samples, bias_sort, mutation_dinuc_pd_all, mutation_types_tsb_context, project, exome, bed, chrom_start, plot)
matrices['DINUC'] = dinuc_mat
else:
for file in vcf_files:
chrom = file.split("_")[0]
with open(vcf_path + file) as f:
lines = [line.strip().split() for line in f]
lines = sorted(lines, key = lambda x: (x[0], int(x[2])))
mutation_ID, skipped_mut, total = matGen.catalogue_generator_INDEL_single (mutation_ID, lines, chrom, vcf_path, vcf_path_original, vcf_files, bed_file_path, chrom_path, project, output_matrix, exome, genome, ncbi_chrom, limited_indel, functionFlag, bed, bed_ranges, chrom_based, plot, tsb_ref, transcript_path, seqInfo, gs, log_file)
if chrom_based and not exome and not bed:
matGen.matrix_generator_INDEL(output_matrix, samples, indel_types, indel_types_tsb, indel_types_simple, mutation_ID['ID'], mutation_ID['tsb'], mutation_ID['simple'], mutation_ID['complete'], project, exome, limited_indel, bed, chrom, plot)
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['simple'] = pd.DataFrame(0, index=indel_types_simple, columns=samples)
mutation_ID['tsb'] = pd.DataFrame(0, index=indel_types_tsb, columns=samples)
mutation_ID['complete'] = pd.DataFrame(0, index=indel_complete, columns=samples)
skipped_muts += skipped_mut
analyzed_muts[2] += total
# Performs the final filter on the variants base upon the exome if desired
if exome:
with open(vcf_path + "exome_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_ID = {}
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['ID'], samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, indel_types, bed, chrom, functionFlag, plot, tsb_stat, mutation_ID['ID'], genome, vcf_path + "exome_temp.txt", output_matrix, project, "ID", cushion, '83')
with open(vcf_path + "exome_temp_simple.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp_simple.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_ID['simple'] = pd.DataFrame(0, index=indel_types_simple, columns=samples)
mutation_ID['simple'], samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, indel_types_simple, bed, chrom, functionFlag, plot, tsb_stat, mutation_ID['simple'], genome, vcf_path + "exome_temp_simple.txt", output_matrix, project, "ID", cushion, 'simple')
with open(vcf_path + "exome_temp_tsb.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp_tsb.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_ID['tsb'] = pd.DataFrame(0, index=indel_types_tsb, columns=samples)
mutation_ID['tsb'], samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, indel_types_tsb, bed, chrom, functionFlag, plot, tsb_stat, mutation_ID['tsb'], genome, vcf_path + "exome_temp_tsb.txt", output_matrix, project, "ID", cushion, 'tsb')
mutation_ID['complete'] = pd.DataFrame(0, index=indel_complete, columns=samples)
if bed:
with open(vcf_path + "bed_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "bed_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_ID = {}
mutation_ID['ID'] = | pd.DataFrame(0, index=indel_types, columns=samples) | pandas.DataFrame |
'''
Auther: littleherozzzx
Date: 2022-01-13 16:48:51
LastEditTime: 2022-03-08 12:42:39
'''
import base64
import json
import logging
import os.path
import sys
import threading
import time
import pandas as pd
import requests
import rsa
import yaml
from getpass4 import getpass
from bs4 import BeautifulSoup
import config
logging.basicConfig(level=logging.INFO)
class hdu_jwc:
def __init__(self) -> None:
self.username = ""
self.password = ""
self.url = "http://newjw.hdu.edu.cn/jwglxt"
self.headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.5",
"Connection": "keep-alive",
"Content-Length": "477",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "",
"Host": "newjw.hdu.edu.cn",
"Origin": "http://newjw.hdu.edu.cn",
"Referer": "http://newjw.hdu.edu.cn/jwglxt/xtgl/login_slogin.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0"
}
self.cookies = ""
self.modulus = ""
self.exponent = ""
self.pub_key = None
self.csrftoken = ""
self.session = requests.Session()
self.session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0"
self.session.trust_env = False
self.xuanke = ""
self.index = ""
self.class_list = []
self.dict = {
"01": "E0BE1EB065FBFA29E0536264A8C04A31",
"10": "E0BDC4C7604BD44BE0536264A8C0B7EC",
"05": "E0BE43551AEB415FE0536164A8C06426",
}
self.cfg = config.Cfg()
def load_cfg(self, cfg_file):
try:
if self.cfg.load_cfg(cfg_file) == 1:
logging.info("配置文件加载成功!")
return 1
except FileNotFoundError:
logging.error(f"配置文件加载失败!文件{cfg_file}不存在!")
logging.info("请重新生成配置文件。")
return 0
except ValueError as error:
logging.error("配置文件加载失败!")
logging.error(error)
logging.info("请重新生成配置文件。")
return 0
except KeyError as error:
logging.error("配置文件加载失败!")
logging.error(error)
logging.info("请重新生成配置文件。")
return 0
def init_cfg(self):
page_file = input("请输入网页文件路径(网页文件获取方式请查看readme):\n")
while (not os.path.exists(page_file)):
page_file = input("文件不存在,请重新输入!")
try:
if self.cfg.init_cfg(page_file) == 1:
print("配置文件生成成功!")
return 1
except KeyError as error:
logging.error("配置文件生成失败!")
logging.error(error)
logging.info("请按照readme中要求获取网页文件,或手动在config.json中添加缺失项。")
def set_pubKey(self):
try:
response = self.session.get(
url=self.url + "/xtgl/login_getPublicKey.html?time={}".format(round(time.time() * 1000)))
data = response.json()
self.modulus = base64.b64decode(data["modulus"])
self.exponent = base64.b64decode(data["exponent"])
self.pub_key = rsa.PublicKey(int.from_bytes(
self.modulus, 'big'), int.from_bytes(self.exponent, 'big'))
logging.info("公钥设置成功!")
except json.decoder.JSONDecodeError:
raise ConnectionError("connect failed")
def encoded(self, s):
encoded_pwd = rsa.encrypt(s, self.pub_key)
return base64.b64encode(encoded_pwd)
def set_csrftoken(self):
response = self.session.get(
url=self.url + "/xtgl/login_slogin.html?language=zh_CN&_t={}".format(round(time.time() * 1000)))
self.headers["Cookie"] = "JSESSIONID={}; route={}".format(
self.session.cookies["JSESSIONID"], self.session.cookies["route"])
# self.headers["Cookie"]=self.session.cookies
page = BeautifulSoup(response.text, features="html.parser")
self.cookies = response.cookies
self.csrftoken = page.find("input", attrs={"id": "csrftoken"})["value"]
logging.info("csrftoken设置成功!")
def login(self):
encoded_password = self.encoded(self.password).decode("utf-8")
params = {
"csrftoken": self.csrftoken,
"language": "zh_CN",
"yhm": self.username,
"mm": [encoded_password, encoded_password]}
try:
response = self.session.post(url=self.url + "/xtgl/login_slogin.html?time={}".format(round(
time.time() * 1000)), headers=self.headers, data=params, cookies=self.session.cookies,
allow_redirects=False)
self.cookies = response.cookies
except requests.exceptions.ConnectionError as error:
logging.error(error)
raise error
try:
if response.status_code == 302:
self.index = self.session.get(url=response.headers["Location"])
if self.username in self.index.text:
logging.info("登录成功")
else:
raise RuntimeError("unknown error!")
else:
if "不正确" in response.text:
raise ValueError("学号或密码错误!")
except ValueError as error:
logging.error("当前学号为 {}, 当前密码为 {}".format(self.username, self.password))
logging.error(error)
raise error
except RuntimeError as error:
logging.error("当前学号为 {}, 当前密码为 {}".format(self.username, self.password))
logging.error(error)
raise error
return
def login_course_selection(self):
page = BeautifulSoup(self.index.text, features="html.parser")
link = page.find('a', text="自主选课")
link = link["onclick"].split("\'")
gndm = link[1]
url = self.url + link[3]
params = {"gnmkdm": "index", "su": self.username, "layout": "default"}
data = {"gndm": gndm}
response = self.session.get(url=url, data=data, params=params)
return response
def logout(self):
params = {"t": str(round(time.time() * 1000)), "login_type": ""}
self.session.get(url=self.url + "/logout", params=params)
pass
def query_margin(self, jxb_id, kklxdm, rwlx="2", index=0):
data1 = \
{
"xkly": "",
"bklx_id": "",
"sfkkjyxdxnxq": "",
"xqh_id": "",
"jg_id": "",
"njdm_id_1": "",
"zyh_id_1": "",
"zyh_id": "",
"zyfx_id": "",
"njdm_id": "",
"bh_id": "",
"xbm": "",
"xslbdm": "",
"ccdm": "",
"xsbj": "",
"sfkknj": "",
"sfkkzy": "",
"kzybkxy": "",
"sfznkx": "",
"zdkxms": "",
"sfkxq": "",
"sfkcfx": "",
"kkbk": "",
"kkbkdj": "",
"sfkgbcx": "",
"sfrxtgkcxd": "",
"tykczgxdcs": "",
"xkxnm": "",
"xkxqm": "",
"rlkz": "",
"xkzgbj": "",
"jspage": "",
"jxbzb": "",
"mzm": "",
"bbhzxjxb":"",
"xz":""
}
data2 = \
{
"xkly": "",
"bklx_id": "",
"sfkkjyxdxnxq": "",
"xqh_id": "",
"jg_id": "",
"zyh_id": "",
"zyfx_id": "",
"njdm_id": "",
"bh_id": "",
"xbm": "",
"xslbdm": "",
"ccdm": "",
"xsbj": "",
"sfkknj": "",
"sfkkzy": "",
"kzybkxy": "",
"sfznkx": "",
"zdkxms": "",
"sfkxq": "",
"sfkcfx": "",
"kkbk": "",
"kkbkdj": "",
"xkxnm": "",
"xkxqm": "",
"xkxskcgskg": "",
"rlkz": "",
"jxbzcxskg": "",
"cxbj": "",
"fxbj": "",
"mzm": "",
"bbhzxjxb":"",
"xz":""
}
self.cfg.get_data(data1)
self.cfg.get_data(data2)
data1["filter_list"] = [jxb_id]
data1["rwlx"] = str(rwlx)
data1["kklxdm"] = kklxdm
data1["kspage"] = "1"
params = {"gnmkdm": "N253512", "su": self.username}
res1 = \
self.session.post(url="http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzb_cxZzxkYzbPartDisplay.html", data=data1,
params=params).json()["tmpList"]
data2["filter_list"] = [jxb_id]
data2["rwlx"] = str(rwlx)
data2["kklxdm"] = kklxdm
data2["kch_id"] = res1[0]["kch_id"]
data2["xkkz_id"] = self.dict[kklxdm]
res2 = self.session.post(url="http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzbjk_cxJxbWithKchZzxkYzb.html",
data=data2, params=params).json()
if index > len(res1) or index > len(res2):
raise IndexError("不存在该节课")
return [res1[index], res2[index]]
def query_by_class_id(self, jxb_id, kklxdm, rwlx="2"):
data1 = \
{
"xkly": "",
"bklx_id": "",
"sfkkjyxdxnxq": "",
"xqh_id": "",
"jg_id": "",
"njdm_id_1": "",
"zyh_id_1": "",
"zyh_id": "",
"zyfx_id": "",
"njdm_id": "",
"bh_id": "",
"xbm": "",
"xslbdm": "",
"ccdm": "",
"xsbj": "",
"sfkknj": "",
"sfkkzy": "",
"kzybkxy": "",
"sfznkx": "",
"zdkxms": "",
"sfkxq": "",
"sfkcfx": "",
"kkbk": "",
"kkbkdj": "",
"sfkgbcx": "",
"sfrxtgkcxd": "",
"tykczgxdcs": "",
"xkxnm": "",
"xkxqm": "",
"rlkz": "",
"xkzgbj": "",
"jspage": "",
"jxbzb": "",
"mzm": "",
"bbhzxjxb":"",
"xz":""
}
data2 = \
{
"xkly": "",
"bklx_id": "",
"sfkkjyxdxnxq": "",
"xqh_id": "",
"jg_id": "",
"zyh_id": "",
"zyfx_id": "",
"njdm_id": "",
"bh_id": "",
"xbm": "",
"xslbdm": "",
"ccdm": "",
"xsbj": "",
"sfkknj": "",
"sfkkzy": "",
"kzybkxy": "",
"sfznkx": "",
"zdkxms": "",
"sfkxq": "",
"sfkcfx": "",
"kkbk": "",
"kkbkdj": "",
"xkxnm": "",
"xkxqm": "",
"xkxskcgskg": "",
"rlkz": "",
"jxbzcxskg": "",
"cxbj": "",
"fxbj": "",
"mzm": "",
"bbhzxjxb":"",
"xz":""
}
self.cfg.get_data(data1)
self.cfg.get_data(data2)
data1["filter_list"] = [jxb_id]
data1["rwlx"] = str(rwlx)
data1["kklxdm"] = kklxdm
data1["kspage"] = "1"
params = {"gnmkdm": "N253512", "su": self.username}
res1 = \
self.session.post(url="http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzb_cxZzxkYzbPartDisplay.html", data=data1,
params=params).json()["tmpList"]
data2["filter_list"] = [jxb_id]
data2["rwlx"] = str(rwlx)
data2["kklxdm"] = kklxdm
data2["kch_id"] = res1[0]["kch_id"] if len(res1)>0 else ""
data2["xkkz_id"] = self.dict[kklxdm]
res2 = self.session.post(url="http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzbjk_cxJxbWithKchZzxkYzb.html",
data=data2, params=params).json()
return [res1, res2]
def qiangke(self, index, times=1000, interval=1):
(res1, res2) = self.class_list[index]
data = {
"jxb_ids": res2["do_jxb_id"],
"kch_id": res1["kch_id"],
"kcmc": res1["kcmc"], # not same completely
"rwlx": rwlx,
"rlkz": "0",
"rlzlkz": "1",
"sxbj": "1",
"xxkbj": res1["xxkbj"],
"qz": "0",
"cxbj": res1["cxbj"],
"xkkz_id": self.dict[kklxdm],
"njdm_id": "2020",
"zyh_id": "0523",
"kklxdm": kklxdm,
"xklc" : "2",
"xkxnm": "2022",
"xkxqm": "3"
}
# headers = {
# "Accept": "application/json, text/javascript, */*; q=0.01"
# "Accept-Encoding": "gzip, deflate"
# "Accept-Language": "en,zh-CN;q=0.9,zh;q=0.8"
# "Connection": "keep-alive"
# "Content-Length": "552"
# "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
# "Cookie": "JSESSIONID=76EAADA342699A32AF7748779A92EC38; route=5063706ecbac8c9154cb45c088a91202"
# "Host": "newjw.hdu.edu.cn"
# "Origin": "http://newjw.hdu.edu.cn"
# "Referer": "http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzb_cxZzxkYzbIndex.html?gnmkdm=N253512&layout=default&su=20051336"
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36"
# "X-Requested-With": XMLHttpRequest"
# }
params = {"gnmkdm": "N253512", "su": self.username}
res = {"flag": 0}
for cnt in range(times):
res = self.session.post(url="http://newjw.hdu.edu.cn/jwglxt/xsxk/zzxkyzbjk_xkBcZyZzxkYzb.html", data=data,
params=params).json()
if res["flag"] == "-1":
logging.info("cnt={}: ".format(cnt) + "人数已满")
else:
logging.info("cnt={}: ".format(cnt) + str(res))
time.sleep(interval)
if res["flag"] == "1":
self.class_list.remove((res1, res2))
return 1
return -1
def add_to_list(self, jxb_id, kklxdm, rwlx):
[res1, res2] = self.query_by_class_id(jxb_id, rwlx=rwlx, kklxdm=kklxdm)
if len(res1) == 0 or len(res2) == 0:
print(f"不存在课程代码为{jxb_id}的课程,请核对输入")
return False
teacher_name = [res["jsxx"] for res in res2]
class_time = [res["sksj"] for res in res2]
already_picked = [res["yxzrs"] for res in res1]
total = [res["jxbrl"] for res in res2]
pick_info = [f"{al}/{tltal}" for al, tltal in zip(already_picked, total)]
data = | pd.DataFrame() | pandas.DataFrame |
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2021-01-20
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/RL-Bitcoin-trading-bot
# Description : additional functions
#
#================================================================
import pandas as pd
from collections import deque
import matplotlib.pyplot as plt
from mplfinance.original_flavor import candlestick_ohlc
import matplotlib.dates as mpl_dates
from datetime import datetime
import os
import cv2
import numpy as np
def Write_to_file(Date, net_worth, filename='{}.txt'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))):
for i in net_worth:
Date += " {}".format(i)
#print(Date)
if not os.path.exists('logs'):
os.makedirs('logs')
file = open("logs/"+filename, 'a+')
file.write(Date+"\n")
file.close()
class TradingGraph:
# A crypto trading visualization using matplotlib made to render custom prices which come in following way:
# Date, Open, High, Low, Close, Volume, net_worth, trades
# call render every step
def __init__(self, Render_range, Show_reward=False, Show_indicators=False):
self.Volume = deque(maxlen=Render_range)
self.net_worth = deque(maxlen=Render_range)
self.render_data = deque(maxlen=Render_range)
self.Render_range = Render_range
self.Show_reward = Show_reward
self.Show_indicators = Show_indicators
# We are using the style ‘ggplot’
plt.style.use('ggplot')
# close all plots if there are open
plt.close('all')
# figsize attribute allows us to specify the width and height of a figure in unit inches
self.fig = plt.figure(figsize=(16,8))
# Create top subplot for price axis
self.ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
# Create bottom subplot for volume which shares its x-axis
self.ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex=self.ax1)
# Create a new axis for net worth which shares its x-axis with price
self.ax3 = self.ax1.twinx()
# Formatting Date
self.date_format = mpl_dates.DateFormatter('%d-%m-%Y')
# Add paddings to make graph easier to view
#plt.subplots_adjust(left=0.07, bottom=-0.1, right=0.93, top=0.97, wspace=0, hspace=0)
# define if show indicators
if self.Show_indicators:
self.Create_indicators_lists()
def Create_indicators_lists(self):
# Create a new axis for indicatorswhich shares its x-axis with volume
self.ax4 = self.ax2.twinx()
self.sma7 = deque(maxlen=self.Render_range)
self.sma25 = deque(maxlen=self.Render_range)
self.sma99 = deque(maxlen=self.Render_range)
self.bb_bbm = deque(maxlen=self.Render_range)
self.bb_bbh = deque(maxlen=self.Render_range)
self.bb_bbl = deque(maxlen=self.Render_range)
self.psar = deque(maxlen=self.Render_range)
self.MACD = deque(maxlen=self.Render_range)
self.RSI = deque(maxlen=self.Render_range)
def Plot_indicators(self, df, Date_Render_range):
self.sma7.append(df["sma7"])
self.sma25.append(df["sma25"])
self.sma99.append(df["sma99"])
self.bb_bbm.append(df["bb_bbm"])
self.bb_bbh.append(df["bb_bbh"])
self.bb_bbl.append(df["bb_bbl"])
self.psar.append(df["psar"])
self.MACD.append(df["MACD"])
self.RSI.append(df["RSI"])
# Add Simple Moving Average
self.ax1.plot(Date_Render_range, self.sma7,'-')
self.ax1.plot(Date_Render_range, self.sma25,'-')
self.ax1.plot(Date_Render_range, self.sma99,'-')
# Add Bollinger Bands
self.ax1.plot(Date_Render_range, self.bb_bbm,'-')
self.ax1.plot(Date_Render_range, self.bb_bbh,'-')
self.ax1.plot(Date_Render_range, self.bb_bbl,'-')
# Add Parabolic Stop and Reverse
self.ax1.plot(Date_Render_range, self.psar,'.')
self.ax4.clear()
# # Add Moving Average Convergence Divergence
self.ax4.plot(Date_Render_range, self.MACD,'r-')
# # Add Relative Strength Index
self.ax4.plot(Date_Render_range, self.RSI,'g-')
# Render the environment to the screen
#def render(self, Date, Open, High, Low, Close, Volume, net_worth, trades):
def render(self, df, net_worth, trades):
Date = df["Date"]
Open = df["Open"]
High = df["High"]
Low = df["Low"]
Close = df["Close"]
Volume = df["Volume"]
# append volume and net_worth to deque list
self.Volume.append(Volume)
self.net_worth.append(net_worth)
# before appending to deque list, need to convert Date to special format
Date = mpl_dates.date2num([pd.to_datetime(Date)])[0]
self.render_data.append([Date, Open, High, Low, Close])
# Clear the frame rendered last step
self.ax1.clear()
candlestick_ohlc(self.ax1, self.render_data, width=0.8/24, colorup='green', colordown='red', alpha=0.8)
# Put all dates to one list and fill ax2 sublot with volume
Date_Render_range = [i[0] for i in self.render_data]
self.ax2.clear()
self.ax2.fill_between(Date_Render_range, self.Volume, 0)
if self.Show_indicators:
self.Plot_indicators(df, Date_Render_range)
# draw our net_worth graph on ax3 (shared with ax1) subplot
self.ax3.clear()
self.ax3.plot(Date_Render_range, self.net_worth, color="blue")
# beautify the x-labels (Our Date format)
self.ax1.xaxis.set_major_formatter(self.date_format)
self.fig.autofmt_xdate()
minimum = np.min(np.array(self.render_data)[:,1:])
maximum = np.max(np.array(self.render_data)[:,1:])
RANGE = maximum - minimum
# sort sell and buy orders, put arrows in appropiate order positions
for trade in trades:
trade_date = mpl_dates.date2num([pd.to_datetime(trade['Date'])])[0]
if trade_date in Date_Render_range:
if trade['type'] == 'buy':
high_low = trade['Low'] - RANGE*0.02
ycoords = trade['Low'] - RANGE*0.08
self.ax1.scatter(trade_date, high_low, c='green', label='green', s = 120, edgecolors='none', marker="^")
else:
high_low = trade['High'] + RANGE*0.02
ycoords = trade['High'] + RANGE*0.06
self.ax1.scatter(trade_date, high_low, c='red', label='red', s = 120, edgecolors='none', marker="v")
if self.Show_reward:
try:
self.ax1.annotate('{0:.2f}'.format(trade['Reward']), (trade_date-0.02, high_low), xytext=(trade_date-0.02, ycoords),
bbox=dict(boxstyle='round', fc='w', ec='k', lw=1), fontsize="small")
except:
pass
# we need to set layers every step, because we are clearing subplots every step
self.ax2.set_xlabel('Date')
self.ax1.set_ylabel('Price')
self.ax3.set_ylabel('Balance')
# I use tight_layout to replace plt.subplots_adjust
self.fig.tight_layout()
"""Display image with matplotlib - interrupting other tasks"""
# Show the graph without blocking the rest of the program
#plt.show(block=False)
# Necessary to view frames before they are unrendered
#plt.pause(0.001)
"""Display image with OpenCV - no interruption"""
# redraw the canvas
self.fig.canvas.draw()
# convert canvas to image
img = np.fromstring(self.fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
img = img.reshape(self.fig.canvas.get_width_height()[::-1] + (3,))
# img is rgb, convert to opencv's default bgr
image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# display image with OpenCV or any operation you like
cv2.imshow("Bitcoin trading bot",image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
return
else:
return img
def Plot_OHCL(df):
df_original = df.copy()
# necessary convert to datetime
df["Date"] = | pd.to_datetime(df.Date) | pandas.to_datetime |
import random
import pandas as pd
import pytest
from suda import suda, find_msu
@pytest.fixture
def data():
persons = [
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'primary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'male', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'rural', 'education': 'secondary complete', 'labourstatus': 'unemployed'},
{'gender': 'male', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'primary complete', 'labourstatus': 'non-LF'},
{'gender': 'male', 'region': 'urban', 'education': 'post-secondary', 'labourstatus': 'unemployed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'non-LF'}
]
return | pd.DataFrame(persons) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal as S
import scipy.io.wavfile as wavfile
import os
from functools import reduce
import matrices as M
import waveforms as W
import tuning as T
def get_signal(path):
return wavfile.read(path)[1]
def signal_to_csv(path):
sound = get_signal(path)
df = | pd.DataFrame(sound) | pandas.DataFrame |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
import sys
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
###############################################################################################
# support class to redirect stderr
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self,name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# Stderr
oldstderr = sys.stderr # global
def capture_stderr(log):
oldstderr = sys.stderr
sys.stderr = open(log, 'w')
sys.stderr = flushfile(sys.stderr)
return log
def restore_stderr():
sys.stderr = oldstderr
def parse_xgblog(xgblog):
import re
pattern = re.compile(r'^\[(?P<round>\d+)\]\s*\D+:(?P<validation>\d+.\d+)\s*\D+:(?P<train>\d+.\d+)')
xgb_list = []
with open(xgblog, "r") as ins:
next(ins)
for line in ins:
match = pattern.match(line)
if match:
idx = int(match.group("round"))
validation = float(match.group("validation"))
training = float(match.group("train"))
xgb_list.append([idx, validation, training])
else:
pass # raise Exception("Failed to parse!")
return xgb_list
def preprocess_data(train,test):
id_test=test['patient_id']
train=train.drop(['patient_id'],axis=1)
test=test.drop(['patient_id'],axis=1)
y=train['is_screener']
train=train.drop(['is_screener'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
os.chdir(os.getcwd())
train_file = '../input/patients_train.csv.gz'
test_file = '../input/patients_test.csv.gz'
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
train.drop( 'patient_gender', axis = 1, inplace = True )
test.drop( 'patient_gender', axis = 1, inplace = True )
########## last asctivity files
activity_file=('../input/activity_selected_last.csv.gz')
diagnosis_file=('../input/diagnosis_selected_last.csv.gz')
procedure_file=('../input/procedure_selected_last.csv.gz')
surgical_file=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
physicians_file=('../input/physicians.csv.gz')
drugs_file=('../input/drugs.csv.gz')
############ first activity files
activity_file_first=('../input/activity_selected_last.csv.gz')
diagnosis_file_first=('../input/diagnosis_selected_last.csv.gz')
procedure_file_first=('../input/procedure_selected_last.csv.gz')
surgical_file_first=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
activity=pd.read_csv(activity_file )
#Fa=pd.read_csv(activity_file_first,usecols=['activity_year'])
#print(Fa)
#activity['activity_first_year']=Fa['activity_year']
#activity['delta_time_activity']=activity['activity_year']-activity['activity_first_year']
#print(activity[activity['delta_time_activity']!=0,'delta_time_activity'])
train=pd.merge(train,activity, on='patient_id',how='left')
test=pd.merge(test,activity, on='patient_id',how='left')
print('after merging activity')
print(train.shape,test.shape)
procedure=pd.read_csv(procedure_file )
diagnosis=pd.read_csv(diagnosis_file)
diagnosis=pd.merge(diagnosis,procedure,on=['patient_id','claim_id'],how='left')
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test=pd.merge(test,diagnosis, on='patient_id',how='left')
print('after merging diagnosis ')
print(train.shape,test.shape)
prescription=pd.read_csv(prescription_file)
drugs=pd.read_csv(drugs_file)
physicians=pd.read_csv(physicians_file)
prescription=pd.merge(prescription,drugs ,on='drug_id',how='left')
prescription=pd.merge(prescription,physicians,on='practitioner_id',how='left')
train=pd.merge(train,prescription,on='patient_id',how='left')
test=pd.merge(test,prescription,on='patient_id',how='left')
print('after merging prescription ')
print(train.shape,test.shape)
surgical=pd.read_csv(surgical_file )
train=pd.merge(train,surgical, on='patient_id',how='left')
test=pd.merge(test,surgical, on='patient_id',how='left')
print('after merging surgical')
print(train.shape,test.shape)
test=test.fillna(0)
id_test,test,train,y=preprocess_data(train,test)
print(train.shape,test.shape)
#print(train.columns)
X=np.asarray(train)
y=np.asarray(y)
X_test=np.asarray(test)
X,y=shuffle(X,y,random_state=9)
X_train,X_val0,y_train,y_val0 = train_test_split(X,y,test_size=0.1,random_state=17)
X_train,X_val,y_train,y_val = train_test_split(X_train,y_train,test_size=0.1,random_state=77)
dval=xgb.DMatrix(data=X_val,label=y_val)
dtrain=xgb.DMatrix(data=X_train,label=y_train)
DTest=xgb.DMatrix(data=X_test)
Dval0=xgb.DMatrix(data=X_val0)
watchlist = [(dval,'eval'), (dtrain,'train')]
params = {"objective": "binary:logistic",
"eta": 0.1,
#"gamma":0.5,
"max_depth": 8,
#"max_delta_step":1,
#"min_child_weight":10,
"silent":1,
"subsample": 0.95,
"colsample_bytree": 0.7,
"seed": 777,
"booster": "gbtree",
"nthread":-1,
"eval_metric":'auc'
}
#
# Train an XGBoost model
# Train an XGBoost model
xgblog = capture_stderr('xgb.log')
clf = xgb.train(params, dtrain, num_boost_round=1500, evals=watchlist, early_stopping_rounds=30,verbose_eval=True,
maximize= True)
restore_stderr()
from sklearn import metrics
predictions=clf.predict(DTest)
score=clf.best_score
print('best score:%s'%score)
y_pred=clf.predict(Dval0)
score=metrics.roc_auc_score(y_val0, y_pred)
print('score on extra set:%s' %score)
model='XGBOOST_onraw_features'
#
# predict on test set
submission='%s_score_%03f.csv' %(model,score)
# create submission file
xgb_preds = | pd.DataFrame({"patient_id": id_test, 'predict_screener': predictions}) | pandas.DataFrame |
import abc
import logging
from typing import Union, Dict, Tuple, List, Set, Callable
import pandas as pd
import warnings
import numpy as np
import scipy.sparse
import xarray as xr
import patsy
try:
import anndata
except ImportError:
anndata = None
import batchglm.data as data_utils
from batchglm.xarray_sparse import SparseXArrayDataArray, SparseXArrayDataSet
from batchglm.models.glm_nb import Model as GeneralizedLinearModel
from ..stats import stats
from . import correction
from ..models.batch_bfgs.optim import Estim_BFGS
from diffxpy import pkg_constants
logger = logging.getLogger(__name__)
def _dmat_unique(dmat, sample_description):
dmat, idx = np.unique(dmat, axis=0, return_index=True)
sample_description = sample_description.iloc[idx].reset_index(drop=True)
return dmat, sample_description
class _Estimation(GeneralizedLinearModel, metaclass=abc.ABCMeta):
"""
Dummy class specifying all needed methods / parameters necessary for a model
fitted for DifferentialExpressionTest.
Useful for type hinting.
"""
@property
@abc.abstractmethod
def X(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def design_loc(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def design_scale(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def constraints_loc(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def constraints_scale(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def num_observations(self) -> int:
pass
@property
@abc.abstractmethod
def num_features(self) -> int:
pass
@property
@abc.abstractmethod
def features(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def observations(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def log_likelihood(self, **kwargs) -> np.ndarray:
pass
@property
@abc.abstractmethod
def loss(self, **kwargs) -> np.ndarray:
pass
@property
@abc.abstractmethod
def gradients(self, **kwargs) -> np.ndarray:
pass
@property
@abc.abstractmethod
def hessians(self, **kwargs) -> np.ndarray:
pass
@property
@abc.abstractmethod
def fisher_inv(self, **kwargs) -> np.ndarray:
pass
class _DifferentialExpressionTest(metaclass=abc.ABCMeta):
"""
Dummy class specifying all needed methods / parameters necessary for DifferentialExpressionTest.
Useful for type hinting. Structure:
Methods which are called by constructor and which compute (corrected) p-values:
_test()
_correction()
Accessor methods for important metrics which have to be extracted from estimated models:
log_fold_change()
reduced_model_gradient()
full_model_gradient()
Interface method which provides summary of results:
results()
plot()
"""
def __init__(self):
self._pval = None
self._qval = None
self._mean = None
self._log_likelihood = None
@property
@abc.abstractmethod
def gene_ids(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def X(self):
pass
@abc.abstractmethod
def log_fold_change(self, base=np.e, **kwargs):
pass
def log2_fold_change(self, **kwargs):
"""
Calculates the pairwise log_2 fold change(s) for this DifferentialExpressionTest.
"""
return self.log_fold_change(base=2, **kwargs)
def log10_fold_change(self, **kwargs):
"""
Calculates the log_10 fold change(s) for this DifferentialExpressionTest.
"""
return self.log_fold_change(base=10, **kwargs)
def _test(self, **kwargs) -> np.ndarray:
pass
def _correction(self, method) -> np.ndarray:
"""
Performs multiple testing corrections available in statsmodels.stats.multitest.multipletests()
on self.pval.
:param method: Multiple testing correction method.
Browse available methods in the annotation of statsmodels.stats.multitest.multipletests().
"""
if np.all(np.isnan(self.pval)):
return self.pval
else:
return correction.correct(pvals=self.pval, method=method)
def _ave(self):
"""
Returns a xr.DataArray containing the mean expression by gene
:return: xr.DataArray
"""
pass
@property
def log_likelihood(self):
if self._log_likelihood is None:
self._log_likelihood = self._ll().compute()
return self._log_likelihood
@property
def mean(self):
if self._mean is None:
self._mean = self._ave()
if isinstance(self._mean, xr.DataArray): # Could also be np.ndarray coming out of XArraySparseDataArray
self._mean = self._mean.compute()
return self._mean
@property
def pval(self):
if self._pval is None:
self._pval = self._test().copy()
return self._pval
@property
def qval(self, method="fdr_bh"):
if self._qval is None:
self._qval = self._correction(method=method).copy()
return self._qval
def log10_pval_clean(self, log10_threshold=-30):
"""
Return log10 transformed and cleaned p-values.
NaN p-values are set to one and p-values below log10_threshold
in log10 space are set to log10_threshold.
:param log10_threshold: minimal log10 p-value to return.
:return: Cleaned log10 transformed p-values.
"""
pvals = np.reshape(self.pval, -1)
pvals = np.nextafter(0, 1, out=pvals, where=pvals == 0)
log10_pval_clean = np.log(pvals) / np.log(10)
log10_pval_clean[np.isnan(log10_pval_clean)] = 1
log10_pval_clean = np.clip(log10_pval_clean, log10_threshold, 0, log10_pval_clean)
return log10_pval_clean
def log10_qval_clean(self, log10_threshold=-30):
"""
Return log10 transformed and cleaned q-values.
NaN p-values are set to one and q-values below log10_threshold
in log10 space are set to log10_threshold.
:param log10_threshold: minimal log10 q-value to return.
:return: Cleaned log10 transformed q-values.
"""
qvals = np.reshape(self.qval, -1)
qvals = np.nextafter(0, 1, out=qvals, where=qvals == 0)
log10_qval_clean = np.log(qvals) / np.log(10)
log10_qval_clean[np.isnan(log10_qval_clean)] = 1
log10_qval_clean = np.clip(log10_qval_clean, log10_threshold, 0, log10_qval_clean)
return log10_qval_clean
@abc.abstractmethod
def summary(self, **kwargs) -> pd.DataFrame:
pass
def _threshold_summary(self, res, qval_thres=None,
fc_upper_thres=None, fc_lower_thres=None, mean_thres=None) -> pd.DataFrame:
"""
Reduce differential expression results into an output table with desired thresholds.
"""
if qval_thres is not None:
res = res.iloc[res['qval'].values <= qval_thres, :]
if fc_upper_thres is not None and fc_lower_thres is None:
res = res.iloc[res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2), :]
elif fc_upper_thres is None and fc_lower_thres is not None:
res = res.iloc[res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2), :]
elif fc_upper_thres is not None and fc_lower_thres is not None:
res = res.iloc[np.logical_or(
res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2),
res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2)), :]
if mean_thres is not None:
res = res.iloc[res['mean'].values >= mean_thres, :]
return res
def plot_volcano(
self,
corrected_pval=True,
log10_p_threshold=-30,
log2_fc_threshold=10,
alpha=0.05,
min_fc=1,
size=20,
highlight_ids: List = [],
highlight_size: float = 30,
highlight_col: str = "red",
show: bool = True,
save: Union[str, None] = None,
suffix: str = "_volcano.png"
):
"""
Returns a volcano plot of p-value vs. log fold change
:param corrected_pval: Whether to use multiple testing corrected
or raw p-values.
:param log10_p_threshold: lower bound of log10 p-values displayed in plot.
:param log2_fc_threshold: Negative lower and upper bound of
log2 fold change displayed in plot.
:param alpha: p/q-value lower bound at which a test is considered
non-significant. The corresponding points are colored in grey.
:param min_fc: Fold-change lower bound for visualization,
the points below the threshold are colored in grey.
:param size: Size of points.
:param highlight_ids: Genes to highlight in volcano plot.
:param highlight_ids: Size of points of genes to highlight in volcano plot.
:param highlight_ids: Color of points of genes to highlight in volcano plot.
:param show: Whether (if save is not None) and where (save indicates dir and file stem) to display plot.
:param save: Path+file name stem to save plots to.
File will be save+suffix. Does not save if save is None.
:param suffix: Suffix for file name to save plot to. Also use this to set the file type.
:return: Tuple of matplotlib (figure, axis)
"""
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import rcParams
plt.ioff()
if corrected_pval == True:
neg_log_pvals = - self.log10_qval_clean(log10_threshold=log10_p_threshold)
else:
neg_log_pvals = - self.log10_pval_clean(log10_threshold=log10_p_threshold)
logfc = np.reshape(self.log2_fold_change(), -1)
# Clipping throws errors if not performed in actual data format (ndarray or DataArray):
if isinstance(logfc, xr.DataArray):
logfc = logfc.clip(-log2_fc_threshold, log2_fc_threshold)
else:
logfc = np.clip(logfc, -log2_fc_threshold, log2_fc_threshold, logfc)
fig, ax = plt.subplots()
is_significant = np.logical_and(
neg_log_pvals >= - np.log(alpha) / np.log(10),
np.abs(logfc) >= np.log(min_fc) / np.log(2)
)
sns.scatterplot(y=neg_log_pvals, x=logfc, hue=is_significant, ax=ax,
legend=False, s=size,
palette={True: "orange", False: "black"})
highlight_ids_found = np.array([x in self.gene_ids for x in highlight_ids])
highlight_ids_clean = [highlight_ids[i] for i in np.where(highlight_ids_found == True)[0]]
highlight_ids_not_found = [highlight_ids[i] for i in np.where(highlight_ids_found == False)[0]]
if len(highlight_ids_not_found) > 0:
logger.warning("not all highlight_ids were found in data set: ", ", ".join(highlight_ids_not_found))
if len(highlight_ids_clean) > 0:
neg_log_pvals_highlights = np.zeros([len(highlight_ids_clean)])
logfc_highlights = np.zeros([len(highlight_ids_clean)])
is_highlight = np.zeros([len(highlight_ids_clean)])
for i,id in enumerate(highlight_ids_clean):
idx = np.where(self.gene_ids == id)[0]
neg_log_pvals_highlights[i] = neg_log_pvals[idx]
logfc_highlights[i] = logfc[idx]
sns.scatterplot(y=neg_log_pvals_highlights, x=logfc_highlights,
hue=is_highlight, ax=ax,
legend=False, s=highlight_size,
palette={0: highlight_col})
if corrected_pval == True:
ax.set(xlabel="log2FC", ylabel='-log10(corrected p-value)')
else:
ax.set(xlabel="log2FC", ylabel='-log10(p-value)')
# Save, show and return figure.
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
return ax
def plot_ma(
self,
corrected_pval=True,
log2_fc_threshold=10,
alpha=0.05,
size=20,
highlight_ids: List = [],
highlight_size: float = 30,
highlight_col: str = "red",
show: bool = True,
save: Union[str, None] = None,
suffix: str = "_my_plot.png"
):
"""
Returns an MA plot of mean expression vs. log fold change with significance
super-imposed.
:param corrected_pval: Whether to use multiple testing corrected
or raw p-values.
:param log2_fc_threshold: Negative lower and upper bound of
log2 fold change displayed in plot.
:param alpha: p/q-value lower bound at which a test is considered
non-significant. The corresponding points are colored in grey.
:param size: Size of points.
:param highlight_ids: Genes to highlight in volcano plot.
:param highlight_ids: Size of points of genes to highlight in volcano plot.
:param highlight_ids: Color of points of genes to highlight in volcano plot.
:param show: Whether (if save is not None) and where (save indicates dir and file stem) to display plot.
:param save: Path+file name stem to save plots to.
File will be save+suffix. Does not save if save is None.
:param suffix: Suffix for file name to save plot to. Also use this to set the file type.
:return: Tuple of matplotlib (figure, axis)
"""
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import rcParams
plt.ioff()
ave = np.log(self.mean + 1e-08)
logfc = np.reshape(self.log2_fold_change(), -1)
# Clipping throws errors if not performed in actual data format (ndarray or DataArray):
if isinstance(logfc, xr.DataArray):
logfc = logfc.clip(-log2_fc_threshold, log2_fc_threshold)
else:
logfc = np.clip(logfc, -log2_fc_threshold, log2_fc_threshold, logfc)
fig, ax = plt.subplots()
if corrected_pval:
is_significant = self.pval < alpha
else:
is_significant = self.qval < alpha
sns.scatterplot(y=logfc, x=ave, hue=is_significant, ax=ax,
legend=False, s=size,
palette={True: "orange", False: "black"})
highlight_ids_found = np.array([x in self.gene_ids for x in highlight_ids])
highlight_ids_clean = [highlight_ids[i] for i in np.where(highlight_ids_found == True)[0]]
highlight_ids_not_found = [highlight_ids[i] for i in np.where(highlight_ids_found == False)[0]]
if len(highlight_ids_not_found) > 0:
logger.warning("not all highlight_ids were found in data set: ", ", ".join(highlight_ids_not_found))
if len(highlight_ids_clean) > 0:
ave_highlights = np.zeros([len(highlight_ids_clean)])
logfc_highlights = np.zeros([len(highlight_ids_clean)])
is_highlight = np.zeros([len(highlight_ids_clean)])
for i,id in enumerate(highlight_ids_clean):
idx = np.where(self.gene_ids == id)[0]
ave_highlights[i] = ave[idx]
logfc_highlights[i] = logfc[idx]
sns.scatterplot(y=logfc_highlights, x=ave_highlights,
hue=is_highlight, ax=ax,
legend=False, s=highlight_size,
palette={0: highlight_col})
ax.set(xlabel="log2FC", ylabel='log mean expression')
# Save, show and return figure.
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
return ax
def plot_diagnostics(self):
"""
Directly plots a set of diagnostic diagrams
"""
import matplotlib.pyplot as plt
volcano = self.plot_volcano()
plt.show()
class _DifferentialExpressionTestSingle(_DifferentialExpressionTest, metaclass=abc.ABCMeta):
"""
_DifferentialExpressionTest for unit_test with a single test per gene.
The individual test object inherit directly from this class.
All implementations of this class should return one p-value and one fold change per gene.
"""
def summary(
self,
qval_thres=None,
fc_upper_thres=None,
fc_lower_thres=None,
mean_thres=None,
**kwargs
) -> pd.DataFrame:
"""
Summarize differential expression results into an output table.
"""
assert self.gene_ids is not None
res = pd.DataFrame({
"gene": self.gene_ids,
"pval": self.pval,
"qval": self.qval,
"log2fc": self.log2_fold_change(),
"mean": self.mean
})
return res
class DifferentialExpressionTestLRT(_DifferentialExpressionTestSingle):
"""
Single log-likelihood ratio test per gene.
"""
sample_description: pd.DataFrame
full_design_loc_info: patsy.design_info
full_estim: _Estimation
reduced_design_loc_info: patsy.design_info
reduced_estim: _Estimation
def __init__(
self,
sample_description: pd.DataFrame,
full_design_loc_info: patsy.design_info,
full_estim,
reduced_design_loc_info: patsy.design_info,
reduced_estim
):
super().__init__()
self.sample_description = sample_description
self.full_design_loc_info = full_design_loc_info
self.full_estim = full_estim
self.reduced_design_loc_info = reduced_design_loc_info
self.reduced_estim = reduced_estim
@property
def gene_ids(self) -> np.ndarray:
return np.asarray(self.full_estim.features)
@property
def X(self):
return self.full_estim.X
@property
def reduced_model_gradient(self):
return self.reduced_estim.gradients
@property
def full_model_gradient(self):
return self.full_estim.gradients
def _test(self):
if np.any(self.full_estim.log_likelihood < self.reduced_estim.log_likelihood):
logger.warning("Test assumption failed: full model is (partially) less probable than reduced model")
return stats.likelihood_ratio_test(
ll_full=self.full_estim.log_likelihood,
ll_reduced=self.reduced_estim.log_likelihood,
df_full=self.full_estim.constraints_loc.shape[1] + self.full_estim.constraints_scale.shape[1],
df_reduced=self.reduced_estim.constraints_loc.shape[1] + self.reduced_estim.constraints_scale.shape[1],
)
def _ave(self):
"""
Returns a xr.DataArray containing the mean expression by gene
:return: xr.DataArray
"""
return np.mean(self.full_estim.X, axis=0)
def _log_fold_change(self, factors: Union[Dict, Tuple, Set, List], base=np.e):
"""
Returns a xr.DataArray containing the locations for the different categories of the factors
:param factors: the factors to select.
E.g. `condition` or `batch` if formula would be `~ 1 + batch + condition`
:param base: the log base to use; default is the natural logarithm
:return: xr.DataArray
"""
if not (isinstance(factors, list) or isinstance(factors, tuple) or isinstance(factors, set)):
factors = {factors}
if not isinstance(factors, set):
factors = set(factors)
di = self.full_design_loc_info
sample_description = self.sample_description[[f.name() for f in di.subset(factors).factor_infos]]
dmat = self.full_estim.design_loc
# make rows unique
dmat, sample_description = _dmat_unique(dmat, sample_description)
# factors = factors.intersection(di.term_names)
# select the columns of the factors
cols = np.arange(len(di.column_names))
sel = np.concatenate([cols[di.slice(f)] for f in factors], axis=0)
neg_sel = np.ones_like(cols).astype(bool)
neg_sel[sel] = False
# overwrite all columns which are not specified by the factors with 0
dmat[:, neg_sel] = 0
# make the design matrix + sample description unique again
dmat, sample_description = _dmat_unique(dmat, sample_description)
locations = self.full_estim.inverse_link_loc(dmat.dot(self.full_estim.par_link_loc))
locations = np.log(locations) / np.log(base)
dist = np.expand_dims(locations, axis=0)
dist = np.transpose(dist, [1, 0, 2]) - dist
dist = xr.DataArray(dist, dims=("minuend", "subtrahend", "gene"))
# retval = xr.Dataset({"logFC": retval})
dist.coords["gene"] = self.gene_ids
for col in sample_description:
dist.coords["minuend_" + col] = (("minuend",), sample_description[col])
dist.coords["subtrahend_" + col] = (("subtrahend",), sample_description[col])
# # If this is a pairwise comparison, return only one fold change per gene
# if dist.shape[:2] == (2, 2):
# dist = dist[1, 0]
return dist
def log_fold_change(self, base=np.e, return_type="vector"):
"""
Calculates the pairwise log fold change(s) for this DifferentialExpressionTest.
Returns some distance matrix representation of size (groups x groups x genes) where groups corresponds
to the unique groups compared in this differential expression test.
:param base: the log base to use; default is the natural logarithm
:param return_type: Choose the return type.
Possible values are:
- "dataframe":
return a pandas.DataFrame with columns `gene`, `minuend_<group>`, `subtrahend_<group>` and `logFC`.
- "xarray":
return a xarray.DataArray with dimensions `(minuend, subtrahend, gene)`
:return: either pandas.DataFrame or xarray.DataArray
"""
factors = set(self.full_design_loc_info.term_names) - set(self.reduced_design_loc_info.term_names)
if return_type == "dataframe":
dists = self._log_fold_change(factors=factors, base=base)
df = dists.to_dataframe("logFC")
df = df.reset_index().drop(["minuend", "subtrahend"], axis=1, errors="ignore")
return df
elif return_type == "vector":
if len(factors) > 1 or self.sample_description[list(factors)].drop_duplicates().shape[0] != 2:
return None
else:
dists = self._log_fold_change(factors=factors, base=base)
return dists[1, 0].values
else:
dists = self._log_fold_change(factors=factors, base=base)
return dists
def locations(self):
"""
Returns a pandas.DataFrame containing the locations for the different categories of the factors
:return: pd.DataFrame
"""
di = self.full_design_loc_info
sample_description = self.sample_description[[f.name() for f in di.factor_infos]]
dmat = self.full_estim.design_loc
dmat, sample_description = _dmat_unique(dmat, sample_description)
retval = self.full_estim.inverse_link_loc(dmat.dot(self.full_estim.par_link_loc))
retval = | pd.DataFrame(retval, columns=self.full_estim.features) | pandas.DataFrame |
from pathlib import Path
import abc
import logging
import io
import importlib
import time
from _collections import OrderedDict
import traceback
import pandas as pd
import numpy as np
import shutil
from graphviz import Digraph
from ibllib.misc import version
import one.params
from one.alf.files import add_uuid_string
from iblutil.io.parquet import np2str
from ibllib.oneibl.registration import register_dataset
from ibllib.oneibl.patcher import FTPPatcher, SDSCPatcher, SDSC_ROOT_PATH, SDSC_PATCH_PATH
from one.util import filter_datasets
_logger = logging.getLogger('ibllib')
class Task(abc.ABC):
log = ""
cpu = 1
gpu = 0
io_charge = 5 # integer percentage
priority = 30 # integer percentage, 100 means highest priority
ram = 4 # RAM needed to run (Go)
one = None # one instance (optional)
level = 0
outputs = None
time_elapsed_secs = None
time_out_secs = None
version = version.ibllib()
log = ''
signature = {'input_files': (), 'output_files': ()} # tuple (filename, collection, required_flag)
def __init__(self, session_path, parents=None, taskid=None, one=None,
machine=None, clobber=True, aws=None, location='server'):
self.taskid = taskid
self.one = one
self.session_path = session_path
self.register_kwargs = {}
if parents:
self.parents = parents
else:
self.parents = []
self.machine = machine
self.clobber = clobber
self.location = location
self.aws = aws
@property
def name(self):
return self.__class__.__name__
def run(self, **kwargs):
"""
--- do not overload, see _run() below---
wraps the _run() method with
- error management
- logging to variable
"""
# if taskid of one properties are not available, local run only without alyx
use_alyx = self.one is not None and self.taskid is not None
if use_alyx:
tdict = self.one.alyx.rest('tasks', 'partial_update', id=self.taskid,
data={'status': 'Started'})
self.log = ('' if not tdict['log'] else tdict['log'] +
'\n\n=============================RERUN=============================\n')
# setup
self.setUp()
# Setup the console handler with a StringIO object
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
str_format = '%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
ch.setFormatter(logging.Formatter(str_format))
_logger.addHandler(ch)
_logger.info(f"Starting job {self.__class__}")
if self.machine:
_logger.info(f"Running on machine: {self.machine}")
_logger.info(f"running ibllib version {version.ibllib()}")
# run
start_time = time.time()
self.status = 0
try:
self.outputs = self._run(**kwargs)
_logger.info(f"Job {self.__class__} complete")
except BaseException:
_logger.error(traceback.format_exc())
_logger.info(f"Job {self.__class__} errored")
self.status = -1
self.time_elapsed_secs = time.time() - start_time
# log the outputs-+
if isinstance(self.outputs, list):
nout = len(self.outputs)
elif self.outputs is None:
nout = 0
else:
nout = 1
_logger.info(f"N outputs: {nout}")
_logger.info(f"--- {self.time_elapsed_secs} seconds run-time ---")
# after the run, capture the log output, amend to any existing logs if not overwrite
new_log = log_capture_string.getvalue()
self.log = new_log if self.clobber else self.log + new_log
log_capture_string.close()
_logger.removeHandler(ch)
# tear down
self.tearDown()
return self.status
def register_datasets(self, one=None, **kwargs):
"""
Register output datasets form the task to Alyx
:param one:
:param jobid:
:param kwargs: directly passed to the register_dataset function
:return:
"""
assert one
if self.location == 'server':
return self._register_datasets_server(one=one, **kwargs)
elif self.location == 'remote':
return self._register_datasets_remote(one=one, **kwargs)
elif self.location == 'SDSC':
return self._register_datasets_SDSC(one=one, **kwargs)
elif self.location == 'AWS':
return self._register_datasets_AWS(one=one, **kwargs)
def _register_datasets_server(self, one=None, **kwargs):
if self.outputs:
if isinstance(self.outputs, list):
versions = [self.version for _ in self.outputs]
else:
versions = [self.version]
return register_dataset(self.outputs, one=one, versions=versions, **kwargs)
def _register_datasets_remote(self, one=None, **kwargs):
if self.outputs:
if isinstance(self.outputs, list):
versions = [self.version for _ in self.outputs]
else:
versions = [self.version]
ftp_patcher = FTPPatcher(one=one)
return ftp_patcher.create_dataset(path=self.outputs, created_by=self.one.alyx.user,
versions=versions, **kwargs)
def _register_datasets_SDSC(self, one=None, **kwargs):
if self.outputs:
if isinstance(self.outputs, list):
versions = [self.version for _ in self.outputs]
else:
versions = [self.version]
sdsc_patcher = SDSCPatcher(one=one)
return sdsc_patcher.patch_datasets(self.outputs, dry=False, versions=versions,
**kwargs)
def _register_datasets_AWS(self, one=None, **kwargs):
# GO through FTP patcher
if self.outputs:
if isinstance(self.outputs, list):
versions = [self.version for _ in self.outputs]
else:
versions = [self.version]
ftp_patcher = FTPPatcher(one=one)
return ftp_patcher.create_dataset(path=self.outputs, created_by=self.one.alyx.user,
versions=versions, **kwargs)
def rerun(self):
self.run(overwrite=True)
@abc.abstractmethod
def _run(self, overwrite=False):
"""
This is the method to implement
:param overwrite: (bool) if the output already exists,
:return: out_files: files to be registered. Could be a list of files (pathlib.Path),
a single file (pathlib.Path) an empty list [] or None.
Whithin the pipeline, there is a distinction between a job that returns an empty list
and a job that returns None. If the function returns None, the job will be labeled as
"empty" status in the database, otherwise, the job has an expected behaviour of not
returning any dataset.
"""
def setUp(self):
"""
Function to optionally overload to check inputs.
:return:
"""
# if on local server don't do anything
if self.location == 'server':
self._setUp_server()
elif self.location == 'remote':
self._setUp_remote()
elif self.location == 'SDSC':
self._setUp_SDSC()
elif self.location == 'AWS':
self._setUp_AWS()
def _setUp_server(self):
pass
def _setUp_remote(self):
assert self.one
df = self._getData()
self.one._download_datasets(df)
def _setUp_SDSC(self):
assert self.one
df = self._getData()
SDSC_TMP = Path(SDSC_PATCH_PATH.joinpath(self.__class__.__name__))
for _, d in df.iterrows():
file_path = Path(d['session_path']).joinpath(d['rel_path'])
file_uuid = add_uuid_string(file_path, np2str(np.r_[d.name[0], d.name[1]]))
file_link = SDSC_TMP.joinpath(file_path)
file_link.parent.mkdir(exist_ok=True, parents=True)
file_link.symlink_to(
Path(SDSC_ROOT_PATH.joinpath(file_uuid)))
self.session_path = SDSC_TMP.joinpath(d['session_path'])
def _setUp_AWS(self):
assert self.aws
assert self.one
df = self._getData()
self.aws._download_datasets(df)
def tearDown(self):
"""
Function to optionally overload to check results
"""
pass
def _getData(self):
"""
Funtcion to optionally overload to download/ create links to data
Important when running tasks in remote or SDSC locations
:return:
"""
assert self.one
session_datasets = self.one.list_datasets(self.one.path2eid(self.session_path), details=True)
df = | pd.DataFrame(columns=self.one._cache.datasets.columns) | pandas.DataFrame |
import os
import sys
import pytest
from shapely.geometry import Polygon, GeometryCollection
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from tests.fixtures import *
from tests.test_core_components_route import self_looping_route, route
from tests.test_core_components_service import service
from genet.inputs_handler import matsim_reader, gtfs_reader
from genet.inputs_handler import read
from genet.schedule_elements import Schedule, Service, Route, Stop, read_vehicle_types
from genet.utils import plot, spatial
from genet.validate import schedule_validation
from genet.exceptions import ServiceIndexError, RouteIndexError, StopIndexError, UndefinedCoordinateSystemError, \
ConflictingStopData, InconsistentVehicleModeError
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture()
def schedule():
route_1 = Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='2',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
@pytest.fixture()
def strongly_connected_schedule():
route_1 = Route(route_short_name='name',
mode='bus',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='3', x=3, y=3, epsg='epsg:27700', name='Stop_3'),
Stop(id='4', x=7, y=5, epsg='epsg:27700', name='Stop_4'),
Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['1', '2'], departure_offsets=['1', '2'],
id='1')
route_2 = Route(route_short_name='name_2',
mode='bus',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='7', x=3, y=3, epsg='epsg:27700', name='Stop_7'),
Stop(id='8', x=7, y=5, epsg='epsg:27700', name='Stop_8'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['1', '2', '3', '4', '5'],
departure_offsets=['1', '2', '3', '4', '5'],
id='2')
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
def test_initiating_schedule(schedule):
s = schedule
assert_semantically_equal(dict(s._graph.nodes(data=True)), {
'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'6': {'services': {'service'}, 'routes': {'2'}, 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()}})
assert_semantically_equal(s._graph.edges(data=True)._adjdict,
{'5': {'6': {'services': {'service'}, 'routes': {'2'}}},
'6': {'7': {'services': {'service'}, 'routes': {'2'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}}, '8': {}, '4': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}})
log = s._graph.graph.pop('change_log')
assert log.empty
assert_semantically_equal(s._graph.graph,
{'name': 'Schedule graph',
'routes': {'2': {'route_short_name': 'name_2', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '2', 'route': [],
'await_departure': [],
'ordered_stops': ['5', '6', '7', '8']},
'1': {'route_short_name': 'name', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [],
'ordered_stops': ['1', '2', '3', '4']}},
'services': {'service': {'id': 'service', 'name': 'name'}},
'route_to_service_map': {'1': 'service', '2': 'service'},
'service_to_route_map': {'service': ['1', '2']},
'crs': {'init': 'epsg:27700'}})
def test_initiating_schedule_with_non_uniquely_indexed_objects():
route_1 = Route(route_short_name='name',
mode='bus', id='',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_2_bus', 'veh_3_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service1 = Service(id='service', routes=[route_1, route_2])
service2 = Service(id='service', routes=[route_1, route_2])
s = Schedule(epsg='epsg:27700', services=[service1, service2])
assert s.number_of_routes() == 4
assert len(s) == 2
def test__getitem__returns_a_service(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert schedule['service'] == services[0]
def test_accessing_route(schedule):
assert schedule.route('1') == Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'),
Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'),
Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['1', '2'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
def test__repr__shows_number_of_services(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
schedule = Schedule('epsg:27700')
s = schedule.__repr__()
assert 'instance at' in s
assert 'services' in s
Schedule.__len__.assert_called()
def test__str__shows_info():
schedule = Schedule('epsg:27700')
assert 'Number of services' in schedule.__str__()
assert 'Number of routes' in schedule.__str__()
def test__len__returns_the_number_of_services(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert len(schedule) == 1
def test_print_shows_info(mocker):
mocker.patch.object(Schedule, 'info')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.info.assert_called_once()
def test_info_shows_number_of_services_and_routes(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
mocker.patch.object(Schedule, 'number_of_routes')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.__len__.assert_called()
Schedule.number_of_routes.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker, schedule):
mocker.patch.object(plot, 'plot_graph')
schedule.plot()
plot.plot_graph.assert_called_once()
def test_reproject_changes_projection_for_all_stops_in_route():
correct_x_y = {'x': -0.14967658860132668, 'y': 51.52393050617373}
schedule = Schedule(
'epsg:27700',
[Service(id='10314', routes=[
Route(
route_short_name='12',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_1_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])])
schedule.reproject('epsg:4326')
_stops = list(schedule.stops())
stops = dict(zip([stop.id for stop in _stops], _stops))
assert_semantically_equal({'x': stops['26997928P'].x, 'y': stops['26997928P'].y}, correct_x_y)
assert_semantically_equal({'x': stops['26997928P.link:1'].x, 'y': stops['26997928P.link:1'].y}, correct_x_y)
def test_adding_merges_separable_schedules(route):
schedule = Schedule(epsg='epsg:4326', services=[Service(id='1', routes=[route])])
before_graph_nodes = schedule.reference_nodes()
before_graph_edges = schedule.reference_edges()
a = Stop(id='10', x=40, y=20, epsg='epsg:27700', linkRefId='1')
b = Stop(id='20', x=10, y=20, epsg='epsg:27700', linkRefId='2')
c = Stop(id='30', x=30, y=30, epsg='epsg:27700', linkRefId='3')
d = Stop(id='40', x=70, y=50, epsg='epsg:27700', linkRefId='4')
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[Service(id='2', routes=[
Route(
route_short_name='name',
mode='bus',
stops=[a, b, c, d],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['04:40:00', '05:40:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'],
route=['1', '2', '3', '4'], id='2')
])])
tba_graph_nodes = schedule_to_be_added.reference_nodes()
tba_graph_edges = schedule_to_be_added.reference_edges()
schedule.add(schedule_to_be_added)
assert '1' in list(schedule.service_ids())
assert '2' in list(schedule.service_ids())
assert '1' in list(schedule.route_ids())
assert '2' in list(schedule.route_ids())
assert schedule.epsg == 'epsg:4326'
assert schedule.epsg == schedule_to_be_added.epsg
assert set(schedule._graph.nodes()) == set(before_graph_nodes) | set(tba_graph_nodes)
assert set(schedule._graph.edges()) == set(before_graph_edges) | set(tba_graph_edges)
def test_adding_throws_error_when_schedules_not_separable(test_service):
schedule = Schedule(epsg='epsg:4326', services=[test_service])
assert 'service' in schedule
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[test_service])
with pytest.raises(NotImplementedError) as e:
schedule.add(schedule_to_be_added)
assert 'This method only supports adding non overlapping services' in str(e.value)
def test_adding_calls_on_reproject_when_schedules_dont_have_matching_epsg(test_service, different_test_service, mocker):
mocker.patch.object(Schedule, 'reproject')
schedule = Schedule(services=[test_service], epsg='epsg:27700')
assert schedule.has_service('service')
schedule_to_be_added = Schedule(services=[different_test_service], epsg='epsg:4326')
schedule.add(schedule_to_be_added)
schedule_to_be_added.reproject.assert_called_once_with('epsg:27700')
def test_service_ids_returns_keys_of_the_services_dict(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert set(schedule.service_ids()) == {'service'}
def test_routes_returns_service_ids_with_unique_routes(route, similar_non_exact_test_route):
services = [Service(id='1', routes=[route]), Service(id='2', routes=[similar_non_exact_test_route])]
schedule = Schedule(services=services, epsg='epsg:4326')
routes = list(schedule.routes())
assert route in routes
assert similar_non_exact_test_route in routes
assert len(routes) == 2
def test_number_of_routes_counts_routes(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4362')
assert schedule.number_of_routes() == 3
def test_service_attribute_data_under_key(schedule):
df = schedule.service_attribute_data(keys='name').sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}}
))
def test_service_attribute_data_under_keys(schedule):
df = schedule.service_attribute_data(keys=['name', 'id']).sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}, 'id': {'service': 'service'}}
))
def test_route_attribute_data_under_key(schedule):
df = schedule.route_attribute_data(keys='route_short_name').sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}}
))
def test_route_attribute_data_under_keys(schedule):
df = schedule.route_attribute_data(keys=['route_short_name', 'mode']).sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}, 'mode': {'1': 'bus', '2': 'bus'}}
))
def test_stop_attribute_data_under_key(schedule):
df = schedule.stop_attribute_data(keys='x').sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0}}))
def test_stop_attribute_data_under_keys(schedule):
df = schedule.stop_attribute_data(keys=['x', 'y']).sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0},
'y': {'1': 2.0, '2': 2.0, '3': 3.0, '4': 5.0, '5': 2.0, '6': 2.0, '7': 3.0, '8': 5.0}}))
def test_extracting_services_on_condition(schedule):
ids = schedule.extract_service_ids_on_attributes(conditions={'name': 'name'})
assert ids == ['service']
def test_extracting_routes_on_condition(schedule):
ids = schedule.extract_route_ids_on_attributes(conditions=[{'mode': 'bus'}, {'route_short_name': 'name_2'}],
how=all)
assert ids == ['2']
def test_extracting_stops_on_condition(schedule):
ids = schedule.extract_stop_ids_on_attributes(conditions=[{'x': (0, 4)}, {'y': (0, 2)}], how=all)
assert set(ids) == {'5', '6', '1', '2'}
def test_getting_services_on_modal_condition(schedule):
service_ids = schedule.services_on_modal_condition(modes='bus')
assert service_ids == ['service']
def test_getting_routes_on_modal_condition(schedule):
route_ids = schedule.routes_on_modal_condition(modes='bus')
assert set(route_ids) == {'1', '2'}
def test_getting_stops_on_modal_condition(schedule):
stop_ids = schedule.stops_on_modal_condition(modes='bus')
assert set(stop_ids) == {'5', '6', '7', '8', '3', '1', '4', '2'}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_getting_stops_on_spatial_condition_with_geojson(schedule, mocker):
mocker.patch.object(spatial, 'read_geojson_to_shapely',
return_value=GeometryCollection(
[Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])]))
stops = schedule.stops_on_spatial_condition(test_geojson)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_shapely_polygon(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
stops = schedule.stops_on_spatial_condition(p)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_s2_hex_region(schedule):
s2_region = '4837,4839,483f5,4844,4849'
stops = schedule.stops_on_spatial_condition(s2_region)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_routes_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p)
assert set(routes) == {'1', '2'}
def test_getting_routes_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p, how='within')
assert set(routes) == {'1', '2'}
def test_getting_services_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p)
assert set(routes) == {'service'}
def test_getting_services_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p, how='within')
assert set(routes) == {'service'}
def test_applying_attributes_to_service(schedule):
assert schedule._graph.graph['services']['service']['name'] == 'name'
assert schedule['service'].name == 'name'
schedule.apply_attributes_to_services({'service': {'name': 'new_name'}})
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_attributes_changing_id_to_service_throws_error(schedule):
assert 'service' in schedule._graph.graph['services']
assert schedule._graph.graph['services']['service']['id'] == 'service'
assert schedule['service'].id == 'service'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_services({'service': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_route(schedule):
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'name'
assert schedule.route('1').route_short_name == 'name'
schedule.apply_attributes_to_routes({'1': {'route_short_name': 'new_name'}})
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'new_name'
assert schedule.route('1').route_short_name == 'new_name'
def test_applying_mode_attributes_to_route_results_in_correct_mode_methods(schedule):
assert schedule.route('1').mode == 'bus'
assert schedule.modes() == {'bus'}
assert schedule.mode_graph_map() == {
'bus': {('3', '4'), ('2', '3'), ('1', '2'), ('6', '7'), ('5', '6'), ('7', '8')}}
schedule.apply_attributes_to_routes({'1': {'mode': 'new_bus'}})
assert schedule.route('1').mode == 'new_bus'
assert schedule.modes() == {'bus', 'new_bus'}
assert schedule['service'].modes() == {'bus', 'new_bus'}
assert schedule.mode_graph_map() == {'bus': {('7', '8'), ('6', '7'), ('5', '6')},
'new_bus': {('3', '4'), ('1', '2'), ('2', '3')}}
assert schedule['service'].mode_graph_map() == {'bus': {('6', '7'), ('7', '8'), ('5', '6')},
'new_bus': {('3', '4'), ('2', '3'), ('1', '2')}}
def test_applying_attributes_changing_id_to_route_throws_error(schedule):
assert '1' in schedule._graph.graph['routes']
assert schedule._graph.graph['routes']['1']['id'] == '1'
assert schedule.route('1').id == '1'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'1': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_stop(schedule):
assert schedule._graph.nodes['5']['name'] == ''
assert schedule.stop('5').name == ''
schedule.apply_attributes_to_stops({'5': {'name': 'new_name'}})
assert schedule._graph.nodes['5']['name'] == 'new_name'
assert schedule.stop('5').name == 'new_name'
def test_applying_attributes_changing_id_to_stop_throws_error(schedule):
assert '5' in schedule._graph.nodes
assert schedule._graph.nodes['5']['id'] == '5'
assert schedule.stop('5').id == '5'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'5': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def change_name(attrib):
return 'new_name'
def test_applying_function_to_services(schedule):
schedule.apply_function_to_services(function=change_name, location='name')
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_function_to_routes(schedule):
schedule.apply_function_to_routes(function=change_name, location='route_short_name')
for route in schedule.routes():
assert schedule._graph.graph['routes'][route.id]['route_short_name'] == 'new_name'
assert route.route_short_name == 'new_name'
def test_applying_function_to_stops(schedule):
schedule.apply_function_to_stops(function=change_name, location='name')
for stop in schedule.stops():
assert stop.name == 'new_name'
assert schedule._graph.nodes[stop.id]['name'] == 'new_name'
def test_adding_service(schedule, service):
service.reindex('different_service')
service.route('1').reindex('different_service_1')
service.route('2').reindex('different_service_2')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_route_ids(schedule, service):
service.reindex('different_service')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_id_throws_error(schedule, service):
with pytest.raises(ServiceIndexError) as e:
schedule.add_service(service)
assert 'already exists' in str(e.value)
def test_adding_service_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service', 'some_id'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
s = Service(id='some_id', routes=[r])
schedule.add_service(s, force=True)
assert_semantically_equal(dict(s.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(s.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'some_id', 'service'}})
assert_semantically_equal(s.graph()['2']['5'], {'routes': {'3'}, 'services': {'some_id'}})
def test_adding_service_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_service(Service(id='some_id', routes=[r]))
assert 'The following stops would inherit data' in str(e.value)
def test_removing_service(schedule):
schedule.remove_service('service')
assert not set(schedule.route_ids())
assert not set(schedule.service_ids())
assert not schedule._graph.graph['route_to_service_map']
assert not schedule._graph.graph['service_to_route_map']
def test_adding_route(schedule, route):
route.reindex('new_id')
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'new_id'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'new_id': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'new_id']})
def test_adding_route_with_clashing_id(schedule, route):
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'service_3'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'service_3': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'service_3']})
def test_adding_route_to_non_existing_service_throws_error(schedule, route):
with pytest.raises(ServiceIndexError) as e:
schedule.add_route('service_that_doesnt_exist', route)
assert 'does not exist' in str(e.value)
def test_creating_a_route_to_add_using_id_references_to_existing_stops_inherits_schedule_stops_data(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['1', '2', '5']
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}}, '2': {'routes': {'3'}}, '5': {'routes': {'3'}}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_creating_a_route_to_add_giving_existing_schedule_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[schedule.stop('1'), schedule.stop('2'), schedule.stop('5')]
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'routes': {'3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'5': {'routes': {'3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
schedule.add_route('service', r, force=True)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_only_flags_those_that_are_actually_different(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='')]
)
assert r.ordered_stops == ['1', '2', '5']
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert "The following stops would inherit data currently stored under those Stop IDs in the Schedule: " \
"['1', '2']" in str(e.value)
def test_adding_route_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert 'The following stops would inherit data' in str(e.value)
def test_extracting_epsg_from_an_intermediate_route_gives_none():
# intermediate meaning not belonging to a schedule yet but referring to stops in a schedule
r = Route(
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['S1', 'S2', 'S3']
)
assert r.epsg is None
def test_removing_route(schedule):
schedule.remove_route('2')
assert set(schedule.route_ids()) == {'1'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1']})
def test_removing_route_updates_services_on_nodes_and_edges(schedule):
schedule.remove_route('2')
assert_semantically_equal(dict(schedule.graph().nodes(data=True)),
{'5': {'services': set(), 'routes': set(), 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set()},
'6': {'services': set(), 'routes': set(), 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set()},
'7': {'services': set(), 'routes': set(), 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set()},
'8': {'services': set(), 'routes': set(), 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76683608549253,
'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76682779861249,
'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766825803756994,
'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766856648946295,
'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()}})
assert_semantically_equal(schedule.graph().edges(data=True)._adjdict,
{'5': {'6': {'services': set(), 'routes': set()}},
'6': {'7': {'services': set(), 'routes': set()}},
'7': {'8': {'services': set(), 'routes': set()}}, '8': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}, '4': {}})
def test_removing_stop(schedule):
schedule.remove_stop('5')
assert {stop.id for stop in schedule.stops()} == {'1', '3', '4', '7', '8', '6', '2'}
def test_removing_unused_stops(schedule):
schedule.remove_route('1')
schedule.remove_unsused_stops()
assert {stop.id for stop in schedule.stops()} == {'6', '8', '5', '7'}
def test_iter_stops_returns_stops_objects(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4326')
assert set([stop.id for stop in schedule.stops()]) == {'0', '1', '2', '3', '4'}
assert all([isinstance(stop, Stop) for stop in schedule.stops()])
def test_read_matsim_schedule_returns_expected_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
epsg='epsg:27700')
correct_services = Service(id='10314', routes=[
Route(
route_short_name='12', id='VJbd8660f05fe6f744e58a66ae12bd66acbca88b98',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_0_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])
for val in schedule.services():
assert val == correct_services
assert_semantically_equal(schedule.stop_to_service_ids_map(),
{'26997928P.link:1': {'10314'}, '26997928P': {'10314'}})
assert_semantically_equal(schedule.stop_to_route_ids_map(),
{'26997928P': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'26997928P.link:1': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'}})
assert_semantically_equal(schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').trips,
{'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'], 'vehicle_id': ['veh_0_bus']})
assert_semantically_equal(
dict(schedule.graph().nodes(data=True)),
{'26997928P': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P', 'x': 528464.1342843144, 'y': 182179.7435136598, 'epsg': 'epsg:27700',
'name': '<NAME> (Stop P)', 'lat': 51.52393050617373, 'lon': -0.14967658860132668,
's2_id': 5221390302759871369, 'additional_attributes': {'name', 'isBlocking'},
'isBlocking': 'false'},
'26997928P.link:1': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P.link:1', 'x': 528464.1342843144, 'y': 182179.7435136598,
'epsg': 'epsg:27700', 'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373,
'lon': -0.14967658860132668, 's2_id': 5221390302759871369,
'additional_attributes': {'name', 'linkRefId', 'isBlocking'}, 'linkRefId': '1',
'isBlocking': 'false'}}
)
def test_reading_vehicles_with_a_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_reading_vehicles_after_reading_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_is_strongly_connected_with_strongly_connected_schedule(strongly_connected_schedule):
assert strongly_connected_schedule.is_strongly_connected()
def test_is_strongly_connected_with_not_strongly_connected_schedule(schedule):
assert not schedule.is_strongly_connected()
def test_has_self_loops_with_self_has_self_looping_schedule(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
assert s.has_self_loops()
def test_has_self_loops_returns_self_looping_stops(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
loop_nodes = s.has_self_loops()
assert loop_nodes == ['1']
def test_has_self_loops_with_non_looping_routes(schedule):
assert not schedule.has_self_loops()
def test_validity_of_services(self_looping_route, route):
s = Schedule('epsg:27700', [Service(id='1', routes=[self_looping_route]),
Service(id='2', routes=[route])])
assert not s['1'].is_valid_service()
assert s['2'].is_valid_service()
assert set(s.validity_of_services()) == {False, True}
def test_has_valid_services(schedule):
assert not schedule.has_valid_services()
def test_has_valid_services_with_only_valid_services(service):
s = Schedule('epsg:27700', [service])
assert s.has_valid_services()
def test_invalid_services_shows_invalid_services(service):
for route_id in service.route_ids():
service._graph.graph['routes'][route_id]['route'] = ['1']
s = Schedule('epsg:27700', [service])
assert s.invalid_services() == [service]
def test_is_valid_with_valid_schedule(service):
s = Schedule('epsg:27700', [service])
assert s.is_valid_schedule()
def test_generate_validation_report_delegates_to_method_in_schedule_operations(mocker, schedule):
mocker.patch.object(schedule_validation, 'generate_validation_report')
schedule.generate_validation_report()
schedule_validation.generate_validation_report.assert_called_once()
def test_build_graph_builds_correct_graph(strongly_connected_schedule):
g = strongly_connected_schedule.graph()
assert_semantically_equal(dict(g.nodes(data=True)),
{'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_5'},
'2': {'services': {'service'}, 'routes': {'1', '2'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set(), 'name': 'Stop_2'},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_7'},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_8'},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_3'},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_1'},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_4'}})
assert_semantically_equal(g.edges(data=True)._adjdict,
{'5': {'2': {'services': {'service'}, 'routes': {'2'}}},
'2': {'7': {'services': {'service'}, 'routes': {'2'}},
'3': {'services': {'service'}, 'routes': {'1'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}},
'8': {'5': {'services': {'service'}, 'routes': {'2'}}},
'4': {'1': {'services': {'service'}, 'routes': {'1'}}},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}}})
def test_building_trips_dataframe(schedule):
df = schedule.route_trips_with_stops_to_dataframe()
correct_df = DataFrame({'departure_time': {0: Timestamp('1970-01-01 13:00:00'), 1: Timestamp('1970-01-01 13:05:00'),
2: Timestamp('1970-01-01 13:09:00'), 3: Timestamp('1970-01-01 13:30:00'),
4: Timestamp('1970-01-01 13:35:00'), 5: Timestamp('1970-01-01 13:39:00'),
6: Timestamp('1970-01-01 11:00:00'), 7: Timestamp('1970-01-01 11:05:00'),
8: Timestamp('1970-01-01 11:09:00'), 9: Timestamp('1970-01-01 13:00:00'),
10: Timestamp('1970-01-01 13:05:00'),
11: Timestamp('1970-01-01 13:09:00')},
'arrival_time': {0: Timestamp('1970-01-01 13:03:00'), 1: Timestamp('1970-01-01 13:07:00'),
2: Timestamp('1970-01-01 13:13:00'), 3: Timestamp('1970-01-01 13:33:00'),
4: Timestamp('1970-01-01 13:37:00'), 5: Timestamp('1970-01-01 13:43:00'),
6: Timestamp('1970-01-01 11:03:00'), 7: Timestamp('1970-01-01 11:07:00'),
8: Timestamp('1970-01-01 11:13:00'), 9: Timestamp('1970-01-01 13:03:00'),
10: | Timestamp('1970-01-01 13:07:00') | pandas.Timestamp |
import datetime
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import tree
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # 显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 这两行需要手动设置
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('muted')
pd.set_option('display.max_rows', None) # 可以填数字,填None表示'行'无限制
pd.set_option('display.max_columns', None) # 可以填数字,填None表示'列'无限制
pd.set_option('display.width', 1000) # 横向不换行
# 参数设定
dead_time = datetime.datetime.strptime(str('2020-10-14'), '%Y-%m-%d')
interval_time = datetime.timedelta(days=7)
# 模型参数
DATASPLITRATE = 0.2
MAXDEPTH = 4
# MINIMPURITYDECRESASE=val
MINSAMPLESSPLIT = 1
# 数据
print('正在加载数据')
print('loading...')
psn_lv_30 = pd.read_csv('D:/data/A5_30_offline.csv')
BH_ = pd.read_csv('D:/data/A5BH_1.csv')
CZ_ = pd.read_csv('D:/data/A5CZ_1.csv')
DZ_ = pd.read_csv('D:/data/A5DZ_1.csv')
KILL_ = pd.read_csv('D:/data/A5KILL_1.csv')
LMZ_ = pd.read_csv('D:/data/A5LMZ_1.csv') # LOG_TYPE_LMZ
XMSL_ = pd.read_csv('D:/data/A5XMSL_1.csv') # GAME_LOG_XIANMEN_TRAILS
LYQ_ = pd.read_csv('D:/data/A5LYQ_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_4
KTT_ = pd.read_csv('D:/data/A5KTT_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_7
XMHJ_ = pd.read_csv('D:/data/A5XMHJ_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_14
SYZC_ = pd.read_csv('D:/data/A5SYZC_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_13
PTY_ = pd.read_csv('D:/data/A5PTY_1.csv') # CHILDLOG_LOG_ACT_START_10
FBJL_ = pd.read_csv('D:/data/A5FBJL_1.csv') # LOG_COPY_ENDINFO
FBRH_ = pd.read_csv('D:/data/A5FBRH_1.csv') # LOG_FABAO_FUSION
REGION_ = pd.read_csv('D:/data/A5region_1.csv')
FML_ = pd.read_csv('D:/data/A5FML_1.csv') # LOG_COPY_FML
TJCZ_ = pd.read_csv('D:/data/A5TJCZ_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_23
ZBXM_ = pd.read_csv('D:/data/A5ZBXM_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_86
# GWWJ_ = pd.read_csv('D:/data/A5GWWJ_1.csv')# CHILDLOG_LOG_SKYBOOK_ADD_TIMES_73
CDYH_ = pd.read_csv('D:/data/A5CDYH_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_69
MBMC_ = pd.read_csv('D:/data/A5MBMC_1.csv') # CHILDLOG_LOG_SKYBOOK_ADD_TIMES_5
FBXL_ = pd.read_csv('D:/data/A5FBXL_1.csv') # LOG_FABAO_REFINE
GX_ = | pd.read_csv('D:/data/A5GX_1.csv') | pandas.read_csv |
# This gets all the census data, can be filted by level and state.
# Should play with all the chunk sizes, to see how that affects speed. I'm leaving a message in censusreporter_api.py for now that will alert you if the size gets too big and it does a json_merge. json_merge is slow, we want to avoid those.
import pandas as pd
from censusreporter_api import *
import os
from io import BytesIO
import io
from zipfile import ZipFile
import requests
import datetime
import re
import argparse
from bs4 import BeautifulSoup
def getTractInfo(url, regex=''):
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a', href=re.compile(regex))]
BASE_URL = "http://www2.census.gov/geo/docs/maps-data/data/gazetteer/"
YEAR = datetime.datetime.now().year
GAZ_YEAR_URL = '{}{}_Gazetteer/'.format(BASE_URL, YEAR)
# For easier Windows compatibility
OUTPUT_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))),
'dimensionaldata'
)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
STATE_LIST = [ 'AL','AK','AZ','AR','CA','CO','CT','DE','DC','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY','PR']
STATE_CODES = {'AL': '01','AK': '02','AZ': '04','AR': '05','CA': '06','CO': '08','CT': '09','DE': '10','DC': '11','FL': '12','GA': '13','HI': '15','ID': '16','IL': '17','IN': '18','IA': '19','KS': '20','KY': '21','LA': '22','ME': '23','MD': '24','MA': '25','MI': '26','MN': '27','MS': '28','MO': '29','MT': '30','NE': '31','NV': '32','NH': '33','NJ': '34','NM': '35','NY': '36','NC': '37','ND': '38','OH': '39','OK': '40','OR':'41','PA': '42','RI': '44','SC': '45','SD': '46','TN': '47','TX': '48','UT': '49','VT': '50','VA': '51','WA': '53','WV': '54','WI': '55','WY': '56','PR':'72'}
STATE_ABBREVS = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
DATA_TABLES = ['B01001','B03002','B06008','B23001','B19001','B25009','B25077']
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--states", help="State Abbreviation List, space seperated ie NY AK", nargs="*")
parser.add_argument("-t", "--type", help="ALL|County|Upper|Lower|Congress|City|State|Tract space separated", nargs="*")
def get_combinedData(thePD=None, tables=None):
geoids = thePD.index.tolist()
try:
dFrame = get_dataframe(geoids=geoids, tables=tables)
except Exception as e: #This should never happen, it's handled in censusreporter_api but just in case...
handledError = "release doesn't include GeoID(s) "
errorMsg = str(e)
print(errorMsg)
if handledError in errorMsg:
pattern = re.compile("^\s+|\s*,\s*|\s+$")
geoList = pattern.split(errorMsg.partition(handledError)[2].replace(".", ""))
thePD = thePD[-thePD.index.isin(geoList)]
#If everything was not valid, then we'll just return nothing
if len(thePD) == 0:
return None
return get_combinedData(thePD, tables)
else:
raise
else:
return dFrame
return None
def get_zip(file_url):
url = requests.get(file_url)
zipfile = ZipFile(BytesIO(url.content), 'r')
zip_names = zipfile.namelist()
if len(zip_names) == 1:
file_name = zip_names.pop()
extracted_file = zipfile.open(file_name).read()
return extracted_file
# Util for cleaning up column names of extra whitespace
def strip_colnames(df):
all_cols = df.columns.values.tolist()
col_dict = {}
for col in all_cols:
col_dict[col] = col.strip()
return df.rename(columns=col_dict)
# Gets voter_file_id from different jurisdiction types
def parse_voter_file_id(row):
if str(row['GEOID']).endswith('ZZ'):
return None
# If not ZZ, return letter for district (Alaska has lettered districts)
if not str(row['GEOID'])[-1:].isdigit():
return str(row['GEOID'])[-1:]
# Multiplier is 100 for congress, 1000 for all other types
if row['ENTITYTYPE'] == 'congress':
state_mult = 100
else:
state_mult = 1000
voter_file_id = int(row['GEOID']) - (int(row['STATEFP']) * state_mult)
# Some states with 1 district return 0, return 1 for those
if voter_file_id > 0:
return str(voter_file_id)
else:
return '1'
def get_census_data(geo_type, geo_url, state_list, fips_func,
state_idx=(0, 0),
census_tables=DATA_TABLES,
find_zz=False,
delim='\t',
chunk_size=250):
print("Working " + geo_type)
if requests.get(geo_url).status_code != 200:
raise ValueError("{} file not found at URL: {}".format(geo_type, geo_url))
# Changing source if city URL
if geo_type != 'City' and geo_type != "Tract":
csv_file = get_zip(geo_url)
file_source = io.StringIO(csv_file.decode('cp1252'))
else:
file_source = geo_url
reader = pd.read_csv(file_source,
delimiter=delim,
iterator=True,
chunksize=chunk_size)
context_df_list = []
census_df_list = []
for chunk in reader:
if geo_type == "Tract":
chunk.rename(columns={'CODE': 'GEOID'}, inplace=True)
chunk['USPS'] = state_list[0] #Tracts are passed in one state at a time, but don't have this field
else:
chunk = chunk.loc[chunk['USPS'].isin(state_list)]
if find_zz:
chunk['GEOID'] = chunk['GEOID'].astype(str)
chunk = chunk.loc[chunk['GEOID'].str.find('ZZ') == -1]
if len(chunk) > 0:
chunk['FIPS'] = chunk['GEOID'].apply(fips_func)
context_df_list.append(chunk)
chunk = chunk.set_index('FIPS')
data = get_combinedData(chunk, tables=census_tables)
census_df_list.append(data)
context_df = | pd.concat(context_df_list) | pandas.concat |
# Import relevant libraries
import pandas as pd # to deal with the dataset
import plotly.express as px #to plot with beauty
from download_file import download_file
import json
## Get around pandas freezing when opening the file
url_name = 'https://base-covid19.pt/export3.json'
output_file = 'export3.json'
download_file('https://base-covid19.pt/export3.json', output_file=output_file)
## Reads the json brute force
data = | pd.read_json(output_file) | pandas.read_json |
# pylint: disable=W0231
import numpy as np
from pandas.core.common import save, load
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
#-------------------------------------------------------------------------------
# Picklable mixin
class Picklable(object):
def save(self, path):
save(self, path)
@classmethod
def load(cls, path):
return load(path)
class PandasError(Exception):
pass
class PandasObject(Picklable):
_AXIS_NUMBERS = {
'index' : 0,
'columns' : 1
}
_AXIS_ALIASES = {}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
#----------------------------------------------------------------------
# Axis name business
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, int):
if axis in cls._AXIS_NAMES:
return axis
else:
raise Exception('No %d axis' % axis)
else:
return cls._AXIS_NUMBERS[axis]
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, basestring):
if axis in cls._AXIS_NUMBERS:
return axis
else:
raise Exception('No axis named %s' % axis)
else:
return cls._AXIS_NAMES[axis]
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except KeyError:
return default
def groupby(self, by=None, axis=0, level=None, as_index=True):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns
Parameters
----------
by : mapping function / list of functions, dict, Series, or tuple /
list of column names.
Called on each element of the object index to determine the groups.
If a dict or Series is passed, the Series or dict VALUES will be
used to determine the groups
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
Examples
--------
# DataFrame result
>>> data.groupby(func, axis=0).mean()
# DataFrame result
>>> data.groupby(['col1', 'col2'])['col3'].mean()
# DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Returns
-------
GroupBy object
"""
from pandas.core.groupby import groupby
return groupby(self, by, axis=axis, level=level, as_index=as_index)
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if len(axis) > 0:
new_axis = axis[np.asarray([crit(label) for label in axis])]
else:
new_axis = axis
return self.reindex(**{axis_name : new_axis})
def drop(self, labels, axis=0):
"""
Return new object with labels in requested axis removed
Parameters
----------
labels : array-like
axis : int
Returns
-------
dropped : type of caller
"""
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
new_axis = axis.drop(labels)
return self.reindex(**{axis_name : new_axis})
def sort_index(self, axis=0, ascending=True):
"""
Sort object by labels (along an axis)
Parameters
----------
axis : {0, 1}
Sort index/rows versus columns
ascending : boolean, default True
Sort ascending vs. descending
Returns
-------
sorted_obj : type of caller
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name : new_axis})
@property
def ix(self):
raise NotImplementedError
def reindex(self, *args, **kwds):
raise NotImplementedError
class NDFrame(PandasObject):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
# kludge
_default_stat_axis = 0
def __init__(self, data, axes=None, copy=False, dtype=None):
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
self._data = data
self._item_cache = {}
def astype(self, dtype):
"""
Cast object to input numpy.dtype
Parameters
----------
dtype : numpy.dtype or Python type
Returns
-------
casted : type of caller
"""
return self._constructor(self._data, dtype=dtype)
@property
def _constructor(self):
return NDFrame
@property
def axes(self):
return self._data.axes
def __repr__(self):
return 'NDFrame'
@property
def values(self):
return self._data.as_matrix()
@property
def ndim(self):
return self._data.ndim
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
cache = self._item_cache
try:
return cache[item]
except Exception:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
return res
def _box_item_values(self, key, values):
raise NotImplementedError
def _clear_item_cache(self):
self._item_cache.clear()
def _set_item(self, key, value):
self._data.set(key, value)
try:
del self._item_cache[key]
except KeyError:
pass
def __delitem__(self, key):
"""
Delete item
"""
self._data.delete(key)
try:
del self._item_cache[key]
except KeyError:
pass
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
"""
result = self[item]
del self[item]
return result
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
new_axes.append(np.concatenate([ax, [k]]))
else:
new_axes.append(ax)
return new_axes
#----------------------------------------------------------------------
# Consolidation of internals
def _consolidate_inplace(self):
self._clear_item_cache()
self._data = self._data.consolidate()
def consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray). Mainly an internal API function,
but available here to the savvy user
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
"""
if inplace:
self._consolidate_inplace()
return self
else:
cons_data = self._data.consolidate()
if cons_data is self._data:
cons_data = cons_data.copy()
return self._constructor(cons_data)
@property
def _is_mixed_type(self):
self._consolidate_inplace()
return len(self._data.blocks) > 1
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data)
def cumsum(self, axis=None, skipna=True):
"""
Return DataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, 0.)
result = y.cumsum(axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = y.cumsum(axis)
return self._wrap_array(result, self.axes, copy=False)
def _wrap_array(self, array, axes, copy=False):
raise NotImplementedError
def cumprod(self, axis=None, skipna=True):
"""
Return cumulative product over requested axis as DataFrame
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, 1.)
result = y.cumprod(axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = y.cumprod(axis)
return self._wrap_array(result, self.axes, copy=False)
def copy(self, deep=True):
"""
Make a copy of this object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : type of caller
"""
data = self._data
if deep:
data = data.copy()
return self._constructor(data)
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Returns
-------
swapped : type of caller (new object)
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
def add_prefix(self, prefix):
"""
Concatenate prefix string with panel items names.
Parameters
----------
prefix : string
Returns
-------
with_prefix : type of caller
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data)
def add_suffix(self, suffix):
"""
Concatenate suffix string with panel items names
Parameters
----------
suffix : string
Returns
-------
with_suffix : type of caller
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data)
def rename_axis(self, mapper, axis=0, copy=True):
"""
Alter index and / or columns using input function or functions.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is.
Parameters
----------
mapper : dict-like or function, optional
axis : int, default 0
copy : boolean, default True
Also copy underlying data
See also
--------
DataFrame.rename
Returns
-------
renamed : type of caller
"""
# should move this at some point
from pandas.core.series import _get_rename_function
mapper_f = _get_rename_function(mapper)
if axis == 0:
new_data = self._data.rename_items(mapper_f, copydata=copy)
else:
new_data = self._data.rename_axis(mapper_f, axis=axis)
if copy:
new_data = new_data.copy()
return self._constructor(new_data)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
Parameters
----------
indices : list / array of ints
axis : int, default 0
Returns
-------
taken : type of caller
"""
if axis == 0:
labels = self._get_axis(axis)
new_items = labels.take(indices)
new_data = self._data.reindex_axis(new_items, axis=0)
else:
new_data = self._data.take(indices, axis=axis)
return self._constructor(new_data)
# Good for either Series or DataFrame
def truncate(self, before=None, after=None, copy=True):
"""Function truncate a sorted DataFrame / Series before and/or after
some particular dates.
Parameters
----------
before : date
Truncate before date
after : date
Truncate after date
Returns
-------
truncated : type of caller
"""
before = datetools.to_datetime(before)
after = | datetools.to_datetime(after) | pandas.core.datetools.to_datetime |
import pandas as pd
import os
from typing import List, Tuple, Dict
from collections import defaultdict
from datetime import datetime
import json
def get_game_data_path() -> str:
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, os.pardir, os.pardir, "data", "kaggle")
return data_dir
def get_game_data_list(path: str) -> List[str]:
data_files = [
x
for x in os.listdir(path)
if x.endswith("bgg_top2000.csv") or x.endswith("bgg_top5000.csv")
]
data_files.sort()
return [os.path.join(path, x) for x in data_files]
def get_curated_df(data_list: List) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
df_ref = None
df_rank_dict = defaultdict(list)
df_rank_id_dict = defaultdict(list)
column_dtypes = {
"AbstractGameRank": float,
"BoardGameRank": float,
"Children'sGameRank": float,
"CustomizableRank": float,
"FamilyGameRank": float,
"PartyGameRank": float,
"StrategyGameRank": float,
"ThematicRank": float,
"WarGameRank": float,
"id": str,
"game_id": str,
}
rankings_list = [
"AbstractGameRank",
"BoardGameRank",
"ChildrensGameRank",
"CustomizableRank",
"FamilyGameRank",
"PartyGameRank",
"StrategyGameRank",
"ThematicRank",
"WarGameRank",
]
for data_fl in data_list:
df_init = pd.read_csv(data_fl, dtype=column_dtypes)
df_init = df_init.rename(
columns={"Children'sGameRank": "ChildrensGameRank", "id": "game_id"}
)
df_init = df_init.drop_duplicates(subset="game_id")
if df_init.duplicated("BoardGameRank").sum() > 0:
df_init.BoardGameRank = list(range(1, df_init.shape[0] + 1))
for col in rankings_list:
df_init[col] = df_init[col].astype("Int64").astype("Int32")
init_date = get_data_date(data_fl)
df_rank_dict = get_game_rank_dict(
df_init, init_date, df_rank_dict, rankings_list
)
df_rank_id_dict = get_game_id_rank_dict(
df_init, init_date, df_rank_id_dict, rankings_list
)
if data_fl == data_list[-1]:
df_ref = get_game_ref(df_init)
df_game_rank = get_game_rank(df_rank_dict["BoardGameRank"])
df_game_id_rank = get_game_id_rank(df_rank_id_dict["BoardGameRank"])
for rank_name, rank_list in df_rank_dict.items():
if rank_name == "BoardGameRank":
df_tmp = get_first_date(rank_name, rank_list)
df_ref = df_ref.merge(df_tmp, on="name", how="left")
df_tmp = get_rank_range(rank_name, rank_list)
df_ref = df_ref.merge(df_tmp, on="name", how="left")
df_ref["bgg_url"] = "https://boardgamegeek.com/boardgame/" + df_ref.game_id
return (df_game_rank, df_game_id_rank, df_ref)
def get_first_date(rank_str: str, df_list: list) -> pd.DataFrame:
df = pd.concat(df_list, sort=False, ignore_index=True).drop(columns=rank_str)
df = df.dropna().sort_values(["rank_date"])
df_first_date = (
df.groupby("name", sort=False, as_index=False)
.first()
.rename(columns={"rank_date": "first_date_in_ref"})
)
return df_first_date
def get_rank_range(rank_str: str, df_list: list) -> pd.DataFrame:
df = | pd.concat(df_list, sort=False, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 10:37:14 2020
Ferrain and horst Together
@author: nikorose
"""
from DJSFunctions import plot_ankle_DJS, ankle_DJS, Plotting
import os
import pandas as pd
import numpy as np
import matplotlib.colors as mcolors
from utilities_QS import ttest, hyperparams, best_hyper, change_labels
import operator
ttest_ = False
plot_theory = False
optimize_params = False
plot_pairs = False
plot_sample = False
plot_per_group = True
# =============================================================================
# Ferrarin execution
# =============================================================================
#Excluding not regular intentions
exclude_list = ["{} {}".format(i,j) for i in ['Toe', 'Heel', 'Descending',
'Ascending'] for j in ['A','Y']]
# exclude_list.extend(['Free A', 'Very Slow A', 'Slow A', 'Medium A', 'Fast A'])
Ferrarin_ = ankle_DJS('mmc3.xls',
dir_loc = 'Ferrarin',
exp_name = 'Ferrarin analysis',
exclude_names = exclude_list)
all_dfs_ferra = Ferrarin_.extract_DJS_data()
#Changing labels
# all_dfs_ferra = Ferrarin_.change_labels([ r'$0.363 < v* < 0.500$ Ch$_{2}$',
# r"$v/h<0.6$ Ch$_{2}$",
# r'$0.6 < v/h < 0.8$ Ch$_{2}$',
# r'$0.8 < v/h < 1$ Ch$_{2}$',
# r'$v/h > 1.0$ Ch$_{2}$',
# r'$0.363 < v* < 0.500$ A',
# r'$v/h < 0.6$ A', #
# r'$0.6 < v/h < 0.8$ A',
# r'$0.8 < v/h < 1$ A',
# r'$v/h > 1.0$ A'])
# df_turn_ferra = Ferrarin_.get_turning_points(turning_points= 6,
# param_1 = 4, cluster_radius= 15)
# Ferrarin_.deg_to_rad()
Ferrarin_.energy_calculation()
#Sensitive results may vary when integrating degrees, the best is to do in radians
Ferrarin_.deg_to_rad()
total_work_ferra = Ferrarin_.total_work()
# =============================================================================
# Obtaining the mechanical work through power instances in regular walking Ferra
# =============================================================================
idx= pd.IndexSlice
work_df_ferra = Ferrarin_.power_energy.loc[idx[: , 'mean'], :]
zero_ro_ferra = Ferrarin_.energy_fun.min_max_power(Ferrarin_.power_ankle)
# =============================================================================
# Schwartz execution
# =============================================================================
Schwartz_ = ankle_DJS('Schwartz.xls',
dir_loc = 'Schwartz',
exp_name = 'Schwartz analysis',
features= ['Ankle Dorsi/Plantarflexion',
'Vertical',
'Ankle Dorsi/Plantarflexion',
'Ankle'])
all_dfs_schwartz = Schwartz_.extract_DJS_data()
# all_dfs_schwartz = Schwartz_.change_labels([r'$v* < 0.227$ Ch$_{1}$',r'$0.227 < v* < 0.363$ Ch$_{1}$',r'$0.363 < v* < 0.500$ Ch$_{1}$',
# r'$0.500 < v* < 0.636$ Ch$_{1}$','$v* > 0.636$ Ch$_{1}$'])
# df_turn_schwartz = Schwartz_.get_turning_points(turning_points= 6,
# param_1 = 2, cluster_radius= 8)
# Schwartz_.deg_to_rad()
Schwartz_.energy_calculation()
#Sensitive results may vary when integrating degrees, the best is to do in radians
Schwartz_.deg_to_rad()
total_work_schwartz = Schwartz_.total_work()
# =============================================================================
# Obtaining the mechanical work through power instances in regular walking Ferra
# =============================================================================
work_df_schwartz = Schwartz_.power_energy.loc[idx[: , 'mean'], :]
zero_ro_schwartz = Schwartz_.energy_fun.min_max_power(Schwartz_.power_ankle)
# =============================================================================
# concatenating DFs
# =============================================================================
concat_gait = pd.concat([Ferrarin_.all_dfs_ankle, Schwartz_.all_dfs_ankle], axis=1)
concat_gait = concat_gait.interpolate(axis=0)
concat_gait = concat_gait.reindex(Schwartz_.index_ankle.get_level_values(0).unique(),
level=0, axis=0)
# =============================================================================
# Obtaining new values for the concatenated df
# =============================================================================
concat_ = ankle_DJS(concat_gait, exp_name = 'Concat Ferrarin and Schwartz analysis')
all_dfs = concat_.extract_df_DJS_data(idx=[0,1,2,3], units=False)
times=3
all_dfs = concat_.interpolate_ankledf(times=times, replace=True)
# =============================================================================
# Best params
# =============================================================================
if optimize_params:
best_df_turn = best_hyper(all_dfs, save='Ferrarin/best_params_all_dfs.csv',
smooth_radius=range(4,7),
cluster_radius=range(15*times,20*times, times),
verbose=False, rows=[0,2])
#Fast A did not do well let us optimize again
df_turn_FA = hyperparams(all_dfs.loc[:,idx['Fast A',:]],
smooth_radius=range(4,7),
c_radius=range(10*times,15*times, times), R2=True,
rows=[0,2])
df_turn_FA['TP']['sr_4_cr_42'].to_csv('Ferrarin/FastA_opt_params.csv')
best_df_turn.loc['Fast A'] = df_turn_FA['TP']['sr_4_cr_42']
else:
best_df_turn = pd.read_csv('Ferrarin/best_params_all_dfs.csv', index_col=[0,1])
df_turn_FA = pd.read_csv('Ferrarin/FastA_opt_params.csv', index_col=[0,1])
#Replacing values on best
best_df_turn.loc['Fast A'] = df_turn_FA
best_df_turnGC = best_df_turn.apply(lambda x: np.int64(x/times))
#Sensitive results may vary when integrating degrees, the best is to do in radians
concat_.deg_to_rad()
total_work = concat_.total_work()
# =============================================================================
# Plotting ankle Quasi-Stiffness
# =============================================================================
Color = [i[1] for i in mcolors.TABLEAU_COLORS.items()]*3
params = {'sharex':False, 'sharey':True, 'color_DJS':['slategray']*20, 'color_reg':['black']*20,
'color_symbols': ['slategray']*20, 'arr_size': 6, 'left_margin': 0.1,
'DJS_linewidth': 0.2, 'reg_linewidth': 1.0, 'grid': False}
DJS_all_ch = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=[3,5],
alpha=3, fig_size=[9,11], params=params, ext='png')
DJS_all_ch.colors = Color
#Previuos config for Y, A and CH
#All np.r_[10,1,6,11,2,7,12,0,5,13,3,8,14,4,9]
# config for more rows np.r_[5,1,6,2,7,0,8,3,9,4]
# Only Ferra np.r_[1,2,0,3,4,10:15]
fig4 = DJS_all_ch.plot_DJS(concat_.all_dfs_ankle,
cols=np.r_[10,1,6,11,2,7,12,0,5,13,3,8,14,4,9], rows= np.r_[0,2],
title="Individual ankle DJS Children",
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True)
reg_info_concat_all = DJS_all_ch.reg_info_df.round(3)
reg_info_concat_all = reg_info_concat_all
#Read the metrics from this
metrics_all = reg_info_concat_all.mean(axis=0)
concat_dep = best_df_turnGC.loc[idx[:,'mean'], 'point 1':].droplevel(1)
#Work data handling
work_all = DJS_all_ch.areas
work_all['direction'] = work_all['direction'].replace('cw',0)
work_all['direction'] = work_all['direction'].replace('ccw',1)
work_all = work_all.astype(np.float64).round(3)
concat_dep = pd.concat([concat_dep, reg_info_concat_all['stiffness'].unstack().droplevel(1), work_all], axis=1)
labels_idx = ['{} {}'.format(i,j) for i in ['Very Slow', 'Slow', 'Free', 'Medium', 'Fast'] for j in ['C', 'Y', 'A']]
concat_dep = concat_dep.reindex(labels_idx)
concat_dep.index = pd.MultiIndex.from_product([['Very Slow', 'Slow', 'Free', 'Medium', 'Very Fast'], ['C', 'Y', 'A']], names=['Gait Speed','Group'])
concat_dep.columns = pd.MultiIndex.from_arrays([[r'Turning Point [$\%GC$]']*5+[r'Stiffness [$\frac{Nm}{kg\times rad}$]']*4+[r'Work $\frac{J}{kg}$']*3,
['ERP','LRP','DP','S','TS','CP','ERP','LRP','DP','Abs.', 'Net.', 'Direction']])
with open("Ferrarin/ferra_DJS.tex", "w+") as pt:
concat_dep.to_latex(buf=pt, col_space=10, longtable=False, multirow=True,
caption=r'Cuantitative ankle DJS characteristics depicted in Fig. \ref{fig:comp_speed_QS_ferra} for children and adult groups',
label='tab:table_ferra')
# =============================================================================
# Showing the DJS results for youths and adults
# =============================================================================
DJS_all_ad = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=[2,5],
alpha=2, fig_size=[3,7], params=params, ext='png')
DJS_all_ad.colors = Color
#Previuos config for Y, A and CH
#np.r_[10,1,6,11,2,7,12,0,5,13,3,8,14,4,9]
# config for more rows np.r_[5,1,6,2,7,0,8,3,9,4]
fig5 = DJS_all_ad.plot_DJS(concat_.all_dfs_ankle,
cols=np.r_[1,2,0,3,4,6,7,5,8,9], rows= np.r_[0,2],
title="Individual ankle DJS Y v A",
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True)
reg_info_concat_ad = DJS_all_ad.reg_info_df.round(3)
# =============================================================================
# Showing in the regular way, comparing Schwartz and Ferrarin
# =============================================================================
if plot_pairs:
Color = [i[1] for i in mcolors.TABLEAU_COLORS.items()]*3
params_c = {'sharex':False, 'sharey':True, 'left_margin': 0.2, 'arr_size':15,
'hide_labels':(False, True), 'yticks': np.arange(-0.25, 1.75, 0.25),
'xticks':None, 'alpha_absorb': 0.2, 'alpha_prod': 0.4, 'line_width':1}
cols_to_joint ={r'Free': (0,-3, False),
r'Very Slow': (1,-5, False),
r'Slow': (2,-4, False),
r'Fast': (3,-2, False),
r'Very Fast': (4,-1, False)}
cols_to_joint_a ={r'Free': (0,5, False),
r'Very Slow': (1,6, False),
r'Slow': (2,7, False),
r'Fast': (3,8, False),
r'Very Fast': (4,9, False)}
for num, key in enumerate(cols_to_joint.keys()):
params_c.update({'hide_labels': (False, cols_to_joint[key][-1])})
DJS_all = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
alpha=2.0, fig_size=[3,3], params=params_c)
fig6 = DJS_all.plot_DJS(concat_.all_dfs_ankle,
cols=list(cols_to_joint[key][:2]), rows= np.r_[0,2],
title="Ankle DJS Y vs CH comparison {}".format(key),
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True)
if num == 0:
reg_info_ch = pd.DataFrame(DJS_all.reg_info_df)
work_ch = pd.DataFrame(DJS_all.areas)
else:
reg_info_ch = pd.concat([reg_info_ch, DJS_all.reg_info_df])
work_ch = pd.concat([work_ch, DJS_all.areas])
reg_info_ch = reg_info_ch.round(3)
work_ch = work_ch.round(3)
for num, key in enumerate(cols_to_joint_a.keys()):
params_c.update({'hide_labels': (False, cols_to_joint_a[key][-1])})
DJS_all = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
alpha=2.0, fig_size=[3,3], params=params_c)
fig6 = DJS_all.plot_DJS(concat_.all_dfs_ankle,
cols=list(cols_to_joint_a[key][:2]), rows= np.r_[0,2],
title="Ankle DJS Y vs A comparison {}".format(key),
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True)
if num == 0:
reg_info_a = pd.DataFrame(DJS_all.reg_info_df)
work_a = pd.DataFrame(DJS_all.areas)
else:
reg_info_a = pd.concat([reg_info_a, DJS_all.reg_info_df])
work_a = pd.concat([work_a, DJS_all.areas])
reg_info_a = reg_info_a.round(3)
work_a = work_a.round(3)
# =============================================================================
# Plotting separately per study
# =============================================================================
if plot_per_group:
params_to = {'sharex':False, 'sharey':True, 'left_margin': 0.05, 'arr_size':15,
'hide_labels':(False, False), 'yticks': np.arange(-0.25, 1.75, 0.25),
'xticks':None, 'alpha_absorb': 0.08, 'alpha_prod': 0.3, 'line_width':0.6}
groups = {'Children G2': ([1,0,2,3,4], '<NAME>'),
'Adults' : (np.r_[6,5,7:10], '<NAME>'),
'Children G1': (np.r_[10:15], 'Schwartz')}
for key, item in groups.items():
DJS_g = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=[1,5],
alpha=3.0, fig_size=[4,20], params=params_to)
fig6 = DJS_g.plot_DJS(concat_.all_dfs_ankle,
cols=item[0], rows= np.r_[0,2], #[1,0,2,3,4], np.r_[5:10]
title="Ankle DJS CH together {}".format(item[1]),
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True, header= None)
# =============================================================================
# Plotting one sample with labels, theoretical
# =============================================================================
if plot_sample:
params_sample = {'sharex':False, 'sharey':True, 'color_DJS':['slategray']*20,
'color_reg':['black']*20, 'color_symbols': ['slategray']*20,
'arr_size': 13, 'left_margin': 0.15, 'DJS_linewidth': 0.2,
'reg_linewidth': 1.0, 'grid': False, 'alpha_prod': 0.4,
'alpha_absorb': 0.1, 'text':True}
DJS_sample = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
alpha=3.0, fig_size=[4,4], params=params_sample)
fig6 = DJS_sample.plot_DJS(concat_.all_dfs_ankle,
cols=[-3], rows= np.r_[0,2], #[1,0,2,3,4], np.r_[5:10]
title="Ankle DJS sample",
legend=True, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True, header= None)
#Only regressions
params_simple = {'sharex':False, 'sharey':True, 'color_DJS':['white']*20,
'color_reg':['black']*20, 'color_symbols': ['white']*20,
'arr_size': 13, 'left_margin': 0.15, 'DJS_linewidth': 0.2,
'reg_linewidth': 1.0, 'grid': False, 'alpha_prod': 0.0,
'alpha_absorb': 0.0, 'text':False, 'hide_labels': (True,True)}
DJS_simple = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
alpha=2.0, fig_size=[2,2], params=params_simple)
fig6 = DJS_simple.plot_DJS(concat_.all_dfs_ankle,
cols=[-1], rows= np.r_[0,2], #[1,0,2,3,4], np.r_[5:10]
title="Ankle DJS sample dir",
legend=False, reg=best_df_turn.loc[idx[:,'mean'],:],
integration= True, rad = True, header= None)
if ttest_:
# =============================================================================
# Obtaining the ttest of children (Schwartz) against youth (Ferrarin)
# =============================================================================
cols_ch = best_df_turn.index.get_level_values(0).unique()
cols_ch = cols_ch[np.r_[1,2,0,3,4,10:15]]
etiquete = ['VS','S','C','F','VF']
#Dropping GRF and powers, we are interested in QS only
df_QS_ch = all_dfs
df_QS_ch = df_QS_ch.drop(['Vertical Force [%BH]', 'Ankle [W/kg]'], axis=0, level=0)
#Samples of each experiment
n_schwartz = [77, 82, 82, 76, 51] #Very Slow Slow Free Fast Very Fast
n_ferra_y = [34, 76, 111, 71, 100, 83, 51, 75, 67] #XS S Natural M L T H A D
n_ferra_a = [140, 110, 124, 68, 52, 124, 85, 73, 72] #XS S Natural M L T H A D
tt_ch = pd.concat([ttest(df_QS_ch[cols_ch[i]],
df_QS_ch[cols_ch[i+5]],
samples=[n_schwartz[i], n_ferra_y[i]],
name='Ttest_{}'.format(etiquete[i]), method='scipy') for i in range(5)], axis=1)
tt_angles_ch = tt_ch.loc['Ankle Dorsi/Plantarflexion Deg [°]'].mean(axis=0)
tt_moments_ch = tt_ch.loc['Ankle Dorsi/Plantarflexion [Nm/kg]'].mean(axis=0)
# =============================================================================
# Obtaining the ttest of adults vs youth (Ferrarin)
# =============================================================================
cols_ad = best_df_turn.index.get_level_values(0).unique()
cols_ad = cols_ad[np.r_[1,2,0,3,4,6,7,5,8,9]]
etiquete = ['VS','S','C','F','VF']
#Dropping GRF and powers, we are interested in QS only
df_QS_ad = all_dfs
df_QS_ad = df_QS_ad.drop(['Vertical Force [%BH]', 'Ankle [W/kg]'], axis=0, level=0)
tt_ad = pd.concat([ttest(df_QS_ad[cols_ad[i]],
df_QS_ad[cols_ad[i+5]],
samples= [n_ferra_y[i], n_ferra_a[i]],
name='Ttest_{}'.format(etiquete[i]),
method='scipy') for i in range(5)], axis=1)
tt_angles_ad = tt_ad.loc['Ankle Dorsi/Plantarflexion Deg [°]'].mean(axis=0)
tt_moments_ad = tt_ad.loc['Ankle Dorsi/Plantarflexion [Nm/kg]'].mean(axis=0)
tt_concat = pd.concat([tt_angles_ch, tt_moments_ch, tt_angles_ad, tt_moments_ad], axis=1)
tt_concat = tt_concat.round(3)
tt_concat.index = pd.MultiIndex.from_product([['Very Slow', 'Slow', 'Free', 'Medium', 'Very Fast'],['t-value', 'p-value']])
tt_concat.columns = | pd.MultiIndex.from_product([['C vs Y', 'Y vs A'],['Ankle angle', 'Ankle moment']]) | pandas.MultiIndex.from_product |
import pytest
import numpy as np
import pandas as pd
from pandas._testing import assert_frame_equal
from wetterdienst.dwd.util import (
coerce_field_types,
build_parameter_set_identifier,
)
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.dwd.observations import (
DWDObservationPeriod,
DWDObservationResolution,
DWDObservationParameterSet,
)
from wetterdienst.exceptions import InvalidEnumeration
def test_parse_enumeration_from_template():
assert (
parse_enumeration_from_template("climate_summary", DWDObservationParameterSet)
== DWDObservationParameterSet.CLIMATE_SUMMARY
)
assert (
parse_enumeration_from_template("kl", DWDObservationParameterSet)
== DWDObservationParameterSet.CLIMATE_SUMMARY
)
with pytest.raises(InvalidEnumeration):
parse_enumeration_from_template("climate", DWDObservationParameterSet)
def test_coerce_field_types():
df = pd.DataFrame(
{
"QN": ["1"],
"RS_IND_01": ["1"],
"DATE": ["1970010100"],
"END_OF_INTERVAL": ["1970010100:00"],
"V_VV_I": ["P"],
}
)
expected_df = pd.DataFrame(
{
"QN": pd.Series([1], dtype= | pd.Int64Dtype() | pandas.Int64Dtype |
import json
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import os
import collections
import nltk.classify
import nltk.metrics
import numpy as np
"""
read all business id
"""
business=[]
users=[]
scores=[]
rates=[]
t=0
review= | pd.read_csv('dataset_review_emo_bayes.tsv', sep="\t") | pandas.read_csv |
import logging
import os
import numpy as np
import pandas as pd
from opencell.database import utils, constants
logger = logging.getLogger(__name__)
def parseFloat(val):
try:
val = float(val)
except ValueError:
val = float(str(val).replace(',', ''))
return val
def load_library_snapshot(filename):
'''
Load and format a CSV 'snapshot' of a library spreadsheet
These 'snapshots' are of the google sheet created/maintained by Manu
that contains the crispr designs for all plates
'''
# define maps from the column names in the google sheets
# to the column names in the models.metadata.CrisprDesign table
# (all required columns are included, even if their name is unchanged)
# column names in the original 'library' google sheet (containing plates 1-22)
library_columns = {
'plate_id': 'plate_id',
'well_id': 'well_id',
'gene_name': 'target_name',
'family': 'target_family',
'enst_id': 'enst_id',
'terminus_to_tag': 'target_terminus',
'protospacer_name': 'protospacer_name',
'protospacer_note': 'protospacer_notes',
'protospacer_sequence': 'protospacer_sequence',
'ultramer_name': 'template_name',
'ultramer_note': 'template_notes',
'ultramer_sequence': 'template_sequence',
}
# alternative column names for some columns, specific to the 'library v1.1' spreadsheet
# (this sheet starts with plate23 in Jan 2022)
library_columns_2022 = {
'tagged_terminus': 'target_terminus',
'ensembl_transcript_id': 'enst_id',
'gRNA_sequence': 'protospacer_sequence',
'donor_sequence': 'template_sequence',
}
library_columns.update(library_columns_2022)
library = pd.read_csv(filename)
library.rename(columns=library_columns, inplace=True)
# for clarity, format the plate_ids here
library['plate_id'] = library.plate_id.apply(utils.format_plate_design_id)
# drop any extraneous columns
dropped_columns = list(set(library.columns).difference(library_columns.values()))
library = library.drop(labels=dropped_columns, axis=1)
return library
def load_electroporation_history(filename):
'''
Load and format a 'snapshot' of the list of electroporations
(this is a google sheet from Manu)
Expected columns: ('plate_id', 'date', 'notes')
'''
electroporation_columns = {
'plate_id': 'plate_id',
'electroporation_date': 'date',
'comment': 'notes',
}
electroporations = pd.read_csv(filename)
electroporations.rename(columns=electroporation_columns, inplace=True)
# format the plate_id
electroporations['plate_id'] = electroporations.plate_id.apply(
utils.format_plate_design_id)
# drop unneeded columns
electroporations = electroporations[list(electroporation_columns.values())]
return electroporations
def load_legacy_microscopy_master_key(filepath):
'''
Load and format a snapshot of the 'legacy' tab of the 'Pipeline-microscopy-master-key' google sheet
These are all pipeline-related microscopy acquisitions prior to the transition to PML-based IDs
(which occurred at ML0196).
Note that these acquisitions have IDs of the form 'MLxxxx_YYYYMMDD'.
'''
md = pd.read_csv(filepath)
md = md.rename(columns={c: c.replace(' ', '_').lower() for c in md.columns})
md = md.rename(columns={
'id': 'legacy_id',
'automated_acquisition?': 'automation',
'acquisition_notes': 'notes',
'primary_imager': 'imager',
})
md = md.drop(labels=[c for c in md.columns if c.startswith('unnamed')], axis=1)
# separate the ML ID itself from the date
md['ml_id'] = md.legacy_id.apply(lambda s: s.split('_')[0])
# parse the date from the ML-style ID
md['date'] = pd.to_datetime(md.legacy_id.apply(lambda s: s.split('_')[1]))
# prepend the P to create the PML-style ID
md['pml_id'] = [f'P{ml_id}' for ml_id in md.ml_id]
# columns to retain
md = md[['pml_id', 'date', 'automation', 'imager', 'description', 'notes']]
return md
def load_pipeline_microscopy_master_key(filepath):
'''
Load a CSV snapshot of the 'pipeline-microscopy-master-key' google sheet
'''
snapshot = pd.read_csv(filepath)
snapshot.rename(columns={'id': 'pml_id'}, inplace=True)
snapshot.dropna(how='any', subset=['pml_id', 'date'], axis=0, inplace=True)
return snapshot
def load_pipeline_microscopy_dataset_metadata(filepath):
'''
Load the 'fov-metadata.csv' file from a PML dataset directory
(this file is generated by dragonfly_automation.qc.pipeline_plate_qc.construct_fov_metadata)
This method drops rows for which the manually_flagged column is set to True
and constructs the raw_filepath column. Other than that, it does no data validation.
We rely on the implicit validation that occurs when the dataframe returned by this method
is used to populate the MicroscopyFOV table in PolyclonalLineOperations.insert_microscopy_fovs
'''
metadata = | pd.read_csv(filepath) | pandas.read_csv |
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
load_from_disk = True
start_date = '2018-04-01 00:00:00'
end_date = '2018-05-01 23:59:59'
site = 'MWT2'
es = Elasticsearch(['atlas-kibana.mwt2.org:9200'], timeout=60)
indices = "traces"
print("start:", start_date, "end:", end_date)
start = int(pd.Timestamp(start_date).timestamp())
end = int(pd.Timestamp(end_date).timestamp())
my_query = {
"_source": ["time_start", "time_end", "site", "event", "scope", "filename", "filesize"],
'query': {
'bool': {
'must': [
{'range': {'time_start': {'gte': start, 'lt': end}}},
{'exists': {"field": "filename"}},
{'wildcard': {'site': site + '*'}},
{'wildcard': {'event': 'get*'}},
# {'term': {'event': 'download'}},
]
}
}
}
# "transfer_start", "transfer_end",
if load_from_disk:
all_accesses = pd.read_hdf(site + '.h5', key=site, mode='r')
else:
scroll = scan(client=es, index=indices, query=my_query)
count = 0
requests = []
for res in scroll:
r = res['_source']
# requests.append([r['scope'] + r['filename'], r['filesize'], r['time_start'], r['time_end']])
requests.append([r['scope'] + r['filename'], r['filesize'], r['time_start']])
# if count < 2:
# print(res)
if not count % 100000:
print(count)
# if count > 100000:
# break
count = count + 1
all_accesses = | pd.DataFrame(requests) | pandas.DataFrame |
import math
from abc import ABC
from typing import Optional, Iterable
import pandas as pd
from django.db import connection
from pandas import DataFrame
from recipe_db.analytics import METRIC_PRECISION, POPULARITY_START_MONTH, POPULARITY_CUT_OFF_DATE
from recipe_db.analytics.scope import RecipeScope, StyleProjection, YeastProjection, HopProjection, \
FermentableProjection
from recipe_db.analytics.utils import remove_outliers, get_style_names_dict, get_hop_names_dict, get_yeast_names_dict, \
get_fermentable_names_dict, RollingAverage, Trending, months_ago
from recipe_db.models import Recipe
class RecipeLevelAnalysis(ABC):
def __init__(self, scope: RecipeScope) -> None:
self.scope = scope
class RecipesListAnalysis(RecipeLevelAnalysis):
def random(self, num_recipes: int) -> Iterable[Recipe]:
scope_filter = self.scope.get_filter()
query = '''
SELECT r.uid AS recipe_id
FROM recipe_db_recipe AS r
WHERE r.name IS NOT NULL {}
ORDER BY random()
LIMIT %s
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters + [num_recipes])
recipe_ids = df['recipe_id'].values.tolist()
if len(recipe_ids) == 0:
return []
return Recipe.objects.filter(uid__in=recipe_ids).order_by('name')
class RecipesCountAnalysis(RecipeLevelAnalysis):
def total(self) -> int:
scope_filter = self.scope.get_filter()
query = '''
SELECT
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
if len(df) == 0:
return 0
return df['total_recipes'].values.tolist()[0]
def per_day(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created) AS day,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created)
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('day')
return df
def per_month(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created, 'start of month')
ORDER BY month ASC
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('month')
return df
def per_style(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
ras.style_id,
count(DISTINCT r.uid) AS total_recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles ras
ON r.uid = ras.recipe_id
WHERE
1 {}
GROUP BY ras.style_id
ORDER BY ras.style_id ASC
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('style_id')
return df
class RecipesPopularityAnalysis(RecipeLevelAnalysis):
def popularity_per_style(
self,
projection: Optional[StyleProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or StyleProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ras.style_id,
count(r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles AS ras
ON r.uid = ras.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY month, ras.style_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('style_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['style_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'style_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top styles
if top_ids is not None:
smoothened['style_id'] = pd.Categorical(smoothened['style_id'], top_ids)
smoothened = smoothened.sort_values(['style_id', 'month'])
smoothened['beer_style'] = smoothened['style_id'].map(get_style_names_dict())
return smoothened
def popularity_per_hop(
self,
projection: Optional[HopProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or HopProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rh.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipehop AS rh
ON r.uid = rh.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), rh.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['hop'] = smoothened['kind_id'].map(get_hop_names_dict())
return smoothened
def popularity_per_fermentable(
self,
projection: Optional[FermentableProjection] = None,
num_top: Optional[int] = None,
) -> DataFrame:
projection = projection or FermentableProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rf.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipefermentable AS rf
ON r.uid = rf.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), rf.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['fermentable'] = smoothened['kind_id'].map(get_fermentable_names_dict())
return smoothened
def popularity_per_yeast(
self,
projection: Optional[YeastProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or YeastProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ry.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipeyeast AS ry
ON r.uid = ry.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), ry.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['yeast'] = smoothened['kind_id'].map(get_yeast_names_dict())
return smoothened
class RecipesMetricHistogram(RecipeLevelAnalysis):
def metric_histogram(self, metric: str) -> DataFrame:
precision = METRIC_PRECISION[metric] if metric in METRIC_PRECISION else METRIC_PRECISION['default']
scope_filter = self.scope.get_filter()
query = '''
SELECT round({}, {}) as {}
FROM recipe_db_recipe AS r
WHERE
{} IS NOT NULL
{}
'''.format(metric, precision, metric, metric, scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = remove_outliers(df, metric, 0.02)
if len(df) == 0:
return df
bins = 16
if metric in ['og', 'fg'] and len(df) > 0:
abs = df[metric].max() - df[metric].min()
bins = max([1, round(abs / 0.002)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
if metric in ['abv', 'srm'] and len(df) > 0:
abs = df[metric].max() - df[metric].min()
bins = max([1, round(abs / 0.1)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
if metric in ['ibu'] and len(df) > 0:
abs = df[metric].max() - df[metric].min()
bins = max([1, round(abs)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
histogram = df.groupby([pd.cut(df[metric], bins, precision=precision)])[metric].agg(['count'])
histogram = histogram.reset_index()
histogram[metric] = histogram[metric].map(str)
return histogram
class RecipesTrendAnalysis(RecipeLevelAnalysis):
def _recipes_per_month_in_scope(self) -> DataFrame:
return RecipesCountAnalysis(self.scope).per_month()
def trending_styles(self, trend_window_months: int = 24) -> DataFrame:
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ras.style_id,
count(r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles AS ras
ON r.uid = ras.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
GROUP BY month, ras.style_id
'''.format(scope_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months + 1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'style_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['style_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'style_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['style_id'] = pd.Categorical(smoothened['style_id'], trending_ids)
smoothened = smoothened.sort_values(['style_id', 'month'])
smoothened['beer_style'] = smoothened['style_id'].map(get_style_names_dict())
return smoothened
def trending_hops(self, projection: Optional[HopProjection] = None, trend_window_months: int = 24) -> DataFrame:
projection = projection or HopProjection()
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rh.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipehop AS rh
ON r.uid = rh.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
AND rh.kind_id IS NOT NULL
{}
{}
GROUP BY date(r.created, 'start of month'), rh.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months+1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'kind_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['kind_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], trending_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['hop'] = smoothened['kind_id'].map(get_hop_names_dict())
return smoothened
def trending_yeasts(self, projection: Optional[YeastProjection] = None, trend_window_months: int = 24) -> DataFrame:
projection = projection or YeastProjection()
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ry.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipeyeast AS ry
ON r.uid = ry.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
AND ry.kind_id IS NOT NULL
{}
{}
GROUP BY date(r.created, 'start of month'), ry.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months+1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'kind_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['kind_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['kind_id'] = | pd.Categorical(smoothened['kind_id'], trending_ids) | pandas.Categorical |
'''Reads data files in input folder(home by default, -Gi is flag for passing new one) then calls GDDcalculator.py,
passes lists of maximum and minimum temperatures also base and upper, takes list of GDD from that and concatenates it
with associated Data Frame'''
from GDDcalculate import *
import argparse
import pandas as pd
import glob
print("GDD.py starts")
parser = argparse.ArgumentParser(description="Calculating GDD") # Argument parser for command-line friendly script
parser.add_argument("-tbase", "-b", type=float, default=10, help="Base temperature") # takes base temperature
parser.add_argument("-tupper", "-u", type=float, default=30, help="Upper temperature") # takes upper temperature
parser.add_argument("-GDDinfolder", "-Gi", type=str, default="./input/", help="Folder containing GDD input files.")
parser.add_argument("-GDDoutfolder", "-Go", type=str, default="./input/", help="Folder that will keep GDD output files.")
args = parser.parse_args()
for fname in glob.glob(args.GDDinfolder + "*.csv"): # For loop for .csv files in given input folder
D = pd.read_csv(fname, header=0) # skipped rows will change if data frame's shape change###############IMPORTANT
df = pd.DataFrame(D)
print(df.columns.values)
tempmax = df["Max Temp (°C)"]
tempmin = df["Min Temp (°C)"] # Data frame's column
year = list(df['Year'])[1] # Just so that we can name final file!
name = list(df['Name'])[1]
length = len( | pd.Series.dropna(tempmin) | pandas.Series.dropna |
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tarfile
import warnings
from collections import defaultdict
import numpy as np
import tensorflow as tf
from scipy import linalg
from six.moves import urllib
import pandas as pd
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
pool3 = None
pool3_mean_real = None
pool3_std_real = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_features(images):
assert ((images.shape[3]) == 3)
assert (np.max(images) > 10)
assert (np.min(images) >= 0.0)
images = images.astype(np.float32)
bs = 100
sess = tf.get_default_session()
preds = []
feats = []
for inp in np.array_split(images, round(images.shape[0] / bs)):
# sys.stdout.write(".")
# sys.stdout.flush()
[feat, pred] = sess.run([pool3, softmax], {'InputTensor:0': inp})
feats.append(feat.reshape(-1, 2048))
preds.append(pred)
feats = np.concatenate(feats, 0)
preds = np.concatenate(preds, 0)
return preds, feats
def update_fid_mean(images):
global pool3_mean_real
global pool3_std_real
preds, feats = get_features(images)
pool3_mean_real = np.mean(feats, axis=0)
pool3_std_real = np.cov(feats, rowvar=False)
def calc_scores(images, splits=10):
preds, feats = get_features(images)
# calc inception
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
inception_m = np.mean(scores)
inception_s = np.std(scores)
# fid
mu2 = np.mean(feats, axis=0)
sigma2 = np.cov(feats, rowvar=False)
fid = calculate_frechet_distance(pool3_mean_real, pool3_std_real, mu2, sigma2)
return inception_m, inception_s, fid
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations of the pool_3 layer, precalcualted
on an representive data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
precalcualted on an representive data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
# -------------------------------------------------------------------------------
# This function is called automatically.
def _init_inception():
global softmax
global pool3
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.GFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Import model with a modification in the input tensor to accept arbitrary
# batch size.
input_tensor = tf.placeholder(tf.float32, shape=[None, None, None, 3],
name='InputTensor')
_ = tf.import_graph_def(graph_def, name='inception_v3',
input_map={'ExpandDims:0': input_tensor})
# Works with an arbitrary minibatch size.
pool3 = tf.get_default_graph().get_tensor_by_name('inception_v3/pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
if 'inception_v3' in op.name:
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.set_shape(tf.TensorShape(new_shape))
w = tf.get_default_graph().get_operation_by_name("inception_v3/softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
softmax = tf.nn.softmax(logits)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='index of gpu to be used')
parser.add_argument('--results_dir', type=str, default='./results/gans',
help='directory to save the results to')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
result_dir = args.results_dir
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
scores = defaultdict(list)
with tf.Session() as sess:
_init_inception()
update_fid_mean(x_train)
for iter in range(1000, 50001, 1000):
print('run iteration: '+str(iter))
file = os.path.join(result_dir, 'gen_imgs', 'iter{}.npy'.format(iter))
if os.path.isfile(file):
imgs = np.load(file)
inception_m, inception_s, fid = calc_scores(imgs)
scores['Iteration'].append(iter)
scores['Inception_mean'].append(inception_m)
scores['Inception std'].append(inception_s)
scores['FID'].append(fid)
scores = | pd.DataFrame(scores) | pandas.DataFrame |
"""
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
import tempfile
from six.moves import urllib
import urllib.request
import sys
import os
import math
import time
import logging
import concurrent.futures as cf
import numpy as np
import pandas as pd
import sklearn.preprocessing as skp
logging.basicConfig(format='%(asctime)s %(message)s')
logging.root.setLevel(logging.NOTSET)
NUM_INTEGER_COLUMNS = 13
NUM_CATEGORICAL_COLUMNS = 26
NUM_TOTAL_COLUMNS = 1 + NUM_INTEGER_COLUMNS + NUM_CATEGORICAL_COLUMNS
MAX_NUM_WORKERS = NUM_TOTAL_COLUMNS
INT_NAN_VALUE = np.iinfo(np.int32).min
CAT_NAN_VALUE = '80000000'
def idx2key(idx):
if idx == 0:
return 'label'
return 'I' + str(idx) if idx <= NUM_INTEGER_COLUMNS else 'C' + str(idx - NUM_INTEGER_COLUMNS)
def _fill_missing_features_and_split(chunk, series_list_dict):
for cid, col in enumerate(chunk.columns):
NAN_VALUE = INT_NAN_VALUE if cid <= NUM_INTEGER_COLUMNS else CAT_NAN_VALUE
result_series = chunk[col].fillna(NAN_VALUE)
series_list_dict[col].append(result_series)
def _merge_and_transform_series(src_series_list, col, dense_cols,
normalize_dense):
result_series = pd.concat(src_series_list)
if col != 'label':
unique_value_counts = result_series.value_counts()
unique_value_counts = unique_value_counts.loc[unique_value_counts >= 6]
unique_value_counts = set(unique_value_counts.index.values)
NAN_VALUE = INT_NAN_VALUE if col.startswith('I') else CAT_NAN_VALUE
result_series = result_series.apply(
lambda x: x if x in unique_value_counts else NAN_VALUE)
if col == 'label' or col in dense_cols:
result_series = result_series.astype(np.int64)
le = skp.LabelEncoder()
result_series = pd.DataFrame(le.fit_transform(result_series))
if col != 'label':
result_series = result_series + 1
else:
oe = skp.OrdinalEncoder(dtype=np.int64)
result_series = pd.DataFrame(oe.fit_transform(pd.DataFrame(result_series)))
result_series = result_series + 1
if normalize_dense != 0:
if col in dense_cols:
mms = skp.MinMaxScaler(feature_range=(0,1))
result_series = pd.DataFrame(mms.fit_transform(result_series))
result_series.columns = [col]
min_max = (np.int64(result_series[col].min()), np.int64(result_series[col].max()))
if col != 'label':
logging.info('column {} [{}, {}]'.format(col, str(min_max[0]),str(min_max[1])))
return [result_series, min_max]
def _convert_to_string(series):
return series.astype(str)
def _merge_columns_and_feature_cross(series_list, min_max, feature_pairs,
feature_cross):
name_to_series = dict()
for series in series_list:
name_to_series[series.columns[0]] = series.iloc[:,0]
df = | pd.DataFrame(name_to_series) | pandas.DataFrame |
import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ----------------------------------------------------------------------------------------------------------------------
FIELDS = {
'Finance.BusinessComposition': {
'bz_item': '主营业务来源',
'bz_sales': '主营业务收入(元)',
'bz_profit': '主营业务利润(元)',
'bz_cost': '主营业务成本(元)',
'curr_type': '货币代码',
'update_flag': '是否更新',
},
}
# -------------------------------------------------------- Prob --------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_name': 'finance_business_tushare_pro',
'plugin_version': '0.0.0.1',
'tags': ['tusharepro']
}
def plugin_adapt(uri: str) -> bool:
return uri in FIELDS.keys()
def plugin_capacities() -> list:
return list(FIELDS.keys())
# ----------------------------------------------------------------------------------------------------------------------
# fina_mainbz: https://tushare.pro/document/2?doc_id=81
def __fetch_bussiness_data_by_type(pro: ts.pro_api, ts_code: str, classify: str,
since: datetime.datetime, until: datetime.datetime):
limit = 10
result = None
derive_time = until
while limit > 0:
ts_since = since.strftime('%Y%m%d')
ts_until = derive_time.strftime('%Y%m%d')
ts_delay('fina_mainbz')
sub_result = pro.fina_mainbz(ts_code=ts_code, start_date=ts_since, end_date=ts_until, type=classify)
if not isinstance(sub_result, pd.DataFrame) or sub_result.empty:
break
result = pd.concat([result, sub_result])
result = result.reset_index(drop=True)
result_since = min(sub_result['end_date'])
result_since = text_auto_time(result_since)
# End condition
if result_since == derive_time or len(sub_result) < 100:
break
limit -= 1
derive_time = result_since
if isinstance(result, pd.DataFrame):
result = result.drop_duplicates()
return result
def __fetch_business_data(**kwargs) -> pd.DataFrame:
uri = kwargs.get('uri')
result = check_execute_test_flag(**kwargs)
if result is None:
period = kwargs.get('period')
ts_code = pickup_ts_code(kwargs)
since, until = normalize_time_serial(period, default_since(), today())
# since_limit = years_ago_of(until, 3)
# since = max([since, since_limit])
# Because of the implementation of this interface, we only fetch the annual report
# since_year = since.year
# until_year = until.year
result = None
pro = ts.pro_api(TS_TOKEN)
try:
if is_slice_update(ts_code, since, until):
ts_since = since.strftime('%Y%m%d')
clock = Clock()
result_product = pro.fina_mainbz_vip(ts_since, type='P')
result_area = pro.fina_mainbz_vip(ts_since, type='D')
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
else:
clock = Clock()
result_product = __fetch_bussiness_data_by_type(pro, ts_code, 'P', since, until)
result_area = __fetch_bussiness_data_by_type(pro, ts_code, 'D', since, until)
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
if isinstance(result_product, pd.DataFrame) and not result_product.empty:
result_product['classification'] = 'product'
if isinstance(result_area, pd.DataFrame) and not result_area.empty:
result_area['classification'] = 'area'
result = | pd.concat([result_product, result_area]) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression,RidgeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
import tensorflow as tf
from tensorflow.keras import layers
from copy import copy
def transform_data(data_orig):
"""
Function that takes experimental data and gives us the
dependent/independent variables for analysis.
Parameters
----------
data : Pandas DataFrame or string.
If this is a DataFrame, it should have the columns `contrast1` and
`answer` from which the dependent and independent variables will be
extracted. If this is a string, it should be the full path to a csv
file that contains data that can be read into a DataFrame with this
specification.
Returns
-------
X : array
train data, information about each passenger
features: ['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp',\
'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
y : array
label of survival of each passenger
"""
if isinstance(data_orig, str):
data_orig = pd.read_csv(data_orig)
data = data_orig
num_rows,num_variables = data.shape
all_columns = data.columns.tolist()
clean_data(data,all_columns,ignore_na=False,fill_mode="prob")
expand_features(data)
variables = ['Pclass','Sex',"Fare","Age","SibSp","Parch","Embarked","Fam_size",\
"cabin_no","ticket_no","friend","Fare_person","Child"]
X = pd.get_dummies(data[variables])
## normalise features to zero man and unit variance
scaler = preprocessing.StandardScaler().fit(X)
X_scaled = scaler.transform(X)
X = | pd.DataFrame(X_scaled, columns=X.columns) | pandas.DataFrame |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: test.py
@time: 2019-05-15 15:09
"""
import pandas as pd
if __name__ == '__main__':
mode = 1
if mode == 1:
df = | pd.read_excel('zy_all.xlsx', converters={'出险人客户号': str}) | pandas.read_excel |
# coding:utf-8
# 用 ARMA 进行时间序列预测
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARMA
from statsmodels.graphics.api import qqplot
# 创建数据
data = [5922, 5308, 5546, 5975, 2704, 1767, 4111, 5542, 4726, 5866, 6183, 3199, 1471, 1325, 6618, 6644, 5337, 7064, 2912, 1456, 4705, 4579, 4990, 4331, 4481, 1813, 1258, 4383, 5451, 5169, 5362, 6259, 3743, 2268, 5397, 5821, 6115, 6631, 6474, 4134, 2728, 5753, 7130, 7860, 6991, 7499, 5301, 2808, 6755, 6658, 7644, 6472, 8680, 6366, 5252, 8223, 8181, 10548, 11823, 14640, 9873, 6613, 14415, 13204, 14982, 9690, 10693, 8276, 4519, 7865, 8137, 10022, 7646, 8749, 5246, 4736, 9705, 7501, 9587, 10078, 9732, 6986, 4385, 8451, 9815, 10894, 10287, 9666, 6072, 5418]
data=pd.Series(data)
data_index = sm.tsa.datetools.dates_from_range('1901','1990')
# 绘制数据图
data.index = | pd.Index(data_index) | pandas.Index |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
| pd.Timestamp("2015-01-20") | pandas.Timestamp |
"""
分析模块
"""
import warnings
from typing import Tuple, Union
import re
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.api import OLS, add_constant
from QUANTAXIS.QAFactor import utils
from QUANTAXIS.QAFactor.parameters import DAYS_PER_MONTH, DAYS_PER_QUARTER, DAYS_PER_YEAR
from QUANTAXIS.QAFactor.process import demean_forward_returns
from QUANTAXIS.QAFactor.utils import get_forward_returns_columns
def mean_return_by_quantile(
factor_data: pd.DataFrame,
by_datetime: bool = False,
by_group: bool = False,
demeaned: bool = True,
group_adjust: bool = False,
) -> Tuple:
"""
按分位计算因子远期收益和标准差
参数
---
:param factor_data: 索引为 ['日期' '资产'] 的 MultiIndex, values 包括因子的值,各期因子远期收益,因子分位数, 因子分组 [可选], 因子权重 [可选]
:param by_datetime: 按日期计算各分位数的因子远期收益均值
:param by_group: 按分组计算各分位数的因子远期收益均值
:param demeaned: 按日期计算超额收益
:param group_adjust: 按日期和分组计算超额收益
返回
---
:return mean_ret: 各分位数因子远期收益均值
:return std_error_ret: 各分位数因子远期收益标准差
"""
if group_adjust:
grouper = ["datetime", "group"]
factor_data = demean_forward_returns(factor_data, grouper)
elif demeaned:
factor_data = demean_forward_returns(factor_data)
else:
factor_data = factor_data.copy()
grouper = ["factor_quantile"]
if by_datetime:
grouper.append("datetime")
if by_group:
grouper.append("group")
mean_ret, std_error_ret = weighted_mean_return(factor_data, grouper=grouper)
return mean_ret, std_error_ret
def weighted_mean_return(factor_data: pd.DataFrame, grouper: list):
"""
计算加权平均收益/标准差
"""
forward_returns_columns = get_forward_returns_columns(factor_data.columns)
def agg(values, weights):
count = len(values)
average = np.average(values, weights=weights, axis=0)
variance = (
np.average((values - average)**2,
weights=weights,
axis=0) * count / max((count - 1),
1)
)
return pd.Series(
[average,
np.sqrt(variance),
count],
index=["mean",
"std",
"count"]
)
group_stats = factor_data.groupby(grouper)[forward_returns_columns.append(
pd.Index(["weights"])
)].apply(
lambda x: x[forward_returns_columns].
apply(agg,
weights=x["weights"].fillna(0.0).values)
)
mean_ret = group_stats.xs("mean", level=-1)
std_error_ret = group_stats.xs(
"std",
level=-1
) / np.sqrt(group_stats.xs("count",
level=-1))
return mean_ret, std_error_ret
def mean_returns_spread(
mean_returns: pd.DataFrame,
upper_quant: int,
lower_quant: int,
std_err=None
):
"""
计算 upper_quant 与 lower_quant 之间的收益差,与联合收益标准差
参数
---
:param mean_returns: 平均回报
:param upper_quant: 上分位
:param lower_quant: 下分位
:param std_err: 收益标准差
"""
mean_return_difference = mean_returns.xs(upper_quant
) - mean_returns.xs(lower_quant)
if std_err is None:
joint_std_err = None
else:
std1 = std_err.xs(upper_quant)
std2 = std_err.xs(lower_quant)
joint_std_err = np.sqrt(std1**2 + std2**2)
return mean_return_difference, joint_std_err
def factor_alpha_beta(
factor_data: pd.DataFrame,
returns: pd.DataFrame = None,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
):
"""
计算因子的 alpha (超额收益), alpha 的 t-统计量 以及 beta 值
参数
---
:param factor_data: 索引为 ['日期' '股票'] 的 MultiIndex, values 包括因子值,远期收益,因子分位,因子分组 [可选]
:param returns: 因子远期收益,默认为 None, 如果为 None 的时候,会通过调用 `factor_returns` 来计算相应的收益
:param demeaned: 是否基于一个多空组合
:param group_adjust: 是否进行行业中性处理
:param equal_weight:
返回
---
"""
if returns is None:
returns = factor_returns(
factor_data,
demeaned,
group_adjust,
equal_weight
)
universe_ret = (
factor_data.groupby(level="datetime")[get_forward_returns_columns(
factor_data.columns
)].mean().loc[returns.index]
)
if isinstance(returns, pd.Series):
returns.name = universe_ret.columns.values[0]
returns = pd.DataFrame(returns)
alpha_beta = pd.DataFrame()
for period in returns.columns.values:
x = universe_ret[period].values
y = returns[period].values
x = add_constant(x)
reg_fit = OLS(y, x).fit()
try:
alpha, beta = reg_fit.params
except ValueError:
alpha_beta.loc["Ann. alpha", period] = np.nan
alpha_beta.loc["beta", period] = np.nan
else:
freq_adjust = pd.Timedelta(days=DAYS_PER_YEAR) / pd.Timedelta(
utils.get_period(period.replace("period_",
""))
)
alpha_beta.loc["Ann. alpha",
period] = (1 + alpha)**freq_adjust - 1.0
alpha_beta.loc["beta", period] = beta
return alpha_beta
def factor_returns(
factor_data: pd.DataFrame,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
by_asset: bool = False,
):
"""
计算按因子值加权的投资组合收益
参数
---
:param factor_data: 因子数据
:param demeaned: 是否构建多空组合
:param group_adjust: 是否按分组进行多空组合
:param equal_weight: 针对因子中位数分别构建多空组合
:param by_asset: 按股票展示组合收益, 默认为 False
返回值
---
"""
weights = factor_weights(factor_data, demeaned, group_adjust, equal_weight)
weighted_returns = factor_data[get_forward_returns_columns(
factor_data.columns
)].multiply(
weights,
axis=0
)
if by_asset:
returns = weighted_returns
else:
returns = weighted_returns.groupby(level="datetime").sum()
return returns
def factor_weights(
factor_data: pd.DataFrame,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
):
def to_weights(group, _demeaned, _equal_weight):
if _equal_weight:
group = group.copy()
if _demeaned:
# top assets positive weights, bottom ones negative
group = group - group.median()
negative_mask = group < 0
group[negative_mask] = -1.0
positive_mask = group > 0
group[positive_mask] = 1.0
if _demeaned:
# positive weights must equal negative weights
if negative_mask.any():
if negative_mask.sum() == 0:
group[negative_mask] = 0
group[negative_mask] /= negative_mask.sum()
if positive_mask.any():
if positive_mask.sum() == 0:
group[positive_mask] = 0
group[positive_mask] /= positive_mask.sum()
elif _demeaned:
group = group - group.mean()
if group.abs().sum() == 0: # 二分类可能和为 0
return group * 0.0
return group / group.abs().sum()
grouper = ["datetime"]
if group_adjust:
grouper.append("group")
weights = factor_data.groupby(grouper)["factor"].apply(
to_weights,
demeaned,
equal_weight
)
if group_adjust:
weights = weights.groupby(level="datetime"
).apply(to_weights,
False,
False)
return weights
def factor_information_coefficient(
factor_data: pd.DataFrame,
group_adjust: bool = False,
by_group: bool = False
):
"""
Computes the Spearman Rank Correlation based Information Coefficient (IC)
between factor values and N period forward returns for each period in
the factor index.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_adjust : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, compute period wise IC separately for each group.
Returns
-------
ic : pd.DataFrame
Spearman Rank correlation between factor and
provided forward returns.
"""
def src_ic(group):
f = group["factor"]
_ic = group[get_forward_returns_columns(
factor_data.columns
)].apply(lambda x: stats.spearmanr(x,
f)[0])
return _ic
factor_data = factor_data.copy()
grouper = ["datetime"]
if group_adjust:
factor_data = demean_forward_returns(factor_data, grouper + ["group"])
if by_group:
grouper.append("group")
ic = factor_data.groupby(grouper).apply(src_ic)
return ic
def quantile_turnover(
quantile_factor: pd.DataFrame,
quantile: int,
period: Union[int,
str] = 1
):
"""
Computes the proportion of names in a factor quantile that were
not in that quantile in the previous period.
Parameters
----------
quantile_factor : pd.Series
DataFrame with date, asset and factor quantile.
quantile : int
Quantile on which to perform turnover analysis.
period: string or int, optional
Period over which to calculate the turnover. If it is a string it must
follow pandas.Timedelta constructor format (e.g. '1 days', '1D', '30m',
'3h', '1D1h', etc).
Returns
-------
quant_turnover : pd.Series
Period by period turnover for that quantile.
"""
quant_names = quantile_factor[quantile_factor == quantile]
quant_name_sets = quant_names.groupby(
level=["datetime"]
).apply(lambda x: set(x.index.get_level_values("code")))
if isinstance(period, int):
name_shifted = quant_name_sets.shift(period)
else:
period = utils.get_period(period)
shifted_idx = utils.add_custom_calendar_timedelta(
quant_name_sets.index,
-pd.Timedelta(period)
)
name_shifted = quant_name_sets.reindex(shifted_idx)
name_shifted.index = quant_name_sets.index
new_names = (quant_name_sets - name_shifted).dropna()
quant_turnover = new_names.apply(lambda x: len(x)
) / quant_name_sets.apply(lambda x: len(x))
quant_turnover.name = quantile
return quant_turnover
def factor_rank_autocorrelation(
factor_data: pd.DataFrame,
period: Union[int,
str] = 1
):
grouper = ["datetime"]
ranks = factor_data.groupby(grouper)["factor"].rank()
asset_factor_rank = ranks.reset_index().pivot(
index="datetime",
columns="code",
values="factor"
)
if isinstance(period, int):
asset_shifted = asset_factor_rank.shift(period)
else:
shifted_idx = utils.add_custom_calendar_timedelta(
asset_factor_rank.index,
-pd.Timedelta(period),
)
asset_shifted = asset_factor_rank.reindex(shifted_idx)
asset_shifted.index = asset_factor_rank.index
autocorr = asset_factor_rank.corrwith(asset_shifted, axis=1)
autocorr.name = period
return autocorr
def cumulative_returns(returns: pd.DataFrame, period, freq=None):
if not isinstance(period, pd.Timedelta):
period = pd.Timedelta(period)
if freq is None:
freq = returns.index.freq
if freq is None:
freq = pd.tseries.offsets.BDay()
warnings.warn(
"'freq' not set, using business day calendar",
UserWarning
)
trades_idx = returns.index.copy()
returns_idx = utils.add_custom_calendar_timedelta(trades_idx, period)
full_idx = trades_idx.union(returns_idx)
sub_returns = []
while len(trades_idx) > 0:
sub_idx = []
next = trades_idx.min()
while next <= trades_idx.max():
sub_idx.append(next)
next = utils.add_custom_calendar_timedelta(
input=next,
timedelta=period,
)
try:
i = trades_idx.get_loc(next, method="bfill")
next = trades_idx[i]
except KeyError:
break
sub_idx = pd.DatetimeIndex(sub_idx, tz=full_idx.tz)
subret = returns[sub_idx]
subret = subret.reindex(full_idx)
for pret_idx in reversed(sub_idx):
pret = subret[pret_idx]
pret_end_idx = utils.add_custom_calendar_timedelta(pret_idx, period)
slice = subret[(subret.index > pret_idx)
& (subret.index <= pret_end_idx)].index
if | pd.isnull(pret) | pandas.isnull |
import pandas as pd
import numpy as np
import pytest
from features_creator.features_creator import *
@pytest.fixture
def data_df():
data = {
"week_payment1": [1.0, 2, 3],
"week_payment2": [4, 5.0, 6],
"week_payment3": [7, 8, 9.0],
"othercolumn": [1, 1, 1]}
df = pd.DataFrame(data)
return df
def test_calculate_average(data_df):
# Test for TypeError
with pytest.raises(TypeError):
# check if TypeError is raised when data is not a pandas dataframe
calculate_average(1, "week_payment")
# check if TypeError is raised when pattern is not a string
calculate_average(data_df, 1)
# Test for ValueError
with pytest.raises(ValueError):
# check if ValueError is raised when pattern is not a string
calculate_average(data_df, "")
# check if ValueError is raised when columns not found
calculate_average(data_df, "not_a_column")
# Test for return type
assert isinstance(calculate_average(data_df, "week_payment"), np.ndarray)
# Test the function with a dataframe with only one column
data_1col = {
"week_payment1": [1, 2, 3]}
df_1col = pd.DataFrame(data_1col)
assert np.array_equal(calculate_average(
df_1col, "week_payment"), np.array([1, 2, 3]))
# Test the function return correct value when there is only one row
data_1row = {
"week_payment1": [1]}
df_1row = pd.DataFrame(data_1row)
assert np.array_equal(calculate_average(
df_1row, "week_payment"), np.array([1]))
# Test the function return correct value
assert np.array_equal(calculate_average(
data_df, "week_payment"), np.array([4, 5, 6]))
def test_get_matching_column_names():
"""
Tests the `get_matching_column_names` function.
Verifies that it raises the correct exceptions, works in
"normal" usage, and does not return any extra columns.
"""
test_df = pd.DataFrame({
"week_payment1": [1, 2, 3],
"week_payment2": [1, 2, 3],
"week_payment3": [1, 2, 3],
"othercolumn": [5, 6, 7],
"week_payment_string4": [5, 6, 7]
})
# Returns the correct type
assert isinstance(get_matching_column_names(
test_df, "week_payment"), list), "Returned the wrong data type"
# Does not return extra columns
assert "othercolumn" not in get_matching_column_names(
test_df, "week_payment"), "`othercolumn` was returned"
assert "week_payment_string7" not in get_matching_column_names(
test_df, "week_payment"), "`week_payment_string7` was returned"
# Raises exceptions for wrong types
with pytest.raises(TypeError):
get_matching_column_names("FakeDF", "week_payment")
get_matching_column_names(test_df, [12, 34])
# Raises an exception for no matches
with pytest.raises(ValueError):
get_matching_column_names(test_df, "fake_string")
# Normal usage test
assert get_matching_column_names(test_df, "week_payment") == [
"week_payment1", "week_payment2", "week_payment3"], "Incorrect columns were returned"
test_df = pd.DataFrame({
"week_payment1": [1.0, 2, 3],
"week_payment2": [4, 5.0, 6],
"week_payment3": [7, 8, 9.0],
"othercolumn": [1, 1, 1]
})
@pytest.mark.parametrize(
'json',
(
### Check data type of the first argument:data
{"data": 3.141, "pattern": "week_payment", "check": "TypeErrorInput1"},
{"data": "test.txt", "pattern": "week_payment", "check": "TypeErrorInput1"},
{"data": ["list", "of", "words"], "pattern": "week_payment", "check": "TypeErrorInput1"},
### Check data type of the second argument:pattern
{"data": test_df, "pattern": 3.14, "check": "TypeErrorInput2"},
{"data": test_df, "pattern": | pd.DataFrame([]) | pandas.DataFrame |
import pull_mdsplus as pull
import pandas as pd
import numpy as np
import meas_locations as geo
import MDSplus as mds
import itertools
from scipy import interpolate
def load_gfile_mds(shot, time, tree="EFIT01", exact=False, connection=None, tunnel=True):
"""
This is scavenged from the load_gfile_d3d script on the EFIT repository,
except updated to run on python3.
shot: Shot to get gfile for.
time: Time of the shot to load gfile for, in ms.
tree: One of the EFIT trees to get the data from.
exact: If True will raise error if time does not exactly match any gfile
times. False will grab the closest time.
connection: An MDSplus connection to atlas.
tunnel: Set to True if accessing outside DIII-D network.
returns: The requested gfile as a dictionary.
"""
# Connect to server, open tree and go to g-file
if connection is None:
if tunnel is True:
connection = mds.Connection("localhost")
else:
connection = mds.Connection('atlas.gat.com')
connection.openTree(tree, shot)
base = 'RESULTS:GEQDSK:'
# get time slice
print("\nLoading gfile:")
print(" Shot: " + str(shot))
print(" Tree: " + tree)
print(" Time: " + str(time))
signal = 'GTIME'
k = np.argmin(np.abs(connection.get(base + signal).data() - time))
time0 = int(connection.get(base + signal).data()[k])
if (time != time0):
if exact:
raise RuntimeError(tree + ' does not exactly contain time %.2f' %time + ' -> Abort')
else:
print('Warning: ' + tree + ' does not exactly contain time %.2f' %time + ' the closest time is ' + str(time0))
print('Fetching time slice ' + str(time0))
time = time0
# store data in dictionary
g = {'shot': shot, 'time': time}
# get header line
header = connection.get(base + 'ECASE').data()[k]
# get all signals, use same names as in read_g_file
translate = {'MW': 'NR', 'MH': 'NZ', 'XDIM': 'Xdim', 'ZDIM': 'Zdim', 'RZERO': 'R0',
'RMAXIS': 'RmAxis', 'ZMAXIS': 'ZmAxis', 'SSIMAG': 'psiAxis', 'SSIBRY': 'psiSep',
'BCENTR': 'Bt0', 'CPASMA': 'Ip', 'FPOL': 'Fpol', 'PRES': 'Pres',
'FFPRIM': 'FFprime', 'PPRIME': 'Pprime', 'PSIRZ': 'psiRZ', 'QPSI': 'qpsi',
'NBBBS': 'Nlcfs', 'LIMITR': 'Nwall'}
for signal in translate:
g[translate[signal]] = connection.get(base + signal).data()[k]
g['R1'] = connection.get(base + 'RGRID').data()[0]
g['Zmid'] = 0.0
RLIM = connection.get(base + 'LIM').data()[:, 0]
ZLIM = connection.get(base + 'LIM').data()[:, 1]
g['wall'] = np.vstack((RLIM, ZLIM)).T
RBBBS = connection.get(base + 'RBBBS').data()[k][:int(g['Nlcfs'])]
ZBBBS = connection.get(base + 'ZBBBS').data()[k][:int(g['Nlcfs'])]
g['lcfs'] = np.vstack((RBBBS, ZBBBS)).T
KVTOR = 0
RVTOR = 1.7
NMASS = 0
RHOVN = connection.get(base + 'RHOVN').data()[k]
# convert floats to integers
for item in ['NR', 'NZ', 'Nlcfs', 'Nwall']:
g[item] = int(g[item])
# convert single (float32) to double (float64) and round
for item in ['Xdim', 'Zdim', 'R0', 'R1', 'RmAxis', 'ZmAxis', 'psiAxis', 'psiSep', 'Bt0', 'Ip']:
g[item] = np.round(np.float64(g[item]), 7)
# convert single arrays (float32) to double arrays (float64)
for item in ['Fpol', 'Pres', 'FFprime', 'Pprime', 'psiRZ', 'qpsi', 'lcfs', 'wall']:
g[item] = np.array(g[item], dtype=np.float64)
# Construct (R,Z) grid for psiRZ
g['dR'] = g['Xdim']/(g['NR'] - 1)
g['R'] = g['R1'] + np.arange(g['NR'])*g['dR']
g['dZ'] = g['Zdim']/(g['NZ'] - 1)
NZ2 = int(np.floor(0.5*g['NZ']))
g['Z'] = g['Zmid'] + np.arange(-NZ2, NZ2+1)*g['dZ']
# normalize psiRZ
g['psiRZn'] = (g['psiRZ'] - g['psiAxis']) / (g['psiSep'] - g['psiAxis'])
return g
def rbs_into_df(number, probe, conn, start=2500, end=5000, step=500, verbal=False):
"""
Pulls RBS data from the MDSplus tree 'dp_probes' and puts it into a
DataFrame ready for analysis. Require ssh to r2d2 if remote.
number: Probe number.
probe: One of A, B or C.
conn: An MDSplus Connection returned via the pull.thin_connect function.
start: Start of time that will be analyzed (i.e. the first gfile loaded).
end: End of time for analysis (i.e. the last gfile loaded).
step: Time step for the above.
returns: A DataFrame formatted and ready to be filled with data (R-Rsep,
R-Rsep_omp, etc.)
"""
# Create array of times to be sampled.
times = np.arange(start, end, step)
# Get shots probe was in for and Rprobe. Same for U and D sides, obviously.
shots = pull.pull_shots(conn, probe + 'U', verbal=verbal)
rprobe = pull.pull_rprobe(conn, probe + 'U', probe_corr=True, verbal=verbal)
print("Shots to be analyzed: " + str(shots))
# Then pull the RBS data.
print('\nLoading ' + probe + 'U' + str(number) + ' data...')
rbs_dict_U = pull.pull_all_rbs(conn, number, probe + 'U', verbal=verbal)
print('\nLoading ' + probe + 'D' + str(number) + ' data...')
rbs_dict_D = pull.pull_all_rbs(conn, number, probe + 'D', verbal=verbal)
# Now prepare the DataFrame. Will have set of data at each time, at each
# shot. So essentially len(times)*len(shots) DataFrames stacked together.
rbs_df_U = pd.DataFrame(rbs_dict_U)
rbs_df_D = pd.DataFrame(rbs_dict_D)
# Want 'locs' as an index.
rbs_df_U.set_index('locs', inplace=True)
rbs_df_D.set_index('locs', inplace=True)
# Create set of DataFrames, len(times) of them, to be 'stacked' on top of each other.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(times))), keys=times, names=['times'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(times))), keys=times, names=['times'])
# Now do it again, except with shots.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(shots))), keys=shots, names=['shots'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(shots))), keys=shots, names=['shots'])
return rbs_df_U, rbs_df_D, rprobe
def fill_in_rbs_df(rbs_df_U, rbs_df_D, probe, rprobe, conn, verbal=False):
"""
Takes the rbs_df from above and fill it in with R-Rsep, R-Rsep_omp, etc. It
returns all if it, so that it may then be averaged and get the std. dev. of
after all the data colloction has taken place. Requires ssh to atlas if remote.
rbs_df_U: The DataFrame returned from rbs_into_df. Likewise for D.
probe: One of A, B or C.
rprobe: Radial position of probe tip returned from rbs_into_df.
conn: An MDSplus Connection object from the mds.Connection function (different
procedure compared to connecting to r2d2).
returns: Filled in rbs_df.
"""
if verbal:
print("Analyzing atlas relevant data...")
# Get the shots, times and locs from the rbs_df index. np.unique will sort
# the locs (don't want), so returning the indices and reordering will fix this.
shots = np.unique(rbs_df_U.index.get_level_values('shots').values)
times = np.unique(rbs_df_U.index.get_level_values('times').values)
locs_U, order_U = np.unique(rbs_df_U.index.get_level_values('locs').values, return_index=True)
locs_D, order_D = np.unique(rbs_df_D.index.get_level_values('locs').values, return_index=True)
locs_U = locs_U[order_U]
locs_D = locs_D[order_D]
# Extra columns to be filled out.
rbs_df_U['R-Rsep (cm)'] = pd.Series(); rbs_df_D['R-Rsep (cm)'] = pd.Series()
rbs_df_U['R-Rsep omp (cm)'] = pd.Series(); rbs_df_D['R-Rsep omp (cm)'] = pd.Series()
rbs_df_U['Psin'] = pd.Series(); rbs_df_D['Psin'] = pd.Series()
rbs_df_U['R (cm)'] = pd.Series(); rbs_df_D['R (cm)'] = pd.Series()
# Establish the Z to be used depending on the probe.
if probe == 'A': Z_probe = -0.188
elif probe == 'B': Z_probe = -0.1546
elif probe == 'C': Z_probe = -0.2054
else: print("Error in probe entry.")
for shot in shots:
for time in times:
try:
# Load gfile.
gfile = load_gfile_mds(shot, time, connection=conn, tunnel=True)
# Create grid of R's and Z's.
Rs, Zs = np.meshgrid(gfile['R'], gfile['Z'])
# Z and R of magnetic axis (where omp is), in m.
Z_axis = gfile['ZmAxis']
R_axis = gfile['RmAxis']
# Z's and R's of the separatrix, in m.
Zes = np.copy(gfile['lcfs'][:, 1][13:-17])
Res = np.copy(gfile['lcfs'][:, 0][13:-17])
# Only want right half of everything.
Rs_trunc = Rs > R_axis
# Interpolation functions of psin(R, Z) and R(psin, Z).
f_psin = interpolate.Rbf(Rs[Rs_trunc], Zs[Rs_trunc], gfile['psiRZn'][Rs_trunc])
f_Romp = interpolate.Rbf(gfile['psiRZn'][Rs_trunc], Zs[Rs_trunc], Rs[Rs_trunc], epsilon=0.00001)
f_Rs = interpolate.interp1d(Zes, Res, assume_sorted=False)
# R of the separatrix at each probe Z in cm.
Rsep = f_Rs(Z_probe) * 100.0
Rsep_omp = f_Rs(Z_axis) * 100.0
# Get R of each location along the probe in cm, then R-Rsep.
R_locs_U = geo.calc_R_meas(rprobe, locs_U, probe + 'U')
RminRsep_U = R_locs_U - Rsep
R_locs_D = geo.calc_R_meas(rprobe, locs_D, probe + 'D')
RminRsep_D = R_locs_D - Rsep
# Get the corresponding psins of each location along the probe.
psin_locs_U = f_psin(R_locs_U / 100.0, np.full((len(R_locs_U),), Z_probe))
psin_locs_D = f_psin(R_locs_D / 100.0, np.full((len(R_locs_D),), Z_probe))
# Calculate R_loc at the omp, then R-Rsep omp.
R_locs_omp_U = f_Romp(psin_locs_U, np.full((len(psin_locs_U),), Z_axis)) * 100.0
RminRsep_omp_U = R_locs_omp_U - Rsep_omp
R_locs_omp_D = f_Romp(psin_locs_D, np.full((len(psin_locs_D),), Z_axis)) * 100.0
RminRsep_omp_D = R_locs_omp_D - Rsep_omp
except:
print("Error loading this time.")
# Finally store all these in the corresponding part of the DataFrame.
rbs_df_U.loc[shot].loc[time]['R-Rsep (cm)'] = pd.Series(RminRsep_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['R-Rsep omp (cm)'] = pd.Series(RminRsep_omp_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['Psin'] = pd.Series(psin_locs_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['R (cm)'] = pd.Series(R_locs_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R-Rsep (cm)'] = pd.Series(RminRsep_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R-Rsep omp (cm)'] = pd.Series(RminRsep_omp_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['Psin'] = pd.Series(psin_locs_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R (cm)'] = pd.Series(R_locs_D, index=rbs_df_D.loc[shot].loc[time].index)
return rbs_df_U, rbs_df_D
def rbs_df_stats(rbs_df, U_or_D, verbal=False):
"""
Computes the average of each data point at each location along the probe.
rbs_df: DataFrame returned from the above 'fill_in_rbs_df'.
returns: DataFrame of averages at each location for each time during each
shot.
"""
if False:
print("Aggregating statistics over all shots and times...")
# First get how many locations there are. np.unique will sort them (don't want),
# so return index will give the indices to preserve order.
locs, order = np.unique(rbs_df.index.get_level_values('locs').values, return_index=True)
locs = locs[order]
nlocs = locs.size
# The DataFrames that will hold our results.
rbs_stat_df = pd.DataFrame()
err_df = | pd.DataFrame() | pandas.DataFrame |
import math
import load_data
import pickle
import pandas as pd
import numpy as np
import datetime
from collections import deque
import scipy.stats as st
import ast
import astpretty
import re
def main():
# Used first in Organization.ipynb
print('\nCell Output')
get_cell_output()
print('\nCell Stats')
get_cell_stats()
print('\nCell Order')
get_cell_order()
print('\nCell Types')
get_cell_types()
print('\nComments')
get_comments()
# Used first in Packages.ipynb
print('\nGet Imports')
get_nb_imports()
print('\nGet Code')
get_nb_code()
print('\nGetting nb_imports_code_df')
nb_imports_code_df = load_data.load_nb_imports(code = True)
print('\nnb_imports_code_df loaded')
cell_types_df = load_data.load_cell_types()
print('\ncell_types loaded')
cell_stats_df = load_data.load_cell_stats()
print('\ncell_stats loaded')
cell_info_code_df = cell_types_df.merge(
cell_stats_df, on = 'file'
).merge(
nb_imports_code_df.rename(columns={'code':'code_list'}), on = 'file'
)
print('\ndfs combined')
#Used first in APIs.ipynb
print('\nGet Objects')
get_all_objects(cell_info_code_df)
print('\nGet Lines Per Code Cell')
get_lines_per_code_cell(cell_info_code_df)
print('\nGet Function Definitions')
get_function_defs(cell_info_code_df)
print('\nGet Function Uses')
get_function_use(cell_info_code_df)
print('\nSeparate User-defined functions from not user-defined')
add_user_funcs()
# Used first in Struggles.ipynb
print('\nGet Erros')
get_errors()
print('\nGet Statuses')
get_statuses()
# Used first in Visualizations.ipynb
print('\nGet Visualization Uses')
get_vis_uses(nb_imports_code_df)
print('\nAdd Visualization Uses to Notebooks')
get_vis_uses_nb(nb_imports_code_df)
# Used first in Models.ipynb
print('\nGet Framework Uses')
get_framework_uses(nb_imports_code_df)
print('\nGet Magic')
get_magic()
def get_magic():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0,
usecols = ['file','cell_id','code'],
chunksize = 10000
)
def aggregate_special_lines(list_of_lines_of_code):
return [
l
for l in load_data.flatten([l.split('\n') for l in list_of_lines_of_code if str(l) != 'nan'])
if l.startswith('%') or '!' in l or
l.startswith('?') or l.endswith('?')
]
special_dfs = []
i = 0
start = datetime.datetime.now()
i = len(special_dfs)
for chunk in df_chunks:
df = chunk.groupby('file')['code'].aggregate(
aggregate_special_lines
).reset_index()
special_dfs.append(df)
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i+=1
end = datetime.datetime.now()
print('Chunks done in', end - start)
start = datetime.datetime.now()
special_df = pd.concat(
special_dfs,
sort = False
).reset_index(drop = True).groupby('file')['code'].aggregate(
load_data.flatten
).reset_index()
end = datetime.datetime.now()
print('Combined in', end - start)
start = datetime.datetime.now()
f = open('analysis_data/special_functions.df', 'wb')
pickle.dump(special_df, f)
f.close()
end = datetime.datetime.now()
print('Saved in', end - start)
def get_nb_code():
start = datetime.datetime.now()
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = ['file','code','cell_type'],
chunksize=10000
)
# 25 minutes
start = datetime.datetime.now()
i = 0
code_dfs = []
for chunk in df_chunks:
code_dfs.append(
chunk[chunk.cell_type == 'code'].groupby('file')['code'].aggregate(lambda x: list(x)).reset_index()
)
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i += 1
end = datetime.datetime.now()
print('Chunks', end - start)
code_df = pd.concat(code_dfs, sort = False).reset_index(drop=True)
start = datetime.datetime.now()
code_df = code_df.groupby('file')['code'].aggregate(load_data.flatten).reset_index()
end = datetime.datetime.now()
print('Combined', end - start)
print('now saving')
start = datetime.datetime.now()
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
code_df.to_csv('analysis_data/nb_code.csv', index = False)
print('saved to csv')
end = datetime.datetime.now()
print(end - start)
def get_all_objects(cell_info_code_df):
# 1.5 hours
start = datetime.datetime.now()
all_objects = []
unprocessed = 0
target_types = [ast.Name,ast.Tuple,
ast.Attribute, ast.Subscript,
ast.List
]
for i, row in cell_info_code_df.iterrows():
o = {
'file': row['file'],
'objects': []
}
try:
all_code = '\n'.join([
c for c in '\n'.join([l for l in row.code_list if type(l) == str]).split('\n')
if (c != '' and not c.strip().startswith('%') and
not c.strip().startswith('?') and not c.strip().startswith('!')
)
])
tree = ast.parse(all_code)
except Exception as e:
all_objects.append(o)
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start, unprocessed, 'unprocessed')
continue
for t in tree.body:
if type(t) == ast.Assign:
value_type = type(t.value)
for target in t.targets:
if type(target) in [ast.Tuple, ast.List]:
for node in ast.walk(target):
if type(node) == ast.Name:
o['objects'].append((node.id, value_type))
else:
if type(target) == ast.Name:
for node in ast.walk(target):
if type(node) == ast.Name:
o['objects'].append((node.id, value_type))
all_objects.append(o)
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Found objects', end - start)
all_objects_df = pd.DataFrame(all_objects)
# 14 seconds
start = datetime.datetime.now()
f = open('analysis_data/all_objects.df', 'wb')
pickle.dump(all_objects_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_lines_per_code_cell(cell_info_code_df):
# 12.5 minutes
start = datetime.datetime.now()
lines_per_code_cell = [
row['lines_of_code'] / row['code']
for i, row in cell_info_code_df.iterrows()
if row['code'] != 0
]
end = datetime.datetime.now()
print('Calculated', end - start)
# 0.2 seconds
start = datetime.datetime.now()
f = open('analysis_data/lines_per_code_cell.list', 'wb')
pickle.dump(lines_per_code_cell, f)
f.close()
end = datetime.datetime.now()
print('Saved',end - start)
def get_function_use(cell_info_code_df):
'''
Get all function calls from a python file
The MIT License (MIT)
Copyright (c) 2016 <NAME> <<EMAIL>>
'''
class FuncCallVisitor(ast.NodeVisitor):
def __init__(self):
self._name = deque()
@property
def name(self):
return '.'.join(self._name)
@name.deleter
def name(self):
self._name.clear()
def visit_Name(self, node):
self._name.appendleft(node.id)
def visit_Attribute(self, node):
try:
self._name.appendleft(node.attr)
self._name.appendleft(node.value.id)
except AttributeError:
self.generic_visit(node)
def get_func_calls(tree):
func_calls = []
for node in ast.walk(tree):
if isinstance(node, ast.Call):
callvisitor = FuncCallVisitor()
callvisitor.visit(node.func)
func_calls.append((callvisitor.name, [type(a) for a in node.args]))
return func_calls
# 1 hour 45 minutes
start = datetime.datetime.now()
function_use = {
'functions': [],
'parameters': [],
'file': []
}
unprocessed = 0
for i, row in cell_info_code_df.iterrows():
nb_funcs = []
nb_params = []
try:
all_code = '\n'.join([c for c in '\n'.join([l for l in row.code_list if str(l) != 'nan']).split('\n') if (c != '' and
str(c) != 'nan' and not c.strip().startswith('%') and not c.strip().startswith('?') and
not c.strip().startswith('!'))])
tree = ast.parse(all_code)
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
for t in tree.body:
try:
for f in get_func_calls(t):
if f[0] not in nb_funcs:
nb_funcs.append(f[0])
nb_params.append(len(f[1]))
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
function_use['functions'].append(nb_funcs)
function_use['parameters'].append(nb_params)
function_use['file'].append(row['file'])
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Gone through for function uses', end - start)
function_use_df = pd.DataFrame(function_use)
# 48 seconds
start = datetime.datetime.now()
f = open('analysis_data/nb_function_use.df', 'wb')
pickle.dump(function_use_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_function_defs(cell_info_code_df):
start = datetime.datetime.now()
unprocessed = 0
function_defs = {
'function': [],
'parameters':[],
'file': []
}
for i, row in cell_info_code_df.iterrows():
try:
all_code = '\n'.join([c for c in '\n'.join([l for l in row.code_list if str(l) != 'nan']).split('\n') if (c != '' and
str(c) != 'nan' and not c.strip().startswith('%') and not c.strip().startswith('?') and
not c.strip().startswith('!'))])
tree = ast.parse(all_code)
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
for t in tree.body:
if type(t) == ast.FunctionDef:
name = t.name
num_args = 0
for a in ast.walk(t.args):
if type(a) == ast.arg:
num_args += 1
function_defs['function'].append(name)
function_defs['parameters'].append(num_args)
function_defs['file'].append(row.file)
elif type(t) == ast.ClassDef:
name = t.name
num_args = 0
for b in t.body:
if type(b) == ast.FunctionDef and b.name == '__init__':
for a in ast.walk(b.args):
if type(a) == ast.arg and a.arg != 'self':
num_args += 1
elif type(b) == ast.FunctionDef:
name_b = name+"."+b.name
num_args_b = 0
for a in ast.walk(b.args):
if type(a) == ast.arg and a.arg != 'self':
num_args_b += 1
function_defs['function'].append(name_b)
function_defs['parameters'].append(num_args_b)
function_defs['file'].append(row.file)
function_defs['function'].append(name)
function_defs['parameters'].append(num_args)
function_defs['file'].append(row.file)
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Through cell_info_code for functions', end - start)
start = datetime.datetime.now()
function_defs_df = pd.DataFrame(function_defs)
f = open('analysis_data/function_defs.df', 'wb')
pickle.dump(function_defs_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def add_user_funcs():
notebooks = load_data.load_notebooks()
cell_stats_df = load_data.load_cell_stats()
cell_types_df = load_data.load_cell_types()
function_defs_df = load_data.load_function_defs()
function_use_df = load_data.load_function_use()
print('grouping...')
start = datetime.datetime.now()
function_defs_nb_df = function_defs_df.groupby('file')['function'].aggregate(lambda x: list(x)).reset_index().rename(columns={'function':'function_defs'})
end = datetime.datetime.now()
print('...grouped', end - start)
print('merging...')
start = datetime.datetime.now()
functions_df = function_use_df.merge(function_defs_nb_df, on = 'file', how = 'left')
functions_df.function_defs.loc[functions_df.function_defs.isna()] = [[]]*sum(functions_df.function_defs.isna())
end = datetime.datetime.now()
print('...merged', end - start)
start = datetime.datetime.now()
all_def = []
all_not = []
for i, row in functions_df.iterrows():
def_uses = [f for f in row.functions if f in row.function_defs]
not_uses = [f for f in row.functions if f not in row.function_defs]
all_def.append(def_uses)
all_not.append(not_uses)
if i%100000 == 0 or i == 100 or i == 1000:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print(end - start)
function_use_df['user_def'] = all_def
function_use_df['not_user_def'] = all_not
t = datetime.datetime.now()
print('Added to df', t - end)
f = open('analysis_data/nb_function_use.df', 'wb')
pickle.dump(function_use_df, f)
f.close()
print('Saved', datetime.datetime.now() - t)
print('DONE')
def get_errors():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = ['file','num_error','error_names','cell_id'],
chunksize=10000
)
# 25 minutes
start = datetime.datetime.now()
error_dfs = []
i = 0
for chunk in df_chunks:
try:
load_data.string_to_list(chunk, 'error_names')
error_dfs.append(
chunk.groupby('file')['num_error'].aggregate(['sum','count']).reset_index().merge(
chunk.groupby('file')['error_names'].aggregate(load_data.flatten).reset_index(),
on = 'file'
)
)
except Exception:
print(i, type(chunk))
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i+=1
end = datetime.datetime.now()
print('Chunks', end - start)
error_df = pd.concat(error_dfs, sort = False).reset_index(drop=True)
start = datetime.datetime.now()
error_df = error_df.groupby('file')['count'].sum().reset_index().merge(
error_df.groupby('file')['error_names'].aggregate(load_data.flatten).reset_index(),
on = 'file'
)
end = datetime.datetime.now()
print('Combined', end - start)
# 5 seconds
start = datetime.datetime.now()
f = open('analysis_data/error.df', 'wb')
pickle.dump(error_df, f)
f.close
end = datetime.datetime.now()
print('Saved', end - start)
def get_vis_uses_nb(nb_imports_code_df):
notebooks = pd.read_csv('data_final/notebooks_final.csv')
repos = pd.read_csv('data_final/repos_final.csv')
DATE_CHOICE = 'pushed_at'
vis = ['matplotlib','altair','seaborn',
'ggplot','bokeh','pygal','plotly',
'geoplotlib','gleam','missingno',
'leather']
start = datetime.datetime.now()
for v in vis:
nb_imports_code_df[v] = [v in
[i[0].split('.')[0] for i in imports]
for imports in nb_imports_code_df.imports
]
print(v, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Got uses', end - start)
start = datetime.datetime.now()
notebooks = notebooks.merge(
nb_imports_code_df[['file'] + vis], on = 'file'
).merge(repos[['repo_id',DATE_CHOICE]], on = 'repo_id')
notebooks[DATE_CHOICE] = pd.to_datetime(notebooks[DATE_CHOICE])
notebooks['year'] = [c.year for c in notebooks[DATE_CHOICE]]
notebooks['month'] = [c.month for c in notebooks[DATE_CHOICE]]
end = datetime.datetime.now()
print('Added to nbs', end - start)
start = datetime.datetime.now()
f = open('analysis_data/notebooks_vis.df', 'wb')
pickle.dump(notebooks, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_vis_uses(nb_imports_code_df):
def get_uses(imports, code):
vis = ['matplotlib','altair','seaborn',
'ggplot','bokeh','pygal','plotly',
'geoplotlib','gleam','missingno',
'leather']
uses = []
for im in imports:
for v in vis:
if im[0].startswith(v):
# look for im in code
for line in '\n'.join([c for c in row.code if type(c) == str]).split('\n'):
f = re.findall(
'(?<![a-zA-Z._])'+im[1]+'\.[a-zA-Z._]{1,}|(?<![a-zA-Z._])'+im[1]+'\s',
str(line)
)
if len(f) > 0:
uses += [use.strip().replace(im[1],im[0]) for use in f]
return uses
# 2 hours
start = datetime.datetime.now()
all_vis_uses = []
for i, row in nb_imports_code_df.iterrows():
all_vis_uses += get_uses(
row.imports,
row.code
)
if i%100000==0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Gone through code for visualization uses', end - start)
# 30 minutes
start = datetime.datetime.now()
f = open('analysis_data/all_vis_uses.list', 'wb')
pickle.dump(all_vis_uses, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_framework_uses(nb_imports_code_df):
# 22 minutes
def get_uses(row):
frameworks = ['tensorflow', 'sklearn',
'keras', 'theano', 'mxnet',
'caffe', 'pytorch', 'cntk']
uses = {}
for im in row.imports:
for f in frameworks:
if im[0].startswith(f):
if f not in uses:
uses[f] = set([])
as_alias = im[1]
for line in '\n'.join([c for c in row.code if type(c) == str]).split('\n'):
line = line.split('#')[0]
if as_alias in line and 'import' not in line:
use = as_alias+as_alias.join(line.split(as_alias)[1:])
use = re.split('[()\[\]=\s]', use)[0]
if use == as_alias or use.startswith(as_alias+'.'):
use = use.replace(as_alias, im[0])
uses[f].add(use)
return uses
framework_uses = {
'file': [],
'uses': []
}
start = datetime.datetime.now()
for i, row in nb_imports_code_df[len(framework_uses):].iterrows():
framework_uses['file'].append(row.file)
try:
framework_uses['uses'].append(get_uses(row))
except:
framework_uses['uses'].append({})
if i%100000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Gone through code for uses', end - start)
start = datetime.datetime.now()
framework_uses_df = pd.DataFrame(framework_uses)
f = open('analysis_data/framework_uses.df','wb')
pickle.dump(framework_uses_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_nb_imports():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = ['file','imports','cell_id'],
chunksize=10000
)
def string_to_list_of_lists(df, column):
df[column] = [[] if len(df[column][i]) == 2 else [h.replace("'","").split(', ')
for h in df[column][i][2:-2].replace('(','[').replace(')',']').split('], [')]
for i in range(len(df))]
def agg_imports(list_of_lists):
overall = []
for li in list_of_lists:
for l in li:
if type(l) == list and len(l) != 0:
overall.append(l)
return overall
nb_imports = []
# 1 hour
start = datetime.datetime.now()
i = 0
for chunk in df_chunks:
try:
string_to_list_of_lists(chunk.reset_index(drop=True), 'imports')
chunk_nb_imports = chunk.groupby('file')['imports']\
.aggregate(agg_imports).reset_index()
nb_imports.append(chunk_nb_imports)
except Exception:
print(type(chunk))
if i%1000 == 0:
print(i, datetime.datetime.now() - start) # prints up to 15000
i+=1
end = datetime.datetime.now()
print('Chunks', end - start)
# 2.5 minutes
start = datetime.datetime.now()
nb_imports_df = pd.concat(nb_imports).reset_index(drop=True).groupby('file')['imports'].aggregate(load_data.flatten).reset_index()
end = datetime.datetime.now()
print('Combined', end - start)
# 1 minute
start = datetime.datetime.now()
f = open('analysis_data/nb_imports.df', 'wb')
pickle.dump(nb_imports_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_cell_output():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = [
'file','num_execute_result',
'execute_result_keys','num_display_data',
'num_stream','cell_id'
],
chunksize=10000
)
# 27 minutes
start = datetime.datetime.now()
output_dfs = []
def agg_execute_result_keys(list_of_strs):
vis_count = 0
for v_output in [
'application/vnd.vegalite.v2+json',
'application/vnd.vegalite.v3+json',
'image/png'
]:
vis_count += ' '.join(list_of_strs).count(v_output)
return vis_count
i = 0
for chunk in df_chunks:
df = chunk.groupby('file')[[
'num_execute_result','num_display_data','num_stream'
]].sum().reset_index().merge(
chunk.groupby('file')['cell_id'].count().reset_index().rename(
columns = {'cell_id':'num_cells'}
),
on = 'file'
).merge(
chunk.groupby('file')['execute_result_keys'].aggregate(
agg_execute_result_keys
).reset_index().rename(columns = {'execute_result_keys':'num_vis_out'}),
on = 'file'
)
output_dfs.append(df)
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i += 1
end = datetime.datetime.now()
print('Chunks done', end - start)
# 33 seconds
start = datetime.datetime.now()
output_df = | pd.concat(output_dfs) | pandas.concat |
import datetime as dt
import os.path
import re
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pint.errors
import pytest
import scmdata.processing
from scmdata import ScmRun
from scmdata.errors import MissingRequiredColumnError, NonUniqueMetadataError
from scmdata.testing import _check_pandas_less_120
@pytest.fixture(scope="function")
def test_processing_scm_df():
data = np.array(
[
[1, 1.1, 1.2, 1.1],
[1.1, 1.2, 1.3, 1.41],
[1.3, 1.4, 1.5, 1.6],
[1.3, 1.5, 1.6, 1.2],
[1.48, 1.51, 1.72, 1.56],
]
).T
yield ScmRun(
data=data,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Surface Air Temperature Change"],
"unit": ["K"],
"ensemble_member": range(data.shape[1]),
},
index=[2005, 2006, 2007, 2100],
)
@pytest.fixture()
def test_processing_scm_df_multi_climate_model(test_processing_scm_df):
other = test_processing_scm_df + 0.1
other["climate_model"] = "z_model"
return test_processing_scm_df.append(other)
crossing_times_year_conversions = pytest.mark.parametrize(
"return_year,conv_to_year", ((None, True), (True, True), (False, False),)
)
def _get_calculate_crossing_times_call_kwargs(return_year):
call_kwargs = {}
if return_year is not None:
call_kwargs["return_year"] = return_year
return call_kwargs
def _get_expected_crossing_times(exp_vals, conv_to_year):
if conv_to_year:
exp_vals = [v if pd.isnull(v) else v.year for v in exp_vals]
else:
exp_vals = [pd.NaT if pd.isnull(v) else v for v in exp_vals]
return exp_vals
@pytest.mark.parametrize(
"threshold,exp_vals",
(
(
1.0,
[
dt.datetime(2006, 1, 1), # doesn't cross 1.0 until 2006
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
],
),
(
1.5,
[
np.nan, # never crosses
np.nan, # never crosses
dt.datetime(2100, 1, 1), # doesn't cross 1.5 until 2100
dt.datetime(2007, 1, 1), # 2007 is first year to actually exceed 1.5
dt.datetime(2006, 1, 1),
],
),
(2.0, [np.nan, np.nan, np.nan, np.nan, np.nan]),
),
)
@crossing_times_year_conversions
def test_crossing_times(
threshold, exp_vals, return_year, conv_to_year, test_processing_scm_df
):
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df, threshold=threshold, **call_kwargs,
)
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))
pdt.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"end_year",
(
5000,
pytest.param(
10 ** 3, marks=pytest.mark.xfail(reason="ScmRun fails to initialise #179")
),
pytest.param(
10 ** 4, marks=pytest.mark.xfail(reason="ScmRun fails to initialise #179")
),
),
)
@crossing_times_year_conversions
def test_crossing_times_long_runs(
end_year, return_year, conv_to_year, test_processing_scm_df
):
test_processing_scm_df = test_processing_scm_df.timeseries(time_axis="year").rename(
{2100: end_year}, axis="columns"
)
test_processing_scm_df = scmdata.ScmRun(test_processing_scm_df)
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df, threshold=1.5, **call_kwargs,
)
exp_vals = [
np.nan,
np.nan,
dt.datetime(end_year, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
]
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))
pdt.assert_series_equal(res, exp)
@crossing_times_year_conversions
def test_crossing_times_multi_climate_model(
return_year, conv_to_year, test_processing_scm_df_multi_climate_model
):
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
threshold = 1.5
exp_vals = [
# a_model
np.nan,
np.nan,
dt.datetime(2100, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
# z_model
np.nan,
dt.datetime(2100, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
dt.datetime(2005, 1, 1),
]
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model, threshold=threshold, **call_kwargs,
)
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(
exp_vals,
pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta),
)
pdt.assert_series_equal(res, exp)
def _get_expected_crossing_time_quantiles(
cts, groups, exp_quantiles, interpolation, nan_fill_value, nan_fill_threshold
):
cts = cts.fillna(nan_fill_value)
cts_qs = cts.groupby(groups).quantile(q=exp_quantiles, interpolation=interpolation)
out = cts_qs.where(cts_qs < nan_fill_threshold)
out.index = out.index.set_names("quantile", level=-1)
return out
@pytest.mark.parametrize(
"groups", (["model", "scenario"], ["climate_model", "model", "scenario"])
)
@pytest.mark.parametrize(
"quantiles,exp_quantiles",
(
(None, [0.05, 0.5, 0.95]),
([0.05, 0.17, 0.5, 0.83, 0.95], [0.05, 0.17, 0.5, 0.83, 0.95]),
),
)
@pytest.mark.parametrize(
"interpolation,exp_interpolation",
((None, "linear"), ("linear", "linear"), ("nearest", "nearest"),),
)
def test_crossing_times_quantiles(
groups,
quantiles,
exp_quantiles,
interpolation,
exp_interpolation,
test_processing_scm_df_multi_climate_model,
):
threshold = 1.5
crossing_times = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model,
threshold=threshold,
# return_year False handled in
# test_crossing_times_quantiles_datetime_error
return_year=True,
)
exp = _get_expected_crossing_time_quantiles(
crossing_times,
groups,
exp_quantiles,
exp_interpolation,
nan_fill_value=10 ** 6,
nan_fill_threshold=10 ** 5,
)
call_kwargs = {"groupby": groups}
if quantiles is not None:
call_kwargs["quantiles"] = quantiles
if interpolation is not None:
call_kwargs["interpolation"] = interpolation
res = scmdata.processing.calculate_crossing_times_quantiles(
crossing_times, **call_kwargs
)
if _check_pandas_less_120():
check_dtype = False
else:
check_dtype = True
pdt.assert_series_equal(res, exp, check_dtype=check_dtype)
def test_crossing_times_quantiles_datetime_error(
test_processing_scm_df_multi_climate_model,
):
crossing_times = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model, threshold=1.5, return_year=False,
)
with pytest.raises(NotImplementedError):
scmdata.processing.calculate_crossing_times_quantiles(
crossing_times, ["model", "scenario"]
)
@pytest.mark.parametrize(
"nan_fill_value,out_nan_threshold,exp_vals",
(
(None, None, [2025.4, 2027.0, np.nan]),
(None, 10 ** 4, [2025.4, 2027.0, np.nan]),
(10 ** 5, 10 ** 4, [2025.4, 2027.0, np.nan]),
(10 ** 6, 10 ** 5, [2025.4, 2027.0, np.nan]),
(
# fill value less than threshold means calculated quantiles are used
3000,
10 ** 5,
[2025.4, 2027.0, 2805.4],
),
(3000, 2806, [2025.4, 2027.0, 2805.4]),
(3000, 2805, [2025.4, 2027.0, np.nan]),
),
)
def test_crossing_times_quantiles_nan_fill_values(
nan_fill_value, out_nan_threshold, exp_vals
):
data = np.array(
[
[1.3, 1.35, 1.5, 1.52],
[1.37, 1.43, 1.54, 1.58],
[1.48, 1.51, 1.72, 2.02],
[1.55, 1.65, 1.85, 2.1],
[1.42, 1.46, 1.55, 1.62],
]
).T
ensemble = scmdata.ScmRun(
data=data,
index=[2025, 2026, 2027, 2100],
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Surface Air Temperature Change"],
"unit": ["K"],
"ensemble_member": range(data.shape[1]),
},
)
call_kwargs = {}
if nan_fill_value is not None:
call_kwargs["nan_fill_value"] = nan_fill_value
if out_nan_threshold is not None:
call_kwargs["out_nan_threshold"] = out_nan_threshold
crossing_times = scmdata.processing.calculate_crossing_times(
ensemble, threshold=1.53, return_year=True,
)
res = scmdata.processing.calculate_crossing_times_quantiles(
crossing_times,
["climate_model", "scenario"],
quantiles=(0.05, 0.5, 0.95),
**call_kwargs,
)
exp = pd.Series(
exp_vals,
pd.MultiIndex.from_product(
[["a_model"], ["a_scenario"], [0.05, 0.5, 0.95]],
names=["climate_model", "scenario", "quantile"],
),
)
if _check_pandas_less_120():
check_dtype = False
else:
check_dtype = True
pdt.assert_series_equal(res, exp, check_dtype=check_dtype)
output_name_options = pytest.mark.parametrize(
"output_name", (None, "test", "test other")
)
def _get_calculate_exceedance_probs_call_kwargs(output_name):
call_kwargs = {}
if output_name is not None:
call_kwargs["output_name"] = output_name
return call_kwargs
def _get_calculate_exeedance_probs_expected_name(output_name, threshold):
if output_name is not None:
return output_name
return "{} exceedance probability".format(threshold)
@pytest.mark.parametrize(
"threshold,exp_vals",
(
(1.0, [0.8, 1.0, 1.0, 1.0]),
(1.5, [0.0, 0.2, 0.4, 0.4]),
(2.0, [0.0, 0.0, 0.0, 0.0]),
),
)
@output_name_options
def test_exceedance_probabilities_over_time(
output_name, threshold, exp_vals, test_processing_scm_df
):
call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
test_processing_scm_df,
process_over_cols="ensemble_member",
threshold=threshold,
**call_kwargs,
)
exp_idx = pd.MultiIndex.from_frame(
test_processing_scm_df.meta.drop(
"ensemble_member", axis="columns"
).drop_duplicates()
)
exp = pd.DataFrame(
np.array(exp_vals)[np.newaxis, :],
index=exp_idx,
columns=test_processing_scm_df.time_points.to_index(),
)
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(output_name, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
def test_exceedance_probabilities_over_time_multiple_res(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = np.array([[0, 1, 2, 2], [1, 2, 3, 3]]) / 5
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
start, process_over_cols=["ensemble_member"], threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(["ensemble_member"], axis="columns").drop_duplicates()
)
exp = pd.DataFrame(exp_vals, index=exp_idx, columns=start.time_points.to_index())
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(None, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
def test_exceedance_probabilities_over_time_multiple_grouping(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = np.array([1, 3, 5, 5]) / 10
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
start,
process_over_cols=["climate_model", "ensemble_member"],
threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(
["climate_model", "ensemble_member"], axis="columns"
).drop_duplicates()
)
exp = pd.DataFrame(
exp_vals[np.newaxis, :], index=exp_idx, columns=start.time_points.to_index(),
)
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(None, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
@pytest.mark.parametrize(
"threshold,exp_val", ((1.0, 1.0), (1.5, 0.6), (2.0, 0.0),),
)
@output_name_options
def test_exceedance_probabilities(
output_name, threshold, exp_val, test_processing_scm_df
):
call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)
res = scmdata.processing.calculate_exceedance_probabilities(
test_processing_scm_df,
process_over_cols="ensemble_member",
threshold=threshold,
**call_kwargs,
)
exp_idx = pd.MultiIndex.from_frame(
test_processing_scm_df.meta.drop(
"ensemble_member", axis="columns"
).drop_duplicates()
)
exp = pd.Series(exp_val, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(output_name, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
def test_exceedance_probabilities_multiple_res(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = [0.6, 0.8]
res = scmdata.processing.calculate_exceedance_probabilities(
start, process_over_cols=["ensemble_member"], threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop("ensemble_member", axis="columns").drop_duplicates()
)
exp = pd.Series(exp_vals, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
def test_exceedance_probabilities_multiple_grouping(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = [0.7]
res = scmdata.processing.calculate_exceedance_probabilities(
start,
process_over_cols=["ensemble_member", "climate_model"],
threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(
["ensemble_member", "climate_model"], axis="columns"
).drop_duplicates()
)
exp = pd.Series(exp_vals, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
@pytest.mark.parametrize("col", ["unit", "variable"])
@pytest.mark.parametrize(
"func,kwargs",
(
(scmdata.processing.calculate_exceedance_probabilities, {"threshold": 1.5}),
(
scmdata.processing.calculate_exceedance_probabilities_over_time,
{"threshold": 1.5},
),
),
)
def test_requires_preprocessing(test_processing_scm_df, col, func, kwargs):
test_processing_scm_df[col] = [
str(i) for i in range(test_processing_scm_df.shape[0])
]
error_msg = (
"More than one value for {}. "
"This is unlikely to be what you want.".format(col)
)
with pytest.raises(ValueError, match=error_msg):
func(
test_processing_scm_df,
process_over_cols=["ensemble_member", col],
**kwargs,
)
def _get_calculate_peak_call_kwargs(output_name, variable):
call_kwargs = {}
if output_name is not None:
call_kwargs["output_name"] = output_name
return call_kwargs
@output_name_options
def test_peak(output_name, test_processing_scm_df):
call_kwargs = _get_calculate_peak_call_kwargs(
output_name, test_processing_scm_df.get_unique_meta("variable", True),
)
exp_vals = [1.2, 1.41, 1.6, 1.6, 1.72]
res = scmdata.processing.calculate_peak(test_processing_scm_df, **call_kwargs,)
exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df.meta)
exp = pd.Series(exp_vals, index=exp_idx)
if output_name is not None:
exp.index = exp.index.set_levels([output_name], level="variable")
else:
idx = exp.index.names
exp = exp.reset_index()
exp["variable"] = exp["variable"].apply(lambda x: "Peak {}".format(x))
exp = exp.set_index(idx)[0]
pdt.assert_series_equal(res, exp)
def test_peak_multi_variable(test_processing_scm_df_multi_climate_model):
test_processing_scm_df_multi_climate_model["variable"] = [
str(i) for i in range(test_processing_scm_df_multi_climate_model.shape[0])
]
exp_vals = [1.2, 1.41, 1.6, 1.6, 1.72, 1.3, 1.51, 1.7, 1.7, 1.82]
res = scmdata.processing.calculate_peak(test_processing_scm_df_multi_climate_model,)
exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta)
exp = pd.Series(exp_vals, index=exp_idx)
idx = exp.index.names
exp = exp.reset_index()
exp["variable"] = exp["variable"].apply(lambda x: "Peak {}".format(x))
exp = exp.set_index(idx)[0]
pdt.assert_series_equal(res, exp)
def _get_calculate_peak_time_call_kwargs(return_year, output_name):
call_kwargs = {}
if return_year is not None:
call_kwargs["return_year"] = return_year
if output_name is not None:
call_kwargs["output_name"] = output_name
return call_kwargs
@output_name_options
@crossing_times_year_conversions
def test_peak_time(output_name, return_year, conv_to_year, test_processing_scm_df):
call_kwargs = _get_calculate_peak_time_call_kwargs(return_year, output_name)
exp_vals = [
dt.datetime(2007, 1, 1),
dt.datetime(2100, 1, 1),
dt.datetime(2100, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2007, 1, 1),
]
res = scmdata.processing.calculate_peak_time(test_processing_scm_df, **call_kwargs,)
exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df.meta)
if conv_to_year:
exp_vals = [v.year if conv_to_year else v for v in exp_vals]
time_name = "Year"
else:
time_name = "Time"
exp = | pd.Series(exp_vals, index=exp_idx) | pandas.Series |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: 申万指数-申万一级、二级和三级
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
申万-市场表征实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: 市场表征实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_spot() -> pd.DataFrame:
"""
申万一级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: 申万一级行业实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
申万二级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: 申万二级行业-实时行情数据
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
"""
申万指数成份信息-包括一级和二级行业都可以查询
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: 指数代码
:type symbol: str
:return: 申万指数成份信息
:rtype: pandas.DataFrame
"""
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
"""
申万指数一级和二级日频率行情数据
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 申万指数日频率行情数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
"""
申万一级和二级行业历史行情指标
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param data_type: choice of {"Day": 日报表, "Week": 周报表}
:type data_type: str
:return: 申万指数不同频率数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(lambda x: x.replace(",", ""))
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(temp_df["dividend_yield_ratio"])
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
def sw_index_third_info() -> pd.DataFrame:
"""
乐咕乐股-申万三级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
def sw_index_third_cons(symbol: str = "851921.SI") -> pd.DataFrame:
"""
乐咕乐股-申万三级-行业成份
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
:param symbol: 三级行业的行业代码
:type symbol: str
:return: 行业成份
:rtype: pandas.DataFrame
"""
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"序号",
"股票代码",
"股票简称",
"纳入时间",
"申万1级",
"申万2级",
"申万3级",
"价格",
"市盈率",
"市盈率ttm",
"市净率",
"股息率",
"市值",
]
temp_df["价格"] = pd.to_numeric(temp_df["价格"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["市盈率ttm"] = pd.to_ | numeric(temp_df["市盈率ttm"], errors="coerce") | pandas.to_numeric |
import sys, os
sys.path.append(os.path.abspath(__file__).split('test')[0])
import pandas as pd
import numpy as np
from pyml.supervised.linear_regression.LinearRegression import LinearRegression
"""
------------------------------------------------------------------------------------------------------------------------
SIMPLE LINEAR REGRESSION
------------------------------------------------------------------------------------------------------------------------
"""
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------------- OBTENCION DATOS --------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
data = pd.read_csv('../../../data/ex1data1.txt', sep=",", header=None) #Cargamos los datos
data.columns = ["population", "profit"]
print("\n--------------------------------------------")
print("--------------------------------------------")
print(" SIMPLE LINEAR REGRESSION ")
print("--------------------------------------------")
print("--------------------------------------------\n")
print(data.head()) #Imprimimos los datos
print(data.describe())
num_col = data.shape[0] #Numero de columnas: numero de ejemplos
num_filas = data.shape[1] #Numero de filas: numero de features
X = np.matrix([np.ones(num_col), data['population']]).T #Cada fila es un ejemplo, cada columna es un feature del ejemplo
y = np.matrix(data['profit']).T #Vector columna
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------- TEST -------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
norm_linear_regression = LinearRegression(X, y, axis=1)
norm_linear_regression.norm_equation()
print("\n--------------------------------------------")
print(" RESULTADOS ")
print("--------------------------------------------\n")
print("\nMinimizacion parámetros theta: ", norm_linear_regression.theta)
print("Dimension theta: ", norm_linear_regression.theta.shape)
print("Theta como array: ", np.ravel(norm_linear_regression.theta))
print("Prueba prediccion de: ", 6.1101, " con nuestro modelo de Linear Regression: ", norm_linear_regression.prediccion(np.matrix([1, 6.1101]).T))
norm_linear_regression.plot_regression("Plot sin division", "Poblacion", "Beneficio")
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------------- DIVISION Y TEST --------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
norm_linear_regression = LinearRegression(X, y, axis=1, split=0.2)
norm_linear_regression.norm_equation()
norm_linear_regression.plot_regression("Training set", "Poblacion", "Beneficio")
norm_linear_regression.plot_regression_test("Test set", "Poblacion", "Beneficio")
"""
------------------------------------------------------------------------------------------------------------------------
EJEMPLO SALARIOS
------------------------------------------------------------------------------------------------------------------------
"""
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------------- OBTENCION DATOS --------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
data_salarios = | pd.read_csv("../../../data/Salary_Data.csv", sep=",") | pandas.read_csv |
import os
import numpy as np
import matplotlib.pyplot as pp
import pandas as pd
#########################
## INTIALISE VARIABLES ##
#########################
newDesk=[]
selectedList=[]
yPlotlabel=""
flow=["red", "orange","brown","tan", "lime", "purple", "teal", "black", "blue", "grey", "pink", "violet", "goldenrod","darkkhaki","peru", "saddlebrown"]
blues=["blue","turquoise","lime", "darkgreen","midnightblue", "slateblue", "dodgerblue", "mediumblue", "seagreen","yellowgreen","olivedrab","lightseagreen"]
greens=["olive","crimson","black", "blue", "maroon", "lightcoral", "chocolate", "lightsalmon", "darkolivegreen", "rosybrown"]
reds=flow+blues+greens+flow+blues+greens
BODStats=pd.DataFrame()
######################
## DEFINE FUNCTIONS ##
######################
def importData(directory):
os.chdir(directory)
folderList=os.listdir()
idvgData=pd.DataFrame() # Initialises a blank dataframe to be appended to
newDesk=[] # Initialise a blank list for the data to be selected from
counter=0
for folderName in folderList:# Loop over the functionalisation folders
os.chdir(directory)
folderList=os.listdir( )# Now list the FOLDERS inside the top directory
os.chdir(directory+"/"+folderName) # Change directory to the ith folderName
fileList=os.listdir() # List the FILES in the folderName FOLDER
for file in fileList:# Loop over the files in the fileList and import them to the dataframe with a new snazzier name
fName = directory+"/"+folderName+"/"+file
df=pd.read_csv(fName, usecols=[1,2], skiprows=248)
global device
newTitle,device = newNameFinal(folderName,file)
df.columns=pd.MultiIndex.from_product([[newTitle],df.columns]) # Introduce multiindex naming of columns
idvgData=pd.concat([idvgData,df],axis=1)
newDesk.append(newTitle)
global copied_original
copied_original=idvgData.copy()
copied_original.name=device
return copied_original,device,newDesk
def newNameFinal(folderName1, originalName):
# Takes a file name and shortens it based on the position of the "_" and then concatenates with the folder name.
displayText=originalName[0:originalName.find("_")]
outputName=folderName1+"_"+displayText
return outputName, displayText[0:2]
def importBOD(filename):
# Imports data from a .BOD file (a file which has been previosuly exported from SCRAMBLE)
BODdf=pd.read_csv(filename, header=[0,1])
global copied_original
copied_original=BODdf.copy()
# Produce a list of the data
niceCoffee=[]
for i, x in enumerate(BODdf.columns.get_level_values(0)):
if i%2>0: # Select every other name as they are repeated
niceCoffee.append(x)
return copied_original,niceCoffee
def statsTable(selection):
bigData=copied_original.copy() # Always work from a copy of the original data
statsInput=bigData.loc[:,(selection)] # Filter based on name of data
sVg = statsInput.loc[:,[statsInput.columns[0]]] # Select the Vbg
sDrain = statsInput.loc[:,[statsInput.columns[1]]] # Select the Ids
statsFrame=pd.DataFrame() #Initialise the dataframe for this loop
## FORWARD SWEEP STATS ##
#Slice the data and select the forward sweep
fVg=sVg.iloc[0:(int(statsInput.shape[0]/2))]
fDrain=sDrain.iloc[0:(int(statsInput.shape[0]/2))]
#DP Current - fDPI
fMinI=fDrain.describe().loc["min"]
statsFrame=pd.concat([statsFrame,fMinI],ignore_index=True)
#DP Voltage - fDPV
fMinVIndex=abs(fDrain-fMinI).idxmin()
fMinV1=fVg.iloc[fMinVIndex].values[0][0]
fMinV=pd.Series(fMinV1)
statsFrame=pd.concat([statsFrame,fMinV], ignore_index=True)
#DP Voltage Gradient - fDPMaxgrad and fDPMaxgradV
fDPIseries=fDrain[statsInput.columns[1]].values
fDPVseries=fVg[statsInput.columns[0]].values
fDPIgrad1=np.gradient(fDPIseries)
fDPIgradMax1=max(abs(fDPIgrad1))
indexGradMax=np.argmax(abs(fDPIgrad1))
fDPVgradMax1=fDPVseries[indexGradMax]
fDPIgradMaxI1=fDPIseries[indexGradMax]
fDPIgradMax=pd.Series(fDPIgradMax1)
fDPVgradMax=pd.Series(fDPVgradMax1)
fDPIgradMaxI=pd.Series(fDPIgradMaxI1)
statsFrame=pd.concat([statsFrame,fDPIgradMax], ignore_index=True)
statsFrame=pd.concat([statsFrame,fDPVgradMax], ignore_index=True)
statsFrame=pd.concat([statsFrame,fDPIgradMaxI], ignore_index=True)
#Current value at 0 BackGate - fI0Vg
fI0Vg1=fDrain.iloc[int(((fDrain.shape[0])-1)/2)].values[0] # Halfway point
fI0Vg=pd.Series(fI0Vg1)
statsFrame=pd.concat([statsFrame,fI0Vg], ignore_index=True)
## REVERSE SWEEP STATS ##
#Slice the data and select the reverse sweep
rVg=sVg.iloc[(int(statsInput.shape[0]/2)):]
rDrain=sDrain.iloc[(int(statsInput.shape[0]/2)):]
#DP Current - rDPI
rMinI=rDrain.describe().loc["min"]
statsFrame=pd.concat([statsFrame,rMinI],ignore_index=True)
#DP Voltage - rDPV
rMinVIndex=abs(rDrain-rMinI).idxmin()
rMinV1=sVg.iloc[rMinVIndex].values[0][0]
rMinV=pd.Series(rMinV1)
statsFrame=pd.concat([statsFrame,rMinV], ignore_index=True)
#DP Voltage Gradient - rDPMaxgrad and rDPMaxgradV
rDPIseries=rDrain[statsInput.columns[1]].values
rDPVseries=rVg[statsInput.columns[0]].values
rDPIgrad1=np.gradient(rDPIseries)
rDPIgradMax1=max(abs(rDPIgrad1))
indexGradMax=np.argmax(abs(rDPIgrad1))
rDPVgradMax1=rDPVseries[indexGradMax]
rDPIgradMaxI1=rDPIseries[indexGradMax]
rDPIgradMax=pd.Series(rDPIgradMax1)
rDPVgradMax=pd.Series(rDPVgradMax1)
rDPIgradMaxI=pd.Series(rDPIgradMaxI1)
statsFrame=pd.concat([statsFrame,rDPIgradMax], ignore_index=True)
statsFrame=pd.concat([statsFrame,rDPVgradMax], ignore_index=True)
statsFrame=pd.concat([statsFrame,rDPIgradMaxI], ignore_index=True)
#Current value at 0 BackGate - fI0Vg
rI0Vg1=rDrain.iloc[int(((rDrain.shape[0])-1)/2)].values[0]
rI0Vg=pd.Series(rI0Vg1)
statsFrame=pd.concat([statsFrame,rI0Vg], ignore_index=True)
## CONSTRUCT THE PARAMETER TABLE ##
insides = {'Column 1' : [1,2,3,4,5,6,30,40,50,60,70,80],
'Index Title' : ["fDPI","fDPV","fMaxgrad","fMaxgradV", "fMaxgradI", "fI0Vg",
"rDPI","rDPV","rMaxgrad","rMaxgradV","rMaxgradI", "rI0Vg"]}
blankStats = pd.DataFrame(insides)
del blankStats["Column 1"]
blankStats.index.name = "BOD_Params"
newFrame=pd.concat([blankStats,statsFrame], axis=1) #Concatenate the initial df with data from statsFrame
newFrame.index = newFrame["Index Title"]
del newFrame["Index Title"]
newFrame.columns=[selection]
newFrame.index.name="BOD_Params"
return newFrame #Output from StatsTable
def mobility(selection,Vds,L,W,oxideThick,oxideDielectric):
bigData=copied_original.copy() # Always work from a copy of the original data
mobilitySeries=pd.Series([]) # Convert dataframe to series for ease of maniupulation
mobilityFrame= | pd.DataFrame() | pandas.DataFrame |
# Written and maintained by <NAME>
# <EMAIL>
#
# A project of XamPak Open Source Software
# Follow my github for more !
# https://github.com/XamNalpak
#
# Last updated 2/22/21
#
#
# Interested in more ideas? Let me know !!##
#
#
#
#
# importing Python Packages
import praw
import pandas as pd
from datetime import datetime
import time
#Reddit API information from https://www.reddit.com/prefs/apps
# sign into your account and set up an app
reddit = praw.Reddit(
username='', # this is your reddit username
password='', # this is your reddit password
client_id='', # client ID from the top left of the information box
client_secret='', # secret code developed by reddit
user_agent='' # the agent name you assigned
)
# intializing which subreddit we want to look at
subreddit = reddit.subreddit('nbatopshot')
#which section we want to look at and the limit of entries to return
# i.e this subreddit isn't active enought for 1000 posts a day at the moment of 2/22/21
new_subreddit = subreddit.new(limit=1000)
# creating a dictionary to return the submission values and create a dataframe
topics_dict = { "title":[],
"score":[],
"id":[], "url":[],
"comms_num": [],
"date": [],
"body":[]}
# returning all the values from each post
for submission in new_subreddit:
topics_dict["id"].append(submission.id)
topics_dict["title"].append(submission.title)
topics_dict["score"].append(submission.score)
topics_dict["comms_num"].append(submission.num_comments)
topics_dict["date"].append(datetime.fromtimestamp(submission.created).strftime('%Y-%m-%d'))
topics_dict["body"].append(submission.selftext)
topics_dict["url"].append(submission.url)
# turning the dict to a df
topics_data = pd.DataFrame(topics_dict)
# only taking the posts w/ todays data
todays_subs = topics_data[topics_data['date'] >= datetime.now().strftime('%Y-%m-%d')]
# reading in for counting purposes
old = | pd.read_csv('nbatop.csv') | pandas.read_csv |
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import bs4
import requests
from collections import Counter
import streamlit.components.v1 as components
import yfinance as yf
import datetime
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from streamlit import caching
import SessionState
import sys
import platform
from pysummarization.nlpbase.auto_abstractor import AutoAbstractor
from pysummarization.tokenizabledoc.simple_tokenizer import SimpleTokenizer
from pysummarization.web_scraping import WebScraping
from pysummarization.abstractabledoc.std_abstractor import StdAbstractor
from pysummarization.abstractabledoc.top_n_rank_abstractor import TopNRankAbstractor
from sklearn.feature_extraction.text import CountVectorizer
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
#------------------------------------------------------------------------------------------
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#st.sidebar.subheader("Reset")
reset_clicked = st.sidebar.button("Reset all your input")
session_state = SessionState.get(id = 0)
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# Text Mining
#++++++++++++++++++++++++++++++++++++++++++++
basic_text="Let STATY do text/web processing for you and start exploring your data stories right below... "
st.header('**Web scraping and text data**')
tw_meth = ['Text analysis','Web-Page summary','Stock data analysis']
tw_classifier = st.selectbox('What analysis would you like to perform?', list('-')+tw_meth, key = session_state.id)
if tw_classifier in tw_meth:
st.write("")
st.write("")
st.header('**'+tw_classifier+'**')
st.markdown(basic_text)
if tw_classifier=='Web-Page summary':
user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
run_models = st.button("Press to start the data processing...")
if run_models:
# Pysummarization of a web page:
def pysumMain(url):
web_scrape = WebScraping()
# Web-scraping:
document = web_scrape.scrape(url)
auto_abstractor = AutoAbstractor()
auto_abstractor.tokenizable_doc = SimpleTokenizer()
# Set delimiter for a sentence:
auto_abstractor.delimiter_list = [".", "\n"]
abstractable_doc = TopNRankAbstractor()
# Summarize a document:
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Set the limit for the number of output sentences:
limit = 5
i = 1
for sentence in result_dict["summarize_result"]:
st.write(sentence)
if i >= limit:
break
i += 1
#user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
if user_path !='':
a1, a2 = st.beta_columns(2)
with a1:
st.subheader('Web page preview:')
st.text("")
components.iframe(user_path,width=None,height=500,scrolling=True)
with a2:
st.subheader('Web page summary:')
st.text("")
pysumMain(user_path)
if tw_classifier =='Stock data analysis':
# dwonload first the list of comanies in the S&P500 and DAX indices
payload= | pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies') | pandas.read_html |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, places, viz_tools, visualisations, geo_tools
import xarray as xr
import pandas as pd
import pickle
import os
import gsw
# Extracting winds from the correct path
def getWindVarsYear(year,loc):
''' Given a year, returns the correct directory and nam_fmt for wind forcing as well as the
location of S3 on the corresponding grid.
Parameters:
year: a year value in integer form
loc: the location name as a string. Eg. loc='S3'
Returns:
jW: y-coordinate for the location
iW: x-coordinate for the location
opsdir: path to directory where wind forcing file is stored
nam_fmt: naming convention of the appropriate files
'''
if year>2014:
opsdir='/results/forcing/atmospheric/GEM2.5/operational/'
nam_fmt='ops'
jW,iW=places.PLACES[loc]['GEM2.5 grid ji']
else:
opsdir='/data/eolson/results/MEOPAR/GEMLAM/'
nam_fmt='gemlam'
with xr.open_dataset('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc') as gridrefWind:
# always use a post-2011 file here to identify station grid location
lon,lat=places.PLACES[loc]['lon lat']
jW,iW=geo_tools.find_closest_model_point(lon,lat,
gridrefWind.variables['nav_lon'][:,:]-360,gridrefWind.variables['nav_lat'][:,:],
grid='GEM2.5')
# the -360 is needed because longitudes in this case are reported in postive degrees East
return jW,iW,opsdir,nam_fmt
# Metric 1:
def metric1_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 1'):
'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to
3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM
(the half-saturation concentration) for two consecutive days'
EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld
Returns:
bloomtime1: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['upper_3m_phyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['upper_3m_no3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
metric1_df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# c) Find first location where nitrate crosses below 0.5 micromolar and
# stays there for 2 days
# NOTE: changed the value to 2 micromolar
location1=np.nan
for i, row in metric1_df.iterrows():
try:
if metric1_df['upper_3m_no3'].iloc[i]<2 and metric1_df['upper_3m_no3'].iloc[i+1]<2:
location1=i
break
except IndexError:
location1=np.nan
print('bloom not found')
# d) Find date with maximum phytoplankton concentration within four days (say 9 day window) of date in c)
if np.isnan(location1):
bloomrange=np.nan
bloomtime1=np.nan
else:
bloomrange=metric1_df[location1-4:location1+5]
bloomtime1=bloomrange.loc[bloomrange.upper_3m_phyto.idxmax(), 'bio_time']
return bloomtime1
# Metric 2:
def metric2_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 2'):
'The first peak in which chlorophyll concentrations in upper 3m are above 5 ug/L for more than two days'
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as sphyto and sno3
Returns:
bloomtime2: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['sphyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['sno3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# to find all the peaks:
df['phytopeaks'] = df.sphyto[(df.sphyto.shift(1) < df.sphyto) & (df.sphyto.shift(-1) < df.sphyto)]
# need to covert the value of interest from ug/L to uM N (conversion factor: 1.8 ug Chl per umol N)
chlvalue=5/1.8
# extract the bloom time date
for i, row in df.iterrows():
try:
if df['sphyto'].iloc[i-1]>chlvalue and df['sphyto'].iloc[i-2]>chlvalue and | pd.notna(df['phytopeaks'].iloc[i]) | pandas.notna |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
from functools import partial
from .model_selection import features
from .model_selection import make_k_folds
from .model_selection import perform_k_fold_cv
from .model_selection import make_score_dict
from .model_selection import report_result
def selector(
df,
k,
indel_class,
artifact_ratio,
beta,
num_of_processes,
feature_names,
max_features="auto",
):
"""Select s subset of features optimizing F beta
Args:
df (pandas,DataFrame)
k (int): num of folds in cross validation
indel_class (str): s for single-nucleotide indels, m for multi-nucleotide indels
artifact_ratio (int): downsampling ratio for artifact class
beta (int): specify F beta score to be optimized
num_of_processes (int): num of processes in parallelism
feature_names (str): filename specifying a subset of features to be selected
max_features (str or int): maximum num of features considered in sklearn random forest. default to 'auto'
Returns:
report_result (tuple): (selected_features (str), fs_f_beta (float), fs_precision (float))
selected_features: subset of features optimizing fs_f_beta. features are semicolon-delimited(;)
fs_f_beta: F beta score optimized in feature selection (fs) step
fs_precision: associated precision at the F beta optimum
"""
folds = make_k_folds(df, k, indel_class)
if feature_names:
feature_list = [line.rstrip() for line in open(feature_names)]
selected_features = feature_list[:-1]
remaining_features = [feature_list[-1]]
else:
selected_features = []
remaining_features = features(indel_class)
result_dict_lst = []
while remaining_features:
d = {}
best, f_beta, precision = greedy_search(
selected_features,
remaining_features,
folds,
artifact_ratio,
beta,
num_of_processes,
max_features,
)
selected_features.append(best)
remaining_features.remove(best)
f_label = "tpr" if beta > 100 else "f" + str(beta)
d["param"] = ";".join(selected_features)
d[f_label] = f_beta
d["precision"] = precision
result_dict_lst.append(d)
return report_result(pd.DataFrame(result_dict_lst), beta)
def greedy_search(
selected_features,
remaining_features,
folds,
artifact_ratio,
beta,
num_of_processes,
max_features,
):
"""Pick up a feature with the greatest increment in F beta in a greedy manner
Args:
selected_features (list): selected features
remaining_features (list): features to be examined
folds (list): folds (list): a k-element list of list [training_df, validation_df]
artifact_ratio (int): downsampling ratio for artifact class
beta (int): specify F beta score to be optimized
num_of_processes (int): num of processes in parallelism
max_features (str or int): maximum num of features considered in sklearn random forest model
Returns:
report_result (tuple): (best, f_beta, precision)
best (str): feature name with maximum increment in f_beta
f_beta (float): F beta score for model with selected_features + best
precision (float): associated precision
"""
scores = []
for feature in remaining_features:
selected_features.append(feature)
# do k-fold CV for each feature
stats = perform_k_fold_cv(
folds, selected_features, artifact_ratio, num_of_processes, max_features
)
scores.append(make_score_dict(feature, stats, beta))
del selected_features[-1]
return report_result( | pd.DataFrame(scores) | pandas.DataFrame |
import os
import requests
from time import sleep, time
import pandas as pd
from polygon import RESTClient
from dotenv import load_dotenv, find_dotenv
from FileOps import FileReader, FileWriter
from TimeMachine import TimeTraveller
from Constants import PathFinder
import Constants as C
class MarketData:
def __init__(self):
load_dotenv(find_dotenv('config.env'))
self.writer = FileWriter()
self.reader = FileReader()
self.finder = PathFinder()
self.traveller = TimeTraveller()
self.provider = 'iexcloud'
def get_indexer(self, s1, s2):
return list(s1.intersection(s2))
def try_again(self, func, **kwargs):
retries = (kwargs['retries']
if 'retries' in kwargs
else C.DEFAULT_RETRIES)
delay = (kwargs['delay']
if 'delay' in kwargs
else C.DEFAULT_DELAY)
func_args = {k: v for k, v in kwargs.items() if k not in {
'retries', 'delay'}}
for retry in range(retries):
try:
return func(**func_args)
except Exception as e:
if retry == retries - 1:
raise e
else:
sleep(delay)
def get_symbols(self):
# get cached list of symbols
symbols_path = self.finder.get_symbols_path()
return list(self.reader.load_csv(symbols_path)[C.SYMBOL])
def get_dividends(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_dividends_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize(self, df, full_mapping,
filename, columns, default):
mapping = {k: v for k, v in full_mapping.items() if k in df}
df = df[list(mapping)].rename(columns=mapping)
time_col, val_cols = columns[0], columns[1:]
if time_col in df and set(val_cols).issubset(df.columns):
df = self.reader.update_df(
filename, df, time_col).sort_values(by=[time_col])
# since time col is pd.datetime,
# consider converting to YYYY-MM-DD str format
for val_col in val_cols:
df[val_col] = df[val_col].apply(
lambda val: float(val) if val else default)
return df
def standardize_dividends(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'amount'],
[C.EX, C.PAY, C.DEC, C.DIV]
)
)
filename = self.finder.get_dividends_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.DIV],
0
)
def save_dividends(self, **kwargs):
# given a symbol, save its dividend history
symbol = kwargs['symbol']
filename = self.finder.get_dividends_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_dividends(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_splits(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_splits_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize_splits(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'ratio'],
[C.EX, C.PAY, C.DEC, C.RATIO]
)
)
filename = self.finder.get_splits_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.RATIO],
1
)
def save_splits(self, **kwargs):
# given a symbol, save its splits history
symbol = kwargs['symbol']
filename = self.finder.get_splits_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_splits(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_ohlc(self, symbol, df, filename=None):
full_mapping = dict(
zip(
['date', 'open', 'high', 'low', 'close',
'volume', 'average', 'trades'],
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE,
C.VOL, C.AVG, C.TRADES]
)
)
filename = filename or self.finder.get_ohlc_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE],
0
)
for col in [C.VOL, C.TRADES]:
if col in df:
df[col] = df[col].apply(
lambda val: 0 if pd.isnull(val) else int(val))
return df
def get_ohlc(self, symbol, timeframe='max'):
df = self.reader.load_csv(
self.finder.get_ohlc_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def save_ohlc(self, **kwargs):
symbol = kwargs['symbol']
filename = self.finder.get_ohlc_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_ohlc(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_social_sentiment(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.POS, C.NEG]]
return filtered
def get_social_volume(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.VOL, C.DELTA]]
return filtered
def save_social_sentiment(self, **kwargs):
# # given a symbol, save its sentiment data
symbol = kwargs['symbol']
filename = self.finder.get_sentiment_path(symbol)
if os.path.exists(filename):
os.remove(filename)
sen_df = self.reader.update_df(
filename, self.get_social_sentiment(**kwargs), C.TIME)
sen_df = sen_df[self.get_indexer(
{C.TIME, C.POS, C.NEG}, sen_df.columns)]
vol_df = self.reader.update_df(
filename, self.get_social_volume(**kwargs), C.TIME)
vol_df = vol_df[self.get_indexer(
{C.TIME, C.VOL, C.DELTA}, vol_df.columns)]
if sen_df.empty and not vol_df.empty:
df = vol_df
elif not sen_df.empty and vol_df.empty:
df = sen_df
elif not sen_df.empty and not vol_df.empty:
df = sen_df.merge(vol_df, how="outer", on=C.TIME)
else:
return
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sentiment(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'bullish', 'bearish'],
[C.TIME, C.POS, C.NEG]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.POS, C.NEG],
0
)
return df[self.get_indexer({C.TIME, C.POS, C.NEG}, df.columns)]
def standardize_volume(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'volume_score', 'volume_change'],
[C.TIME, C.VOL, C.DELTA]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.VOL, C.DELTA],
0
)
return df[self.get_indexer({C.TIME, C.VOL, C.DELTA}, df.columns)]
def get_intraday(self, symbol, min=1, timeframe='max', extra_hrs=False):
# implement way to transform 1 min dataset to 5 min data
# or 30 or 60 should be flexible soln
# implement way to only get market hours
# given a symbol, return a cached dataframe
dates = self.traveller.dates_in_range(timeframe)
for date in dates:
df = self.reader.load_csv(
self.finder.get_intraday_path(symbol, date, self.provider))
yield self.reader.data_in_timeframe(df, C.TIME, timeframe)
def save_intraday(self, **kwargs):
symbol = kwargs['symbol']
dfs = self.get_intraday(**kwargs)
filenames = []
for df in dfs:
date = df[C.TIME].iloc[0].strftime(C.DATE_FMT)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
if os.path.exists(filename):
os.remove(filename)
save_fmt = f'{C.DATE_FMT} {C.TIME_FMT}'
df = self.reader.update_df(
filename, df, C.TIME, save_fmt)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
filenames.append(filename)
return filenames
def get_unemployment_rate(self, timeframe='max'):
# given a timeframe, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_unemployment_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def standardize_unemployment(self, df):
full_mapping = dict(
zip(
['time', 'value'],
[C.TIME, C.UN_RATE]
)
)
filename = self.finder.get_unemployment_path()
return self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.UN_RATE],
0
)
def save_unemployment_rate(self, **kwargs):
# given a symbol, save its dividend history
filename = self.finder.get_unemployment_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_unemployment_rate(**kwargs), C.TIME, '%Y-%m')
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_s2f_ratio(self, df):
full_mapping = dict(
zip(
['t', 'o.daysTillHalving', 'o.ratio'],
[C.TIME, C.HALVING, C.RATIO]
)
)
filename = self.finder.get_s2f_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.HALVING, C.RATIO],
0
)
return df[self.get_indexer({C.TIME, C.HALVING, C.RATIO}, df.columns)]
def get_s2f_ratio(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_s2f_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.HALVING, C.RATIO]]
return filtered
def save_s2f_ratio(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_s2f_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_s2f_ratio(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_diff_ribbon(self, df):
full_mapping = dict(
zip(
['t', 'o.ma9', 'o.ma14', 'o.ma25', 'o.ma40',
'o.ma60', 'o.ma90', 'o.ma128', 'o.ma200'],
[C.TIME] + C.MAs
)
)
filename = self.finder.get_diff_ribbon_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME] + C.MAs,
0
)
return df[self.get_indexer(set([C.TIME] + C.MAs), df.columns)]
def get_diff_ribbon(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_diff_ribbon_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME] + C.MAs]
return filtered
def save_diff_ribbon(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_diff_ribbon_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_diff_ribbon(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sopr(self, df):
full_mapping = dict(
zip(
['t', 'v'],
[C.TIME, C.SOPR]
)
)
filename = self.finder.get_diff_ribbon_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.SOPR],
1
)
return df[self.get_indexer({C.TIME, C.SOPR}, df.columns)]
def get_sopr(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sopr_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.SOPR]]
return filtered
def save_sopr(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_sopr_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_sopr(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
# def handle_request(self, url, err_msg):
class IEXCloud(MarketData):
def __init__(self, test=False):
super().__init__()
self.version = 'v1'
self.provider = 'iexcloud'
if test:
self.base = 'https://sandbox.iexapis.com'
self.token = os.environ['IEXCLOUD_SANDBOX']
else:
self.base = 'https://cloud.iexapis.com'
self.token = os.environ['IEXCLOUD']
def get_dividends(self, **kwargs):
# given a symbol, return the dividend history
def _get_dividends(symbol, timeframe='3m'):
category = 'stock'
dataset = 'dividends'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = [datum for datum in response.json() if datum['flag']
== 'Cash' and datum['currency'] == 'USD']
else:
raise Exception(
f'Invalid response from IEX for {symbol} dividends.')
if data == []:
return empty
df = self.standardize_dividends(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_dividends, **kwargs)
def get_splits(self, **kwargs):
# given a symbol, return the stock splits
def _get_splits(symbol, timeframe='3m'):
category = 'stock'
dataset = 'splits'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} splits.')
if data == []:
return empty
df = self.standardize_splits(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_splits, **kwargs)
def get_ohlc(self, **kwargs):
def _get_prev_ohlc(symbol):
category = 'stock'
dataset = 'previous'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} OHLC.')
if data == []:
return empty
df = pd.DataFrame([data])
return self.standardize_ohlc(symbol, df)
def _get_ohlc(symbol, timeframe='1m'):
if timeframe == '1d':
return _get_prev_ohlc(symbol)
category = 'stock'
dataset = 'chart'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} OHLC.')
if data == []:
return empty
df = self.standardize_ohlc(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.TIME, timeframe)
return self.try_again(func=_get_ohlc, **kwargs)
# extra_hrs should be True if possible
def get_intraday(self, **kwargs):
def _get_intraday(symbol, min=1, timeframe='max', extra_hrs=True):
# pass min directly into hist prices endpoint
# to get 1, 5, 30, 60 min granularity if possible
# and get extra hrs if possible
category = 'stock'
dataset = 'chart'
dates = self.traveller.dates_in_range(timeframe)
if dates == []:
raise Exception(f'No dates in timeframe: {timeframe}.')
for date in dates:
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
'date',
date.replace('-', '')
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} intraday.')
if data == []:
continue
df = pd.DataFrame(data)
df['date'] = pd.to_datetime(df['date'] + ' ' + df['minute'])
# if all values are na except time, then skip
num_data_rows = len(
df.drop(columns=['date', 'minute']).dropna(how='all'))
if (num_data_rows == 0):
continue
res_cols = ['date', 'minute', 'marketOpen', 'marketHigh',
'marketLow', 'marketClose', 'marketVolume',
'marketAverage', 'marketNumberOfTrades']
std_cols = ['date', 'minute', 'open', 'high', 'low',
'close', 'volume', 'average', 'trades']
columns = dict(zip(res_cols, std_cols))
df = df[res_cols].rename(columns=columns)
df.drop(columns='minute', inplace=True)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
df = self.standardize_ohlc(symbol, df, filename)
yield df
return self.try_again(func=_get_intraday, **kwargs)
class Polygon(MarketData):
def __init__(self, token=os.environ.get('POLYGON'), free=True):
super().__init__()
self.client = RESTClient(token)
self.provider = 'polygon'
self.free = free
def obey_free_limit(self):
if self.free and hasattr(self, 'last_api_call_time'):
time_since_last_call = time() - self.last_api_call_time
delay = C.POLY_FREE_DELAY - time_since_last_call
if delay > 0:
sleep(delay)
def log_api_call_time(self):
self.last_api_call_time = time()
def get_dividends(self, **kwargs):
def _get_dividends(symbol, timeframe='max'):
self.obey_free_limit()
try:
response = self.client.reference_stock_dividends(symbol)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = pd.DataFrame(response.results)
df = self.standardize_dividends(symbol, raw)
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_dividends, **kwargs)
def get_splits(self, **kwargs):
def _get_splits(symbol, timeframe='max'):
self.obey_free_limit()
try:
response = self.client.reference_stock_splits(symbol)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = pd.DataFrame(response.results)
df = self.standardize_splits(symbol, raw)
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_splits, **kwargs)
def get_ohlc(self, **kwargs):
def _get_ohlc(symbol, timeframe='max'):
is_crypto = symbol.find('X%3A') == 0
formatted_start, formatted_end = self.traveller.convert_dates(
timeframe)
self.obey_free_limit()
try:
response = self.client.stocks_equities_aggregates(
symbol, 1, 'day',
from_=formatted_start, to=formatted_end, unadjusted=False
)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = response.results
columns = {'t': 'date', 'o': 'open', 'h': 'high',
'l': 'low', 'c': 'close', 'v': 'volume',
'vw': 'average', 'n': 'trades'}
df = pd.DataFrame(raw).rename(columns=columns)
if is_crypto:
df['date'] = pd.to_datetime(
df['date'], unit='ms')
else:
df['date'] = pd.to_datetime(
df['date'], unit='ms').dt.tz_localize(
'UTC').dt.tz_convert(
C.TZ).dt.tz_localize(None)
df = self.standardize_ohlc(symbol, df)
return self.reader.data_in_timeframe(df, C.TIME, timeframe)
return self.try_again(func=_get_ohlc, **kwargs)
def get_intraday(self, **kwargs):
def _get_intraday(symbol, min=1, timeframe='max', extra_hrs=True):
# pass min directly into stock_aggs function as multiplier
is_crypto = symbol.find('X%3A') == 0
dates = self.traveller.dates_in_range(timeframe)
if dates == []:
raise Exception(f'No dates in timeframe: {timeframe}.')
for idx, date in enumerate(dates):
self.obey_free_limit()
try:
response = self.client.stocks_equities_aggregates(
symbol, min, 'minute', from_=date, to=date,
unadjusted=False
)
except Exception as e:
raise e
finally:
self.log_api_call_time()
if hasattr(response, 'results'):
response = response.results
else:
continue
columns = {'t': 'date', 'o': 'open', 'h': 'high',
'l': 'low', 'c': 'close', 'v': 'volume',
'vw': 'average', 'n': 'trades'}
df = pd.DataFrame(response).rename(columns=columns)
if is_crypto:
df['date'] = pd.to_datetime(
df['date'], unit='ms')
else:
df['date'] = pd.to_datetime(
df['date'], unit='ms').dt.tz_localize(
'UTC').dt.tz_convert(
C.TZ).dt.tz_localize(None)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
df = self.standardize_ohlc(symbol, df, filename)
df = df[df[C.TIME].dt.strftime(C.DATE_FMT) == date]
yield df
return self.try_again(func=_get_intraday, **kwargs)
# newShares = oldShares / ratio
class StockTwits(MarketData):
def __init__(self):
super().__init__()
self.base = 'https://api.stocktwits.com'
self.version = '2'
self.token = os.environ.get('STOCKTWITS')
self.provider = 'stocktwits'
def get_social_volume(self, **kwargs):
def _get_social_volume(symbol, timeframe='max'):
parts = [
self.base,
'api',
self.version,
'symbols',
symbol,
'volume.json'
]
url = '/'.join(parts)
params = {'access_token': self.token}
vol_res = requests.get(url, params=params)
json_res = vol_res.json()
empty = pd.DataFrame()
if vol_res.ok:
vol_data = json_res['data']
else:
if 'errors' in json_res:
errors = '\n'.join([error['message']
for error in json_res['errors']])
raise Exception(
f'Invalid response from Stocktwits for {symbol}\n{errors}')
if vol_data == []:
return empty
vol_data.sort(key=lambda x: x['timestamp'])
vol_data.pop()
df = pd.DataFrame(vol_data)
std = self.standardize_volume(symbol, df)
if timeframe == '1d':
filtered = std.tail(1)
else:
filtered = self.reader.data_in_timeframe(
std, C.TIME, timeframe)
[[C.TIME, C.VOL, C.DELTA]]
return filtered
return self.try_again(func=_get_social_volume, **kwargs)
def get_social_sentiment(self, **kwargs):
def _get_social_sentiment(symbol, timeframe='max'):
parts = [
self.base,
'api',
self.version,
'symbols',
symbol,
'sentiment.json'
]
url = '/'.join(parts)
params = {'access_token': self.token}
sen_res = requests.get(url, params=params)
json_res = sen_res.json()
empty = pd.DataFrame()
if sen_res.ok:
sen_data = json_res['data']
else:
if 'errors' in json_res:
errors = '\n'.join([error['message']
for error in json_res['errors']])
raise Exception(
f'Invalid response from Stocktwits for {symbol}\n{errors}')
if sen_data == []:
return empty
sen_data.sort(key=lambda x: x['timestamp'])
sen_data.pop()
df = pd.DataFrame(sen_data)
std = self.standardize_sentiment(symbol, df)
if timeframe == '1d':
filtered = std.tail(1)
else:
filtered = self.reader.data_in_timeframe(
std, C.TIME, timeframe)
return filtered
return self.try_again(func=_get_social_sentiment, **kwargs)
class LaborStats(MarketData):
def __init__(self):
super().__init__()
self.base = 'https://api.bls.gov'
self.version = 'v2'
self.token = os.environ.get('BLS')
self.provider = 'bls'
def get_unemployment_rate(self, **kwargs):
def _get_unemployment_rate(timeframe):
start, end = self.traveller.convert_dates(timeframe, '%Y')
parts = [
self.base,
'publicAPI',
self.version,
'timeseries',
'data'
]
url = '/'.join(parts)
params = {'registrationkey': self.token,
'startyear': start, 'endyear': end,
'seriesid': 'LNS14000000'}
response = requests.post(url, data=params)
if (
response.ok and
response.json()['status'] == 'REQUEST_SUCCEEDED'
):
payload = response.json()
if payload['status'] == 'REQUEST_SUCCEEDED':
data = payload['Results']['series'][0]['data']
else:
raise Exception(
f'''
Invalid response from BLS because {data["message"][0]}
'''
)
else:
raise Exception(
'Invalid response from BLS for unemployment rate')
df = pd.DataFrame(data)
df['time'] = df['year'] + '-' + \
df['period'].str.slice(start=1)
df = self.standardize_unemployment(df)
return self.reader.data_in_timeframe(df, C.TIME, timeframe)
return self.try_again(func=_get_unemployment_rate, **kwargs)
class Glassnode(MarketData):
def __init__(self):
super().__init__()
self.base = 'https://api.glassnode.com'
self.version = 'v1'
self.token = os.environ.get('<PASSWORD>')
self.provider = 'glassnode'
def get_s2f_ratio(self, **kwargs):
def _get_s2f_ratio(timeframe):
parts = [
self.base,
self.version,
'metrics',
'indicators',
'stock_to_flow_ratio'
]
url = '/'.join(parts)
empty = pd.DataFrame()
response = requests.get(
url, params={'a': 'BTC', 'api_key': self.token})
if response.ok:
data = response.json()
else:
raise Exception(
'Invalid response from Glassnode for S2F Ratio')
if data == []:
return empty
df = | pd.json_normalize(data) | pandas.json_normalize |
import pandas as pd
a = {"Bir": 1, "İki": 2, "Üç": 3, "Dört": 4, "Beş": 5}
b = {"Bir": 10, "İki": 20, "Üç": 30, "Dört": 40, "Altı": 60}
x = pd.Series(a)
y = | pd.Series(b) | pandas.Series |
from sequana.viz import ANOVA
from pylab import normal
def test_anova():
import pandas as pd
A = normal(0.5,size=10000)
B = normal(0.25, size=10000)
C = normal(0, 0.5,size=10000)
df = | pd.DataFrame({"A":A, "B":B, "C":C}) | pandas.DataFrame |
from copy import deepcopy
import inspect
import pydoc
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import (
async_mark,
skip_if_no,
)
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
class TestDataFrameMisc:
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_empty(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
tm.assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
assert not df.empty
df = DataFrame(index=[1], columns=[1])
assert not df.empty
df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
DataFrame(),
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = DataFrame(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
}
)
t = df.T
result = t.dtypes.value_counts()
expected = Series({np.dtype("object"): 10})
tm.assert_series_equal(result, expected)
def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp["A"]
series[:] = 10
for idx, value in series.items():
assert float_frame["A"][idx] != value
def test_inplace_return_self(self):
# GH 1893
data = DataFrame(
{"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
)
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index("a", inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index("a"), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values("b", inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()["c"]
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index("a")["c"], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(d.copy(), f)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_warning(self, ip, frame_or_series):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
if frame_or_series is DataFrame:
code = "from pandas import DataFrame; obj = DataFrame()"
else:
code = "from pandas import Series; obj = Series(dtype=object)"
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning;
# appears resolved 2021-02-02
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("obj.", 1))
def test_attrs(self):
df = DataFrame({"A": [2, 3]})
assert df.attrs == {}
df.attrs["version"] = 1
result = df.rename(columns=str)
assert result.attrs == {"version": 1}
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem (no copy)
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
def test_set_flags(self, allows_duplicate_labels, frame_or_series):
obj = | DataFrame({"A": [1, 2]}) | pandas.DataFrame |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby("a").mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame(
{"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
).set_index(["x", "y"])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
.groupby(level=level, sort=sort)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in the
# the wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
gb = df.groupby("a")
result = gb.transform("mean")
gb2 = df2.groupby("a")
expected = gb2.transform("mean")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sort_column",
["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
"group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{
"int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
"string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
"ints": [8, 7, 4, 5, 2, 9, 1, 1],
"floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
"strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
}
)
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
tm.assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)[["Z"]]
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = pd.Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_pivot_table_values_key_error():
# This test is designed to replicate the error in issue #14938
df = pd.DataFrame(
{
"eventDate": pd.date_range(datetime.today(), periods=20, freq="M").tolist(),
"thename": range(0, 20),
}
)
df["year"] = df.set_index("eventDate").index.year
df["month"] = df.set_index("eventDate").index.month
with pytest.raises(KeyError, match="'badname'"):
df.reset_index().pivot_table(
index="year", columns="month", values="badname", aggfunc="count"
)
def test_empty_dataframe_groupby():
# GH8093
df = DataFrame(columns=["A", "B", "C"])
result = df.groupby("A").sum()
expected = DataFrame(columns=["B", "C"], dtype=np.float64)
expected.index.name = "A"
tm.assert_frame_equal(result, expected)
def test_tuple_as_grouping():
# https://github.com/pandas-dev/pandas/issues/18314
df = pd.DataFrame(
{
("a", "b"): [1, 1, 1, 1],
"a": [2, 2, 2, 2],
"b": [2, 2, 2, 2],
"c": [1, 1, 1, 1],
}
)
with pytest.raises(KeyError, match=r"('a', 'b')"):
df[["a", "b", "c"]].groupby(("a", "b"))
result = df.groupby(("a", "b"))["c"].sum()
expected = pd.Series([4], name="c", index=pd.Index([1], name=("a", "b")))
tm.assert_series_equal(result, expected)
def test_tuple_correct_keyerror():
# https://github.com/pandas-dev/pandas/issues/18798
df = pd.DataFrame(
1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]])
)
with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
# GH 21716
df = pd.DataFrame(
[[1], [1]],
columns=["foo"],
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
expected = pd.DataFrame(
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
columns=pd.MultiIndex.from_tuples(
(
("foo", "sum", "foo"),
("foo", "ohlc", "open"),
("foo", "ohlc", "high"),
("foo", "ohlc", "low"),
("foo", "ohlc", "close"),
)
),
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
result = df.groupby(pd.Grouper(freq="D")).agg(["sum", "ohlc"])
tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_nat():
# GH 9236
values = [
(pd.NaT, "a"),
(datetime(2012, 1, 2), "a"),
(datetime(2012, 1, 2), "b"),
(datetime(2012, 1, 3), "a"),
]
mi = | pd.MultiIndex.from_tuples(values, names=["date", None]) | pandas.MultiIndex.from_tuples |
import time
from Bio import Entrez
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
from io import StringIO
import unidecode
# Create output file
output_file = 'BIOI4870-Tumor-Sample-Database-DML.sql'
with open(output_file, 'w+') as f:
pass
# Email for connecting to Entrez, enter your own email
Entrez.email = '<EMAIL>'
# string in ids that indicate type of data, biosample ids begin with SAMN and sra ids begin with SRX
biosample_header = 'SAMN'
sra_header = 'SRX'
# Create DML for cancer_categories table, this is done first for the biosample table
# to compare 'disease' strings to 'cancer_type' strings present in cancer_categories
input_file = 'NCG_cancerdrivers_annotation_supporting_evidence.tsv'
cancer = pd.read_csv(input_file, sep="\t")
cancer = cancer.drop(columns = ['entrez','symbol','pubmed_id','type','method','coding_status',
'cgc_annotation','vogelstein_annotation','saito_annotation','NCG_oncogene','NCG_tsg'])
cancer = cancer.dropna(how='any')
cancer = cancer.drop_duplicates()
# Clean up table format
cancer = cancer.sort_values(by=['primary_site'])
cancer = cancer.sort_values(by=['organ_system'])
print(cancer)
with open(output_file, 'a') as f:
print("--DML for cancer_categories", file=f)
for i in cancer.itertuples(index=False, name=None):
print("INSERT INTO cancer_categories VALUES {};".format(str(i).replace('_',' ')), file=f)
# Create dict object from cancer_dict.tsv
cancer_dict = {}
with open('cancer_dict.tsv', 'r') as f:
for i in f:
key, value = i.split('\t')
cancer_dict[key] = value.strip('\n')
# Pull biosample and SRA data from samples folder
samples = open('sample_files.txt', 'r')
sra_attributes = StringIO("sra_id,link,study,instrument,strategy,source,selection,layout")
biosample_attributes = StringIO("biosample_id,sra_id,biomaterial_provider,sample_type,isolate,"
+
"tissue,cell_type,disease_stage,cancer_type,phenotype,ethnicity,population,age,sex")
sra = pd.read_csv(sra_attributes)
biosample = pd.read_csv(biosample_attributes)
# Determine if entry is a biosample id or sra id
for i in samples.readlines():
if biosample_header in i:
db = 'biosample'
elif sra_header in i:
db = 'sra'
# Retrieve NCBI xml page through Entrez
handle = Entrez.efetch(db, id = i.strip('\n'))
tree = ET.parse(handle)
root = tree.getroot()
# Parse Biosample data
if root.tag == 'BioSampleSet':
# Create dict for values to be inserted into dataframe
biosample_add = dict(
biosample_id = '',
sra_id = '',
biomaterial_provider = '',
sample_type = '',
isolate = '',
tissue = '',
cell_type = '',
disease_stage = '',
cancer_type = '',
phenotype = '',
ethnicity = '',
population = '',
age = '',
sex = ''
)
for id in root.find('BioSample').find('Ids'):
if 'db' in id.attrib:
# Set biosample_id value
if id.get('db') == 'BioSample':
biosample_add['biosample_id'] = id.text
# Set sra_id value
elif id.get('db') == 'SRA':
biosample_add['sra_id'] = id.text
# For each attribute, check for identical key in biosample_add dict, and if present, set key value to text of xml attribute
for attribute in root.find('BioSample').find('Attributes'):
if attribute.get('harmonized_name') in biosample_add:
if attribute.get('harmonized_name') == 'biomaterial_provider':
# Removes accents from biomaterial_provider string they do not agree with SQL
biosample_add[attribute.get('harmonized_name')] = unidecode.unidecode(attribute.text)
else:
biosample_add[attribute.get('harmonized_name')] = attribute.text
elif attribute.get('harmonized_name') == 'disease':
# Converts disease string to one recognized by cancer_categories and cancer_genes table
if attribute.text in cancer_dict:
biosample_add['cancer_type'] = cancer_dict[attribute.text]
else:
for disease in cancer['cancer_type']:
if disease.lower().replace('_',' ') == attribute.text.lower().replace('_',' '):
biosample_add['cancer_type'] = disease.lower().replace('_',' ')
pd_add = | pd.DataFrame.from_dict([biosample_add]) | pandas.DataFrame.from_dict |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjsuted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date we well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, | pd.Timestamp('2015-01-12') | pandas.Timestamp |
"""
Packages to use :
tsfresh
tsfel https://tsfel.readthedocs.io/en/latest/
sktime
feature tools : https://docs.featuretools.com/en/stable/automated_feature_engineering/handling_time.html
Cesium http://cesium-ml.org/docs/feature_table.html
Feature Tools for advacned fewatures `https://github.com/Featuretools/predict-remaining-useful-life/blob/master/Advanced%20Featuretools%20RUL.ipynb
"""
import pandas as pd
import tsfresh
from tsfresh import extract_relevant_features, extract_features
import numpy as np
import pdb
import re
def features_time_basic(df, input_raw_path = None, dir_out = None, features_group_name = None, auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols = None, dep_col = None, max_rows = 10):
df['date_t'] = pd.to_datetime(df['date'])
df['year'] = df['date_t'].dt.year
df['month'] = df['date_t'].dt.month
df['week'] = df['date_t'].dt.week
df['day'] = df['date_t'].dt.day
df['dayofweek'] = df['date_t'].dt.dayofweek
cat_cols = []
return df[['year', 'month', 'week', 'day', 'dayofweek'] + id_cols], cat_cols
def features_lag(df, fname):
out_df = df[['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']]
###############################################################################
# day lag 29~57 day and last year's day lag 1~28 day
day_lag = df.iloc[:,-28:]
day_year_lag = df.iloc[:,-393:-365]
day_lag.columns = [str("lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
day_year_lag.columns = [str("lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# Rolling mean(3) and (7) and (28) and (84) 29~57 day and last year's day lag 1~28 day
rolling_3 = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-28:]
rolling_3.columns = [str("rolling3_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_3_year = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-393:-365]
rolling_3_year.columns = [str("rolling3_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_7 = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-28:]
rolling_7.columns = [str("rolling7_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_7_year = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-393:-365]
rolling_7_year.columns = [str("rolling7_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_28 = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-28:]
rolling_28.columns = [str("rolling28_lag_{}_day".format(i)) for i in range(29,57)]
rolling_28_year = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-393:-365]
rolling_28_year.columns = [str("rolling28_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_84 = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-28:]
rolling_84.columns = [str("rolling84_lag_{}_day".format(i)) for i in range(29,57)]
rolling_84_year = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-393:-365]
rolling_84_year.columns = [str("rolling84_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly = df.iloc[:,-28*i:].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
else:
monthly = df.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
# combine day lag and monthly lag
out_df = pd.concat([out_df, day_lag], axis=1)
out_df = pd.concat([out_df, day_year_lag], axis=1)
out_df = pd.concat([out_df, rolling_3], axis=1)
out_df = pd.concat([out_df, rolling_3_year], axis=1)
out_df = pd.concat([out_df, rolling_7], axis=1)
out_df = pd.concat([out_df, rolling_7_year], axis=1)
out_df = pd.concat([out_df, rolling_28], axis=1)
out_df = pd.concat([out_df, rolling_28_year], axis=1)
out_df = pd.concat([out_df, rolling_84], axis=1)
out_df = pd.concat([out_df, rolling_84_year], axis=1)
out_df = pd.concat([out_df, month_lag], axis=1)
###############################################################################
# dept_id
group_dept = df.groupby("dept_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
dept_day_lag = group_dept.iloc[:,-28:]
dept_day_year_lag = group_dept.iloc[:,-393:-365]
dept_day_lag.columns = [str("dept_lag_{}_day".format(i)) for i in range(29,57)]
dept_day_year_lag.columns = [str("dept_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_dept_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_dept = group_dept.iloc[:,-28*i:].T.sum().T
month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
elif i >= 7 and i < 13:
continue
else:
monthly = group_dept.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
# combine out df
out_df = pd.merge(out_df, dept_day_lag, left_on="dept_id", right_index=True, how="left")
out_df = pd.merge(out_df, dept_day_year_lag, left_on="dept_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_dept_lag, left_on="dept_id", right_index=True, how="left")
###############################################################################
# cat_id
group_cat = df.groupby("cat_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
cat_day_lag = group_cat.iloc[:,-28:]
cat_day_year_lag = group_cat.iloc[:,-393:-365]
cat_day_lag.columns = [str("cat_lag_{}_day".format(i)) for i in range(29,57)]
cat_day_year_lag.columns = [str("cat_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_cat_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_cat = group_cat.iloc[:,-28*i:].T.sum().T
month_cat_lag["cat_monthly_lag_{}_month".format(i)] = monthly_cat
elif i >= 7 and i < 13:
continue
else:
monthly_cat = group_cat.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_cat_lag["dept_monthly_lag_{}_month".format(i)] = monthly_cat
# combine out df
out_df = pd.merge(out_df, cat_day_lag, left_on="cat_id", right_index=True, how="left")
out_df = pd.merge(out_df, cat_day_year_lag, left_on="cat_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_cat_lag, left_on="cat_id", right_index=True, how="left")
###############################################################################
# store_id
group_store = df.groupby("store_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
store_day_lag = group_store.iloc[:,-28:]
store_day_year_lag = group_store.iloc[:,-393:-365]
store_day_lag.columns = [str("store_lag_{}_day".format(i)) for i in range(29,57)]
store_day_year_lag.columns = [str("store_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_store_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_store = group_store.iloc[:,-28*i:].T.sum().T
month_store_lag["store_monthly_lag_{}_month".format(i)] = monthly_store
elif i >= 7 and i <13:
continue
else:
monthly_store = group_store.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_store_lag["store_monthly_lag_{}_month".format(i)] = monthly_store
# combine out df
out_df = pd.merge(out_df, store_day_lag, left_on="store_id", right_index=True, how="left")
out_df = pd.merge(out_df, store_day_year_lag, left_on="store_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_store_lag, left_on="store_id", right_index=True, how="left")
###############################################################################
# state_id
group_state = df.groupby("state_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
state_day_lag = group_state.iloc[:,-28:]
state_day_year_lag = group_state.iloc[:,-393:-365]
state_day_lag.columns = [str("state_lag_{}_day".format(i)) for i in range(29,57)]
state_day_year_lag.columns = [str("state_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_state_lag = pd.DataFrame({})
for i in range(1,13):
if i == 1:
monthly_state = group_state.iloc[:,-28*i:].T.sum().T
month_state_lag["state_monthly_lag_{}_month".format(i)] = monthly_state
elif i >= 7 and i < 13:
continue
else:
monthly_state = group_state.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_state_lag["state_monthly_lag_{}_month".format(i)] = monthly_state
# combine out df
out_df = pd.merge(out_df, state_day_lag, left_on="state_id", right_index=True, how="left")
out_df = pd.merge(out_df, state_day_year_lag, left_on="state_id", right_index=True, how="left")
out_df = | pd.merge(out_df, month_state_lag, left_on="state_id", right_index=True, how="left") | pandas.merge |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/10/14 12:19
Desc: 巨潮资讯-数据中心-专题统计-债券报表-债券发行
http://webapi.cninfo.com.cn/#/thematicStatistics
"""
import time
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
js_str = """
function mcode(input) {
var keyStr = "<KEY> <KEY>;
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def bond_treasure_issue_cninfo(
start_date: str = "20210910", end_date: str = "20211109"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行-国债发行
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 开始统计时间
:type end_date: str
:return: 国债发行
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1120"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_local_government_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行-地方债发行
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 开始统计时间
:type end_date: str
:return: 地方债发行
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1121"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_corporate_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行-企业债发行
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 开始统计时间
:type end_date: str
:return: 企业债发行
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1122"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "交易所网上发行终止日",
"F003D": "交易所网上发行起始日",
"F008N": "发行面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "实际发行总量",
"F005N": "计划发行总量",
"F022N": "最小认购单位",
"F017V": "承销方式",
"F052N": "最低认购额",
"F015V": "发行范围",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F013V": "发行方式",
"F023V": "募资用途说明",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"公告日期",
"交易所网上发行起始日",
"交易所网上发行终止日",
"计划发行总量",
"实际发行总量",
"发行面值",
"发行价格",
"发行方式",
"发行对象",
"发行范围",
"承销方式",
"最小认购单位",
"募资用途说明",
"最低认购额",
"债券名称",
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["交易所网上发行起始日"] = pd.to_datetime(temp_df["交易所网上发行起始日"]).dt.date
temp_df["交易所网上发行终止日"] = pd.to_datetime(temp_df["交易所网上发行终止日"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行面值"] = pd.to_numeric(temp_df["发行面值"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["最小认购单位"] = pd.to_numeric(temp_df["最小认购单位"])
temp_df["最低认购额"] = pd.to_numeric(temp_df["最低认购额"])
return temp_df
def bond_cov_issue_cninfo(
start_date: str = "20210913", end_date: str = "20211112"
) -> pd.DataFrame:
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行-可转债发行
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: 开始统计时间
:type start_date: str
:param end_date: 开始统计时间
:type end_date: str
:return: 可转债发行
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1123"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
'F029D': '发行起始日',
'SECNAME': '债券简称',
'F027D': '转股开始日期',
'F003D': '发行终止日',
'F007N': '发行面值',
'F053D': '转股终止日期',
'F005N': '计划发行总量',
'F051D': '网上申购日期',
'F026N': '初始转股价格',
'F066N': '网上申购数量下限',
'F052N': '发行价格',
'BONDNAME': '债券名称',
'F014V': '发行对象',
'F002V': '交易市场',
'F032V': '网上申购简称',
'F086V': '转股代码',
'DECLAREDATE': '公告日期',
'F028D': '债权登记日',
'F004D': '优先申购日',
'F068D': '网上申购中签结果公告日及退款日',
'F054D': '优先申购缴款日',
'F008N': '网上申购数量上限',
'SECCODE': '债券代码',
'F006N': '实际发行总量',
'F067N': '网上申购单位',
'F065N': '配售价格',
'F017V': '承销方式',
'F015V': '发行范围',
'F013V': '发行方式',
'F021V': '募资用途说明',
'F031V': '网上申购代码'
},
inplace=True,
)
temp_df = temp_df[
[
'债券代码',
'债券简称',
'公告日期',
'发行起始日',
'发行终止日',
'计划发行总量',
'实际发行总量',
'发行面值',
'发行价格',
'发行方式',
'发行对象',
'发行范围',
'承销方式',
'募资用途说明',
'初始转股价格',
'转股开始日期',
'转股终止日期',
'网上申购日期',
'网上申购代码',
'网上申购简称',
'网上申购数量上限',
'网上申购数量下限',
'网上申购单位',
'网上申购中签结果公告日及退款日',
'优先申购日',
'配售价格',
'债权登记日',
'优先申购缴款日',
'转股代码',
'交易市场',
'债券名称',
]
]
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["转股开始日期"] = pd.to_datetime(temp_df["转股开始日期"]).dt.date
temp_df["转股终止日期"] = pd.to_datetime(temp_df["转股终止日期"]).dt.date
temp_df["转股终止日期"] = pd.to_datetime(temp_df["转股终止日期"]).dt.date
temp_df["网上申购日期"] = pd.to_datetime(temp_df["网上申购日期"]).dt.date
temp_df["网上申购中签结果公告日及退款日"] = pd.to_datetime(temp_df["网上申购中签结果公告日及退款日"]).dt.date
temp_df["债权登记日"] = pd.to_datetime(temp_df["债权登记日"]).dt.date
temp_df["优先申购日"] = pd.to_datetime(temp_df["优先申购日"]).dt.date
temp_df["优先申购缴款日"] = pd.to_datetime(temp_df["优先申购缴款日"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行面值"] = pd.to_nu | meric(temp_df["发行面值"]) | pandas.to_numeric |
import matplotlib.pyplot as plt
import os
import seaborn as sns
import numpy as np
from matplotlib.colors import ListedColormap
import pandas as pd
from sklearn.manifold import TSNE
from src.Utils.Fitness import Fitness
class Graphs:
def __init__(self,objectiveNames,data,save=True,display=False,path='./Figures/'):
self.objectiveNames = objectiveNames
self.data = data
self.save = save
self.path = path
self.display = display
self.CheckIfPathExist()
def CheckIfPathExist(self):
p = self.path.split('/')
p = p[:-1]
p = '/'.join(p)
pathExist = os.path.exists(p)
if not pathExist :
os.mkdir(p)
def dataTSNE(self):
self.data = self.ChangeAlgoNames(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27)
if self.display:
plt.show()
if self.save:
fig.savefig(self.path + ".png")
def findGlobalParetoFront(self,dataSet,pop):
print('find global pareto front')
fitness = Fitness('horizontal_binary', ['support','confidence','cosine'], len(pop) ,dataSet.shape[1])
fitness.ComputeScorePopulation(pop,dataSet)
scores = fitness.scores
print(scores)
paretoFront = []
isParetoFrontColumn = []
for p in range(len(scores)):
dominate = True
for q in range(len(scores)):
if fitness.Domination(scores[p], scores[q]) == 1:
dominate = False
isParetoFrontColumn.append(False)
break
if dominate:
paretoFront.append(p)
isParetoFrontColumn.append(True)
paretoFront = np.array(paretoFront)
return paretoFront
def getRulesFromFiles(self,dataSet,data):
rules = []
pop = []
files = os.listdir('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/')
for file in files:
f = open('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/'+file,'r')
lines = f.readlines()
f.close()
for i in range(len(lines)):
if(i%2==0):
ind = np.zeros(dataSet.shape[1]*2)
line = lines[i]
line = line[1:len(line)-2]
line = line.split("' '")
line = [l.replace("'", "") for l in line]
for li in range(len(line)):
obj = line[li]
obj = obj[1:len(obj)-1]
obj = obj.split(' ')
obj= [ x for x in obj if x!='']
if(li==0):
for item in obj:
ind[int(item)] = 1
if(li==2):
for item in obj:
ind[int(item)+dataSet.shape[1]] = 1
pop.append(ind)
pop = np.array(pop)
paretoFront = self.findGlobalParetoFront(dataSet,pop)
pop = pop[paretoFront]
pop = [list(x) for x in pop]
isInParetoFront = []
for i in range(len(data)):
line = list(np.array(data.loc[i])[1:])
isInPareto = False
for ind in pop:
if(ind == line):
isInPareto = True
if isInPareto:
isInParetoFront.append(True)
else:
isInParetoFront.append(False)
return isInParetoFront
def dataTSNEFromFile(self,dataSet):
self.data = pd.read_csv('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndividuals/49.csv',index_col=0)
isParetoFrontColumn = self.getRulesFromFiles(dataSet,self.data)
self.data = self.ChangeAlgoNames(self.data)
print(self.data)
algorithms = self.data['algorithm']
self.data = self.data.drop('algorithm',axis=1)
self.data['isInParetoFront'] = isParetoFrontColumn
self.data = TSNE(n_components=2, learning_rate='auto',
init='random').fit_transform(np.asarray(self.data,dtype='float64'))
transformed = pd.DataFrame(list(zip(list(algorithms),self.data[:,0],self.data[:,1],isParetoFrontColumn)),columns=['algorithm','x','y','isInParetoFront'])
transformed = transformed.drop_duplicates()
self.data = transformed
print(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27,hue='isInParetoFront')
self.path = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndividuals/graph'
if True:
plt.show()
if True:
fig.savefig(self.path + ".png")
def GraphNbRules(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphDistances(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='distances', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphCoverages(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageCoverages(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
for nameIndex in range(len(algName)):
# data.append([algName[nameIndex],float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == nbIter-1)]['coverages'])])
data.append([algName[nameIndex], float(
df.loc[df['algorithm'] == algName[nameIndex]].head(1)['coverages'])])
df = pd.DataFrame(data,columns=['algorithm','coverages'])
df = df.sort_values(by=['coverages'],ascending=False)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if true:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageNBRules(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/NbRules/'+str(nbIter-1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex],float(df.loc[df['algorithm'] == algName[nameIndex]]['nbRules'])])
df = pd.DataFrame(data,columns=['algorithm','nbRules'])
df = df.sort_values(by=['nbRules'],ascending=False)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageExecutionTime(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/ExecutionTime.csv', index_col=0)
for nameIndex in range(len(algName)):
for j in range(nbIter):
data.append([algName[nameIndex], float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == j)]['execution Time'])])
df = pd.DataFrame(data, columns=['algorithm', 'execution Time'])
df = df.sort_values(by=['execution Time'], ascending=False)
df = self.ChangeAlgoNames(df)
print(df)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='execution Time', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageDistances(self, p, algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
df = pd.read_csv(p + str(i) + '/Distances.csv', index_col=0)
for nameIndex in range(len(algName)):
# data.append([algName[nameIndex], float(df.loc[(df['algorithm'] == algName[nameIndex]) & (df['i'] == nbIter-1) ]['distances'])])
data.append([algName[nameIndex], float(
df.loc[df['algorithm'] == algName[nameIndex]].head(1)['distances'])])
df = pd.DataFrame(data, columns=['algorithm', 'distances'])
df = df.sort_values(by=['distances'], ascending=False)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='distances', data=df)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphExecutionTime(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
self.data = self.ChangeAlgoNames(self.data)
sns.lineplot(x='i',y='execution Time',hue='algorithm',style='algorithm',data=self.data)
fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def GraphScores(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
#a Changer si on a une IM avec un interval de definition autre
ax.set_zlim3d(0, 1)
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
for alg in self.data.algorithm.unique():
ax.scatter(self.data[self.data.algorithm==alg][self.objectiveNames[0]],
self.data[self.data.algorithm==alg][self.objectiveNames[1]],
self.data[self.data.algorithm==alg][self.objectiveNames[2]],
label=alg)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def ChangeAlgoNames(self,df):
df = df.replace('custom','Cambrian Explosion')
df = df.replace('mohsbotsarm', 'Bee Swarm')
df = df.replace('moaloarm', 'Antlion')
df = df.replace('modearm', 'Differential Evolution')
df = df.replace('mossoarm', 'Social Spider')
df = df.replace('modaarm', 'Dragonfly')
df = df.replace('mowoaarm', 'Whale')
df = df.replace('mogsaarm', 'Gravity Search')
df = df.replace('hmofaarm', 'Firefly')
df = df.replace('mofpaarm', 'Flower Polination')
df = df.replace('mososarm', 'Symbiotic')
df = df.replace('mowsaarm', 'Wolf')
df = df.replace('mocatsoarm', 'Cat')
df = df.replace('mogeaarm', 'Gradient')
df = df.replace('nshsdearm', 'NSHSDE')
df = df.replace('mosaarm', 'Simulated Annealing')
df = df.replace('motlboarm', 'Teaching Learning')
df = df.replace('mopso', 'Particle Swarm')
df = df.replace('mocssarm', 'Charged System')
df = df.replace('nsgaii', 'NSGAII')
df = df.replace('mocsoarm', 'Cockroach')
return df
def getAverage(self):
nbRepeat = 50
dataset = 'RISK'
mesureFolder = 'LeaderBoard'
dfArray = []
avgArray = []
for i in range(nbRepeat):
p = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/' + dataset + '/'
p = p +str(i)+'/'+ mesureFolder+'/49.csv'
df = pd.read_csv(p,index_col=1)
if(i>0):
fdf = fdf + df
else:
fdf = df
fdf = fdf/nbRepeat
fdf = fdf.sort_values(by=['support'],ascending=False)
print(fdf)
def Graph3D(self):
plt.cla()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = self.data[:, 0]
y = self.data[:, 1]
z = self.data[:, 2]
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
ax.scatter(x, y, z)
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
plt.close()
def GraphNBRulesVsCoverages(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfNbRules = pd.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
dfCoverages = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# dfCoverages = dfCoverages[dfCoverages['i']==float(nbRepeat-1)]
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfNbRules.loc[dfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
dfCoverages.loc[dfCoverages['algorithm'] == algName[nameIndex]].head(1)['coverages'])])
df = pd.DataFrame(data, columns=['algorithm', 'nbRules','coverages'])
df = df.sort_values(by=['nbRules'], ascending=False)
coverages = df.groupby(['algorithm'])
coverages = coverages['coverages'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
coverages = coverages.rename(columns={'mean':'covMean','std':'covStd'})
nbRules = df.groupby(['algorithm'])
nbRules = nbRules['nbRules'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
nbRules = nbRules.rename(columns={'mean': 'nbRulesMean', 'std': 'nbRulesStd'})
df = pd.concat([coverages,nbRules],axis=1)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig = plt.figure(figsize=(15, 15))
ax = sns.scatterplot(x='nbRulesMean', y='covMean', hue='algorithm', style='algorithm',data=df)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphNBRulesVsCoverages' + ".png")
def GraphSCCVsCoverage(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfCoverages = pd.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# dfCoverages = dfCoverages[dfCoverages['i'] == float(nbRepeat - 1)]
dfScores = pd.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfCoverages.loc[dfCoverages['algorithm'] == algName[nameIndex]].head(1)['coverages']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['support']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['cosine'])])
df = pd.DataFrame(data, columns=['algorithm', 'coverages','support','confidence','cosine'])
df = df.sort_values(by=['coverages'], ascending=False)
support = df.groupby(['algorithm'])
support = support['support'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
support = support.rename(columns={'mean':'supportMean','std':'supportStd'})
confidence = df.groupby(['algorithm'])
confidence = confidence['confidence'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
confidence = confidence.rename(columns={'mean': 'confidenceMean', 'std': 'confidenceStd'})
cosine = df.groupby(['algorithm'])
cosine = cosine['cosine'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
cosine = cosine.rename(columns={'mean': 'cosineMean', 'std': 'cosineStd'})
coverages = df.groupby(['algorithm'])
coverages = coverages['coverages'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
coverages = coverages.rename(columns={'mean': 'coveragesMean', 'std': 'coveragesStd'})
df = pd.concat([support,confidence,cosine,coverages],axis=1)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig, axes = plt.subplots(1, 3, figsize=(17, 5), sharey=True)
ax = sns.scatterplot(ax=axes[0],x='coveragesMean', y='supportMean', hue='algorithm', style='algorithm',data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[1], x='coveragesMean', y='confidenceMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[2], x='coveragesMean', y='cosineMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphCoveragesVsSCC' + ".png")
def GraphSCCVsNBRules(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
dfNbRules = pd.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
dfScores = pd.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(len(algName)):
data.append([algName[nameIndex], float(dfNbRules.loc[dfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['support']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
dfScores.loc[dfScores['algorithm'] == algName[nameIndex]]['cosine'])])
df = pd.DataFrame(data, columns=['algorithm', 'nbRules','support','confidence','cosine'])
df = df.sort_values(by=['nbRules'], ascending=False)
support = df.groupby(['algorithm'])
support = support['support'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
support = support.rename(columns={'mean':'supportMean','std':'supportStd'})
confidence = df.groupby(['algorithm'])
confidence = confidence['confidence'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
confidence = confidence.rename(columns={'mean': 'confidenceMean', 'std': 'confidenceStd'})
cosine = df.groupby(['algorithm'])
cosine = cosine['cosine'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
cosine = cosine.rename(columns={'mean': 'cosineMean', 'std': 'cosineStd'})
nbRules = df.groupby(['algorithm'])
nbRules = nbRules['nbRules'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False)
nbRules = nbRules.rename(columns={'mean': 'nbRulesMean', 'std': 'nbRulesStd'})
df = pd.concat([support,confidence,cosine,nbRules],axis=1)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
fig, axes = plt.subplots(1, 3, figsize=(18, 5), sharey=True)
ax = sns.scatterplot(ax=axes[0],x='nbRulesMean', y='supportMean', hue='algorithm', style='algorithm',data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[1], x='nbRulesMean', y='confidenceMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
ax =sns.scatterplot(ax=axes[2], x='nbRulesMean', y='cosineMean', hue='algorithm', style='algorithm', data=df)
ax.get_legend().remove()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphNBRulesVsSCC' + ".png")
def GraphPopDistances(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
repetitionPath = p + str(i) + '/' + graphType + '/'
nbIter = len(os.listdir(repetitionPath))
for j in range(nbIter):
iterPath = repetitionPath+str(j)+'.csv'
df = pd.read_csv(iterPath,index_col=0)
nameCol = [nc for nc in df.columns if nc != 'algorithm']
for nameIndex in range(len(algName)):
distances = df[df['algorithm'] == algName[nameIndex]][nameCol[0]]
data.append([algName[nameIndex],j,float(distances)])
df = pd.DataFrame(data, columns=['algorithm', 'iter', 'distances'])
df = self.ChangeAlgoNames(df)
objectiveName = 'distances'
fig = plt.figure(figsize=(15, 15))
ax = sns.lineplot(x='iter', y='distances', hue='algorithm', style='algorithm', data=df)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
dfTemp = df[df['iter'] == nbIter-1].groupby(['algorithm'])
if self.save:
fig.savefig(self.path+'distances' + ".png")
plt.close()
print('distances :')
print(dfTemp['distances'].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False))
def GraphExperimentation(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = len(os.listdir(p))-2
data = []
for i in range(nbRepeat):
print(i)
repetitionPath = p + str(i) + '/' + graphType + '/'
nbIter = len(os.listdir(repetitionPath))
for j in range(nbIter):
iterPath = repetitionPath+str(j)+'.csv'
df = pd.read_csv(iterPath,index_col=0)
nameCol = [nc for nc in df.columns if nc != 'algorithm']
for nameIndex in range(len(algName)):
s1 = df[df['algorithm'] == algName[nameIndex]][nameCol[0]]
s2 =df[df['algorithm'] == algName[nameIndex]][nameCol[1]]
s3 = df[df['algorithm'] == algName[nameIndex]][nameCol[2]]
data.append([algName[nameIndex],j,float(s1),float(s2),float(s3)])
df = pd.DataFrame(data,columns=['algorithm','iter']+self.objectiveNames)
df.reset_index(level=0, inplace=True)
df = self.ChangeAlgoNames(df)
for k in range(len(self.objectiveNames)):
objectiveName = self.objectiveNames[k]
fig = plt.figure(figsize=(15, 15))
ax = sns.lineplot(x='iter', y=objectiveName, hue='algorithm', style='algorithm', data=df)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
dfTemp = df[df['iter'] == nbIter-1].groupby(['algorithm'])
if self.save:
fig.savefig(self.path+objectiveName + ".png")
plt.close()
print(objectiveName)
print(dfTemp[objectiveName].agg(
['mean', 'std']).sort_values(by=['mean'], ascending=False))
def DatasetColumnsRows(self,p):
df = pd.read_csv(p)
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15, 15))
sns.scatterplot(x='row', y='binary attribute',hue='dataset', data=df)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphFitness(self,p):
df = | pd.read_csv(p) | pandas.read_csv |
import yaml
import pandas as pd
import numpy as np
from os.path import join
from os import makedirs
import glob
import sys
import re
def parse_samplesheet(fp_samplesheet):
#print(fp_samplesheet.split('/')[-1])
# in a first iteration, open the file, read line by line and determine start
# of sample information by looking for a line starting with "[Data]".
# the following lines will be sample information, about lines are header infos.
row_sampleinformation = None
row_reads = None
with open(fp_samplesheet, "r") as f:
for linenumber, line in enumerate(f.readlines()):
if line.startswith("[Data]"):
row_sampleinformation = linenumber+1
elif line.startswith("[Reads]"):
row_reads = linenumber+1
if row_sampleinformation is None:
raise ValueError("Could not find [Data] line in file '%s'." % fp_samplesheet)
if row_reads is None:
raise ValueError("Could not find [Reads] line in file '%s'." % fp_samplesheet)
header = pd.read_csv(fp_samplesheet, sep=",", nrows=row_reads-2, index_col=0).dropna(axis=1, how="all").dropna(axis=0, how="all")
#header = header.set_index(header.columns[0])
header.index = list(map(lambda x: 'header_%s' % x, header.index))
header = header.dropna(axis=0, how="any")
header = header.T.reset_index()
del header['index']
# a xxx iteration parses sample information via pandas
ss = pd.read_csv(fp_samplesheet, sep=",", skiprows=row_sampleinformation, dtype={'Sample_Name': str, 'Sample_ID': str, 'spike_entity_id': str})
# bcl2fasta automatically changes - into _ char in output filenames
idx_rawilluminainput = ss[pd.notnull(ss['Lane'])].index
for f in ['Sample_ID', 'Sample_Name', 'Sample_Project']:
ss.loc[idx_rawilluminainput, f] = ss.loc[idx_rawilluminainput, f].apply(lambda x: x.replace('-', '_') if type(x) != float else x)
# bcl2fastq uses a S%03i index to address samples.
# They are numbered as occuring in the samplesheet order starting with 1.
# However, number is not increased if Sample_ID was already seen.
uidx = dict()
for _, sample_id in ss['Sample_ID'].iteritems():
if sample_id not in uidx:
uidx[sample_id] = len(uidx) + 1
ss['s-idx'] = ss['Sample_ID'].apply(lambda x: uidx[x])
ss['run'] = fp_samplesheet.split('/')[-1].replace('_spike.csv', '')
# TODO: ensure that sample names do not clash when not considering s-idx!
# fastq-prefix
fp_fastqs = []
for idx, row in ss.iterrows():
fp_fastq = ''
if pd.notnull(row['Sample_Project']):
fp_fastq = row['Sample_Project']
if pd.notnull(row['Sample_Name']):
fp_fastq = join(fp_fastq, row['Sample_ID'])
fp_fastqs.append(join(fp_fastq,
'%s' % (
row['Sample_Name'] if pd.notnull(
row['Sample_Name']) else row['Sample_ID'])))
ss['fastq-prefix'] = fp_fastqs
# remove samples that are marked to be ignored
if 'spike_ignore_sample' in ss.columns:
ss = ss[pd.isnull(ss['spike_ignore_sample'])]
if 'spike_notes' not in ss.columns:
ss['spike_notes'] = None
# merge with header information
if not all([c not in ss.columns for c in header.columns]):
raise ValueError("Header name conflicts with sample column in '%s'." % fp_samplesheet)
for c in header.columns:
ss[c] = header[c].iloc[0]
return ss
def validate_samplesheet(ss: pd.DataFrame, config, line_offset: int=22, err=sys.stderr):
"""Checks if sample sheet is valid.
Parameters
----------
ss : pd.DataFrame
Samplesheet to be validated.
config : dict from YAML
Snakemake configuration file holding information about projects.
line_offset : int
Default: 22.
To give user information about problematic lines, we need to go back
to the file (not the DataFrame) to address the correct line.
err : IO.stream
Default: sys.stderr
Stream onto which warnings are written.
Returns
-------
[str] : List of warnings
Raises
------
ValueError if errors are found in the sample sheet.
"""
errors = []
warnings = []
# ensure all needed columns are in the table
exp_columns = {'Lane', 'Sample_ID', 'Sample_Name', 'I7_Index_ID', 'index',
'Sample_Project', 'spike_entity_id', 'spike_entity_role'}
if len(exp_columns - set(ss.columns)) > 0:
errors.append(
'Samplesheet is missing column(s): "%s".' %
'", "'.join(sorted(exp_columns - set(ss.columns))))
# ensure to only use [A-z0-9_] in identifiers
allowedChars = re.compile("^[A-z0-9_]*$")
for field in ['Sample_ID', 'Sample_Name', 'Sample_Project',
'spike_entity_id', 'spike_entity_role']:
if field in ss:
for idx, x in ss[field].iteritems():
if pd.notnull(x):
if allowedChars.fullmatch(x) is None:
errors.append(
('%s in line %i contains a restricted char'
'acter: "%s". Only a-z A-Z 0-9 and _ are al'
'lowed!') % (field, line_offset+idx, x))
# ensure Sample_Project is not empty
if 'Sample_Project' in ss:
for idx, x in ss['Sample_Project'].iteritems():
if pd.isnull(x) or x.strip() == "":
errors.append('Line %i has an empty Sample_Project.' %
(line_offset+idx))
if len(errors) > 0:
raise ValueError('The following %i errors(s) were found in your sample sheet:\n%s\n' % (len(errors), '\n'.join(['ERROR %i: %s' % (i+1, error) for i, error in enumerate(errors)])))
# check that sample project is describes in config.yaml
for prj in ss['Sample_Project'].unique():
if prj not in config['projects']:
warnings.append(('Sample_Project "%s" is not described in config.'
'yaml. No processing other than demultiplexing w'
'ill be applied.') % (prj))
# check that spike_entity_role is a defined one
exp_roles = { 'patient', 'father', 'mother', 'sibling', 'healthy',
'tumor', 'tumor_patient', 'tumor_father', 'tumor_mother', 'tumor_sibling'}
for idx, row in ss.iterrows():
if pd.notnull(row['spike_entity_role']):
if row['spike_entity_role'] not in exp_roles:
warnings.append('spike_entity_role "%s" in line %i for Sample_Project "%s" is unknown!' % (row['spike_entity_role'], line_offset+idx, row['Sample_Project']))
# test that entity name is infix of sample name
for idx, row in ss.iterrows():
if pd.notnull(row['spike_entity_id']):
if row['spike_entity_id'] not in row['Sample_ID']:
warnings.append('spike_entity_id "%s" is not part of the Sample_ID "%s" in line %i.' % (row['spike_entity_id'], row['Sample_ID'], line_offset+idx))
# check assumptions about naming schemas per project
exp_names = {'Keimbahn': re.compile("^KB\d{4}"),
'Alps': re.compile("^ALPS")}
for idx, row in ss.iterrows():
if row['Sample_Project'] in exp_names:
if exp_names[row['Sample_Project']].match(row['Sample_ID']) is None:
warnings.append('Sample_ID "%s" does not follow expected naming schema "%s" in line %i.' % (row['Sample_ID'], exp_names[row['Sample_Project']].pattern, line_offset+idx))
# check assumptions about name suffices
exp_suffices = {'Keimbahn': {'patient': {'_c'},
'father': {'_f'},
'mother': {'_m'}},
'Alps': {'patient': {''},
'father': {'_a', 'a'},
'mother': {'_b', 'b'}},
'Maus_Hauer': {'healthy': {'_c', 'c', '_n', 'n'},
'tumor': {'_t', 't'}}}
for idx, row in ss.iterrows():
if | pd.isnull(row['spike_entity_id']) | pandas.isnull |
import calendar
import datetime as dt
from datetime import timedelta
import holidays
import math
import os
from dateutil.relativedelta import relativedelta
import json
import numpy as np
import pandas as pd
import pickle
from pyiso import client_factory
from pyiso.eia_esod import EIAClient
import requests
from sklearn import preprocessing
from timezonefinderL import TimezoneFinder
GEO_COORDS = {
"NYISO": {"lat": "40.7128", "lon": "-73.935242"},
"ISONE": {"lat": "42.3601", "lon": "-71.0589"},
"CAISO": {"lat": "34.0522", "lon": "-118.2437"},
"PJM": {"lat": "39.9526", "lon": "-75.1652"},
"MISO": {"lat": "44.9778", "lon": "-93.2650",},
}
MONTH_TO_SEASON = {
1: "Winter",
2: "Winter",
3: "Spring",
4: "Spring",
5: "Spring",
6: "Summer",
7: "Summer",
8: "Summer",
9: "Fall",
10: "Fall",
11: "Fall",
12: "Winter",
}
BASE_URL = "https://api.darksky.net/forecast"
EXCLUDE = "flags, minutely, daily, alerts"
LOAD_COLS = ["load_MW", "timestamp"]
EASTERN_TZ = "US/Eastern"
US_HOLIDAYS = holidays.UnitedStates()
CATEGORICAL_FEATURES = ["weekday", "hour_of_day", "holiday"]
NUMERICAL_FEATURES = ["temperature", "load (t-24)"]
class LoadCollector:
def __init__(self, iso: str, start_date: str, end_date: str):
self.start = start_date
self.end = end_date
self.iso_name = iso
self.lat = GEO_COORDS[iso]["lat"]
self.lon = GEO_COORDS[iso]["lon"]
self.iso = self._set_iso(iso)
self.holidays = holidays.UnitedStates()
self.load = self.get_historical_load()
self.model_input = None
def get_historical_load(self) -> pd.DataFrame:
if self.iso_name == "CAISO":
load = self.get_caiso_load()
elif (
self.iso_name == "MISO"
or self.iso_name == "PJM"
or self.iso_name == "ERCOT"
):
load = self.get_eia_load()
else:
load = pd.DataFrame(
self.iso.get_load(
latest=False, yesterday=False, start_at=self.start, end_at=self.end
)
)[LOAD_COLS].set_index("timestamp")
tz_finder = TimezoneFinder()
tz_name = tz_finder.timezone_at(lng=float(self.lon), lat=float(self.lat))
load.index = load.index.tz_convert(tz_name)
return load.resample("H").mean()
def get_historical_peak_load(self) -> pd.DataFrame:
daily_peak = self.load.resample("D").max()
holiday_bool = dict()
for date, _ in daily_peak.iterrows():
holiday_bool[date] = self._check_for_holiday(date)
daily_peak["month"] = daily_peak.index.month_name()
daily_peak["season"] = daily_peak.index.month.map(MONTH_TO_SEASON)
daily_peak["weekday"] = daily_peak.index.day_name()
daily_peak["holiday"] = daily_peak.index.map(holiday_bool)
return daily_peak
def get_eia_load(self):
load = pd.DataFrame(
self.iso.get_load(
latest=True, yesterday=False, start_at=self.start, end_at=self.end
)
)
load = load.iloc[::-1]
return load[LOAD_COLS].set_index("timestamp")
def get_caiso_load(self):
if pd.Timestamp(self.start).month == pd.Timestamp(self.end).month:
months = [pd.Timestamp(self.start)]
else:
months = pd.date_range(self.start, self.end, freq="MS").tolist()
monthly_load = []
for month in months:
start, end = self.get_month_day_range(month)
start = start.strftime("%Y-%m-%d")
end = end.strftime("%Y-%m-%d")
load = pd.DataFrame(
self.iso.get_load(
latest=False, yesterday=False, start_at=start, end_at=end
)
)[LOAD_COLS].set_index("timestamp")
monthly_load.append(load)
return pd.concat(monthly_load)
@staticmethod
def get_month_day_range(date):
"""
Returns the start and end date for the month of 'date'.
"""
last_day = date + relativedelta(day=1, months=+1, days=-1)
first_day = date + relativedelta(day=1)
return first_day, last_day
def engineer_features(self):
temperatures = dict()
holiday_bool = dict()
for date, _ in self.load.iterrows():
temperatures[date] = self._get_temperature(date)
holiday_bool[date] = self._check_for_holiday(date)
self.load["weekday"] = self.load.index.dayofweek
self.load["hour_of_day"] = self.load.index.hour
self.load["temperature"] = self.load.index.map(temperatures)
self.load["holiday"] = self.load.index.map(holiday_bool)
self.load["load (t-24)"] = self.load.load_MW.shift(24)
def engineer_features_lite(self, weather_dict: dict):
holiday_bool = dict()
for date, _ in self.load.iterrows():
holiday_bool[date] = self._check_for_holiday(date)
self.load["weekday"] = self.load.index.dayofweek
self.load["hour_of_day"] = self.load.index.hour
self.load["temperature"] = self.load.index.map(pd.Series(weather_dict))
self.load["holiday"] = self.load.index.map(holiday_bool)
self.load["load (t-24)"] = self.load.load_MW.shift(24)
def build_model_input(self):
featurized_df = self.dummify_categorical_features(self.load.copy())
self.model_input = featurized_df[featurized_df.notna()]
@staticmethod
def _set_iso(iso_name: str):
if iso_name == "NYISO":
iso_engine = client_factory("NYISO")
elif iso_name == "ISONE":
iso_engine = client_factory("ISONE", timeout_seconds=60)
elif iso_name == "CAISO":
iso_engine = client_factory("CAISO")
elif iso_name == "ERCOT":
iso_engine = EIAClient(timeout_seconds=60)
iso_engine.BA = "ERCOT"
elif iso_name == "PJM":
iso_engine = EIAClient(timeout_seconds=60)
iso_engine.BA = "PJM"
elif iso_name == "MISO":
iso_engine = EIAClient(timeout_seconds=60)
iso_engine.BA = "MISO"
else:
print(f"Peaky Finders does not support {iso_name} yet!")
return iso_engine
def _get_temperature(self, date):
date_input = date.strftime("%s")
full_url = f"{self.weather_url}{date_input}?exclude={EXCLUDE}"
response = requests.get(full_url)
if response.status_code == 200:
print(response.status_code)
else:
raise ValueError(
f"Error getting data from DarkSky API: "
f"Response Code {response.status_code}"
)
info = response.json()
current_info = info["currently"]
try:
temp = current_info["temperature"]
except KeyError:
temp = None
return temp
@staticmethod
def _check_for_holiday(day):
if day in US_HOLIDAYS:
return True
else:
return False
@staticmethod
def dummify_categorical_features(load_df: pd.DataFrame):
for feature in CATEGORICAL_FEATURES:
dummies = | pd.get_dummies(load_df[feature], prefix=feature, drop_first=True) | pandas.get_dummies |
# coding: utf-8
# In[1]:
# Implementation from https://github.com/dougalsutherland/opt-mmd
import sys, os
import numpy as np
from math import sqrt
CHANNEL_MEANS = (129.38732832670212/255, 124.35894414782524/255, 113.09937313199043/255)
CHANNEL_STDS = (67.87980079650879/255, 65.10988622903824/255, 70.04801765084267/255)
# In[2]:
from scipy.spatial.distance import pdist, cdist
def energy_distance(v, w):
VV = np.mean(pdist(v, 'euclidean'))
WW = np.mean(pdist(w, 'euclidean'))
VW = np.mean(cdist(v, w, 'euclidean'))
return 2 * VW - VV - WW
# In[3]:
from PIL import Image
from matplotlib import pyplot as plt
def display_sample(sample):
img = sample.reshape((28, 28)) * 255.
plt.imshow(Image.fromarray(img))
plt.show()
# In[4]:
# Add Bayesian-and-novelty directory to the PYTHONPATH
import sys
import os
sys.path.append(os.path.realpath('../../../..'))
# Autoreload changes in utils, etc.
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import torch
from torchvision import datasets, transforms
import numpy as np
from functools import reduce
from novelty.utils.datasets import GaussianNoiseDataset
from novelty.utils.datasets import UniformNoiseDataset
from novelty.utils import DatasetSubset
torch.manual_seed(1)
# # CIFAR80
# In[5]:
from novelty.utils import DatasetSubset
def get_cifar_images(cifar80_dir):
"""
Return flattened and scaled CIFAR80 test data as a numpy array.
Filter so only 'classes' remain in dataset.
Saves/loads dataset from cifar80_dir.
"""
print("Loading CIFAR80 data")
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.ImageFolder(cifar80_dir, transform=transform)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
cifar80_test = get_cifar_images('/media/tadenoud/DATADisk/datasets/cifar80/test')
# In[6]:
cifar80_test.shape
# In[7]:
def get_imagenet_crop_data(imagenet_dir):
"""
Return cropped, flattened, and scaled TinyImageNet test data as a numpy array.
Saves/loads dataset from imagenet_dir.
"""
print("Loading ImageNet crop")
transform_crop = transforms.Compose([
transforms.RandomCrop([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.ImageFolder(imagenet_dir, transform=transform_crop)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
imagenet_crop = get_imagenet_crop_data('/media/tadenoud/DATADisk/datasets/tiny-imagenet-200/test/')
imagenet_crop_energy = energy_distance(cifar80_test, imagenet_crop)
print("Imagenet (crop) Energy:", imagenet_crop_energy)
# In[8]:
def get_imagenet_resize_data(imagenet_dir):
"""
Return resized, flattened, and scaled TinyImageNet test data as a numpy array.
Saves/loads dataset from imagenet_dir.
"""
print("Loading ImageNet resize")
transform_resize = transforms.Compose([
transforms.Resize([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.ImageFolder(imagenet_dir, transform=transform_resize)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
imagenet_resize = get_imagenet_resize_data('/media/tadenoud/DATADisk/datasets/tiny-imagenet-200/test/')
imagenet_resize_energy = energy_distance(cifar80_test, imagenet_resize)
print("Imagenet (resize) Energy:", imagenet_resize_energy)
# In[9]:
def get_lsun_crop_data(lsun_dir):
"""
Return cropped, flattened, and scaled LSUN test data as a numpy array.
Saves/loads dataset from lsun_dir.
"""
print("Loading LSUN crop")
transform_crop = transforms.Compose([
transforms.RandomCrop([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.LSUN(lsun_dir, classes='test', transform=transform_crop)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
lsun_crop = get_lsun_crop_data('/media/tadenoud/DATADisk/datasets/lsun/')
lsun_crop_energy = energy_distance(cifar80_test, lsun_crop)
print("LSUN (crop) Energy:", lsun_crop_energy)
# In[10]:
def get_lsun_resize_data(lsun_dir):
"""
Return resized, flattened, and scaled LSUN test data as a numpy array.
Saves/loads dataset from lsun_dir.
"""
print("Loading LSUN resize")
transform_resize = transforms.Compose([
transforms.Resize([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.LSUN(lsun_dir, classes='test', transform=transform_resize)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
lsun_resize = get_lsun_resize_data('/media/tadenoud/DATADisk/datasets/lsun/')
lsun_resize_energy = energy_distance(cifar80_test, lsun_resize)
print("LSUN (resize) Energy:", lsun_resize_energy)
# In[11]:
def get_isun_data(isun_dir):
"""
Return flattened, and scaled iSUN test data as a numpy array.
Saves/loads dataset from isun_dir.
"""
print("Loading iSUN")
transform_resize = transforms.Compose([
transforms.Resize([32, 32]),
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.ImageFolder(isun_dir, transform=transform_resize)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
isun_data = get_isun_data('/media/tadenoud/DATADisk/datasets/isun/')
isun_energy = energy_distance(cifar80_test, isun_data)
print("iSUN Energy:", isun_energy)
# In[12]:
def get_gaussian_test_data():
"""Return flattened, and scaled Gaussian Noise test data as a numpy array."""
print("Loading Gaussian Noise data")
dataset = GaussianNoiseDataset((10000, 32*32*3), mean=0.0, std=1)
dataset = np.array([a for a, _ in iter(dataset)])
return dataset.astype('float32')
gaussianTestX = get_gaussian_test_data()
gaussian_energy = energy_distance(cifar80_test, gaussianTestX)
print("Gaussian Energy:", gaussian_energy)
# In[13]:
import math
def get_uniform_test_data():
"""Return flattened, and scaled Uniform Noise test data as a numpy array."""
print("Loading Uniform Noise data")
dataset = UniformNoiseDataset((10000, 32*32*3), low=-math.sqrt(3), high=math.sqrt(3))
dataset = np.array([a for a, _ in iter(dataset)])
return dataset.astype('float32')
uniformTestX = get_uniform_test_data()
uniform_energy = energy_distance(cifar80_test, uniformTestX)
print("Uniform Energy:", uniform_energy)
# In[14]:
classes = [c for c in range(80, 100)]
from novelty.utils import DatasetSubset
def get_cifar_images(cifar80_dir):
"""
Return flattened and scaled CIFAR80 test data as a numpy array.
Filter so only 'classes' remain in dataset.
Saves/loads dataset from cifar80_dir.
"""
print("Loading CIFAR80 data")
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS)
])
dataset = datasets.ImageFolder(cifar80_dir, transform=transform)
dataset = np.array([a[0].numpy() for a in dataset])
dataset = dataset.astype('float32')
return dataset.reshape(dataset.shape[0], reduce(lambda s, x: s * x, dataset.shape[1:], 1))
cifar20_test = get_cifar_images('/media/tadenoud/DATADisk/datasets/cifar20/test')
cifar20_energy = energy_distance(cifar80_test, cifar20_test)
print("CIFAR20 Energy:", cifar20_energy)
# # CIFAR80 results
# In[15]:
import pandas as pd
from IPython.display import display
df = pd.DataFrame(columns=['energy'],
index=['imagenet_crop', 'imagenet_resize', 'lsun_crop', 'lsun_resize',
'isun_resize', 'gaussian', 'uniform', 'cifar20'])
df.loc['imagenet_crop'] = pd.Series({'energy': imagenet_crop_energy})
df.loc['imagenet_resize'] = pd.Series({'energy': imagenet_resize_energy})
df.loc['lsun_crop'] = | pd.Series({'energy': lsun_crop_energy}) | pandas.Series |
"""
Auxiliar standarization functions
"""
import pandas as pd
import numpy as np
import os
from ..IO.aux_functions import parse_xlsx_sheet
from ..Preprocess.geo_filters import check_correct_spain_coord
extra_folder = 'extra'
servicios_columns = ['nom', 'nif', 'cp', 'cnae', 'localidad', 'x', 'y', 'es-x',
'es-y', 'holding', 'cierre',
'06act', '07act', '08act', '09act', '10act', '11act',
'12act', '13act', '06actc', '07actc', '08actc', '09actc',
'10actc', '11actc', '12actc', '13actc', '06pasfijo',
'07pasfijo', '08pasfijo', '09pasfijo', '10pasfijo',
'11pasfijo', '12pasfijo', '13pasfijo', '06pasliq',
'07pasliq', '08pasliq', '09pasliq', '10pasliq',
'11pasliq', '12pasliq', '13pasliq', '06trab', '07trab',
'08trab', '09trab', '10trab', '11trab', '12trab',
'13trab', '06va', '07va', '08va', '09va', '10va', '11va',
'12va', '13va', '06vtas', '07vtas', '08vtas', '09vtas',
'10vtas', '11vtas', '12vtas', '13vtas']
def pre_read_servicios(pathdata):
raw_locs_serv, nifs_serv, cps_serv, names_serv = [], [], [], []
muni_serv, null_cp, null_muni, null_locs = [], [], [], []
ca_cp_dict = {}
for df, ca_name in get_sequencial_servicios(pathdata):
names_serv += list(df['nom'])
nifs_serv += list(df['nif'])
cps_serv += list(df['cp'])
raw_locs_serv.append(df[['es-x', 'es-y']].as_matrix())
# Dictionary cp_ca creation
u_cp = df['cp'][np.logical_not( | pd.isnull(df['cp']) | pandas.isnull |
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for stoichiometric analaysis"""
from __future__ import print_function
import logging
from copy import copy
from itertools import product
import numpy as np
import optlang
from optlang.interface import OPTIMAL
import pandas
import six
import sympy
from cobra import Metabolite, Reaction, Model
from numpy.linalg import svd
from scipy.sparse import dok_matrix, lil_matrix
from six.moves import zip
from cobra.exceptions import OptimizationError
__all__ = ['find_dead_end_reactions', 'find_coupled_reactions', 'ShortestElementaryFluxModes']
logger = logging.getLogger(__name__)
def create_stoichiometric_array(model, array_type='dense', dtype=None):
"""Return a stoichiometric array representation of the given model.
The the columns represent the reactions and rows represent
metabolites. S[i,j] therefore contains the quantity of metabolite `i`
produced (negative for consumed) by reaction `j`.
Parameters
----------
model : cobra.Model
The cobra model to construct the matrix for.
array_type : string
The type of array to construct. if 'dense', return a standard
numpy.array, 'dok', or 'lil' will construct a sparse array using
scipy of the corresponding type and 'data_frame' will give a
pandas `DataFrame` with metabolite and reaction identifiers as indices.
dtype : data-type
The desired data-type for the array. If not given, defaults to float.
Returns
-------
matrix of class `dtype`
The stoichiometric matrix for the given model.
"""
if dtype is None:
dtype = np.float64
def data_frame(_, dtype):
metabolite_ids = [met.id for met in model.metabolites]
reaction_ids = [rxn.id for rxn in model.reactions]
index = pandas.MultiIndex.from_tuples(
list(product(metabolite_ids, reaction_ids)))
return | pandas.DataFrame(data=0, index=index, columns=['stoichiometry'], dtype=dtype) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# viewer.py - View aggregated i2p network statistics.
# Author: <NAME> <<EMAIL>>
# License: This is free and unencumbered software released into the public domain.
#
# NOTE: This file should never write to the database, only read.
import argparse
import datetime
import math
import matplotlib
# We don't want matplotlib to use X11 (https://stackoverflow.com/a/3054314)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sqlite3
import pandas as pd
from jinja2 import Environment, FileSystemLoader
interval = 3600 # = 60 minutes
num_intervals = 24 # = 20 hours
min_version = 20
min_country = 20
# http://i2p-projekt.i2p/en/docs/how/network-database#routerInfo
# H is left out since it's almost always empty.
#netdb_caps = ['f','H','K','L','M','N','O','P','R','U','X',]
netdb_caps = ['f','K','L','M','N','O','P','R','U','X',]
# Used in the plots.
generation_time = str(datetime.datetime.utcnow())[:-7]
site = 'http://nacl.i2p/stats'
ONE_DAY = 5*24*60*60
FIVE_DAYS = 5*24*60*60
FIFTEEN_DAYS = 15*24*60*60
THIRTY_DAYS = 30*24*60*60
ACTIVE_TIME = THIRTY_DAYS
def query_db(conn, query, args=(), one=False):
cur = conn.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
# TODO: Port to Pandas
def pie_graph(conn, query, output, title='', lower=0, log=False):
labels = []
sizes = []
res = query_db(conn, query)
# Sort so the graph doesn't look like complete shit.
res = sorted(res, key=lambda tup: tup[1])
for row in res:
if row[1] > lower:
labels.append(row[0])
if log:
sizes.append(math.log(row[1]))
else:
sizes.append(row[1])
# Normalize.
norm = [float(i)/sum(sizes) for i in sizes]
plt.pie(norm,
labels=labels,
shadow=True,
startangle=90,
)
plt.figtext(.1,.03,'{}\n{} UTC'.format(site,generation_time))
plt.axis('equal')
plt.legend()
plt.title(title)
plt.savefig(output)
plt.close()
def plot_x_y(conn, query, output, title='', xlab='', ylab=''):
df = pd.read_sql_query(query, conn)
df['sh'] = pd.to_datetime(df['sh'], unit='s')
df = df.set_index('sh')
df.head(num_intervals).plot(marker='o')
plt.figtext(.1,.03,'{}\n{} UTC'.format(site,generation_time))
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.savefig(output)
plt.close()
# Plots network traffic.
def i2pcontrol_stats(conn, output=''):
things=[
{'stat':'activepeers','xlab':'time','ylab':'total',},
{'stat':'tunnelsparticipating','xlab':'time','ylab':'total',},
{'stat':'decryptFail','xlab':'time','ylab':'total',},
{'stat':'failedLookupRate','xlab':'time','ylab':'total',},
{'stat':'streamtrend','xlab':'time','ylab':'total',},
{'stat':'windowSizeAtCongestion','xlab':'time','ylab':'total',},
#{'stat':'','xlab':'','ylab':'',}, # Template to add more.
]
tokens = query_db(conn, 'select owner,token from submitters;')
for thing in things:
combined=[]
dfs=[]
for token in tokens:
q = 'select datetime(cast(((submitted)/({0})) as int)*{0}, "unixepoch") as sh, {1} from speeds where submitter="{2}" group by sh order by sh desc;'.format(interval, thing['stat'], token[1])
df = pd.read_sql_query(q, conn)
# unix -> human
df['sh'] = pd.to_datetime(df['sh'], unit='s')
dfs.append(df)
# Reverse so it's put in left to right
combined = reduce(lambda left,right: pd.merge(left,right,on='sh',how='outer'), dfs)
combined.columns=['time'] + [i[0] for i in tokens]
combined = combined.set_index('time')
combined.head(num_intervals).plot(marker='o')
plt.figtext(.1,.03,'{}\n{} UTC'.format(site,generation_time))
plt.title(thing['stat'])
plt.xlabel(thing['xlab'])
plt.ylabel(thing['ylab'])
plt.savefig('{}/{}.png'.format(output, thing['stat']))
plt.close()
# Make plot of how many nodes reported in.
def reporting_in(conn, output=''):
q = 'select count(*) as count, datetime(cast(((submitted)/({0})) as int)*{0}, "unixepoch") as sh from speeds group by sh order by sh desc;'.format(interval)
df = pd.read_sql_query(q, conn)
# unix -> human
df['sh'] = | pd.to_datetime(df['sh'], unit='s') | pandas.to_datetime |
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import gzip
import csv
import os
import boto3
import json
from decimal import Decimal
cveTable = os.environ['CVE_TABLE']
awsRegion = os.environ['AWS_REGION']
dynamodb = boto3.resource('dynamodb', region_name = awsRegion)
def collect_exploit():
print("Collecting CVE reference map for source EXPLOIT-DB")
csv_file = open('Exploit_CVE.csv', 'w')
csv_writer = csv.writer(csv_file)
exploitUrl = 'https://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html'
req = requests.get(exploitUrl, allow_redirects=True)
# get status code
print ("Exploit status code: " + str(req.status_code))
# read the data from the URL and print it in html form
# this is the full html, not just the table's html
# we will need to parse through this to only grab the table we are interested in
# use BeautifulSoup to parse through the html
soup = BeautifulSoup(req.text, "html.parser")
# find all the tables that fit these attributes
# we only want the ExploitDB/CVENum table, so we index with [1] to grab table #2
table = soup.findAll("table", attrs={"cellpadding":"2", "cellspacing":"2", "border":"2"})[1]
# The first tr contains the field names.
headings = ["ExploitId", "CveId"]
datasets = []
for row in table.find_all("tr")[0:]:
row = list(td.get_text() for td in row.find_all("td"))
#print(type(dataset))
#df.append(dataset, ignore_index = True)
#df = pd.DataFrame(dataset, columns=['ExploitDB Number', 'CVE Number'])
datasets.append(row)
#print(dataset)
df = pd.DataFrame(datasets, columns = headings) # creating data frame with the proper headings and loading in the data
df = df.astype('string') # converting the pandas objects (default) to strings
df.drop(df.tail(2).index, inplace = True) # dropping the last two rows because they don't have exploit db Id's
df[headings[0]] = df[headings[0]].str.replace(r'\D', '') # removing the prefix "EXPLOIT-DB" from the ExploitDBId column
df[headings[1]] = df[headings[1]].str.rstrip("\n") # removing the trailing newline from the CVEId column
df[headings[1]] = df[headings[1]].str.lstrip(' ') # removing the leading white space from the CVEId column
df[headings[1]] = df[headings[1]].str.split(' ') # splitting the column based on white space within the entries
df = df.set_index([headings[0]])[headings[1]].apply(pd.Series).stack().reset_index().drop('level_1',axis = 1).rename(columns = {0: headings[1]}) # creating multiple rows for exploits that correspond to multiple CVE #'s
print(df)
#print(df[headings[1]].nunique()) # find the number of unique CVE values
#print(df[headings[0]].nunique()) # find the number of unique Exploit values
#print(pd.concat(g for _, g in df.groupby("CveId") if len(g) > 1)) # find the CVEs that have more than one exploit
n = len(df[headings[1]]) # find the number of rows in the dataframe
csv_writer.writerow(headings)
for i in range(n - 1):
csv_writer.writerow(df.loc[i]) # writing data frame to a csv file
csv_file.close()
df.to_json("Exploit_CVE.json", indent = 2, orient = 'records') # writing the dataframe to a json file
def collect_nvd_feeds():
print("Collecting JSON vulnerability feeds from NVD")
nvdJson = []
req = requests.get('https://nvd.nist.gov/vuln/data-feeds#JSON_FEED')
# scan for all yearly gzip files
for gzFile in re.findall("nvdcve-1.1-[0-9]*\.json\.gz", req.text):
#print(gzFile)
url = 'https://nvd.nist.gov/feeds/json/cve/1.1/' + gzFile
reqFile = requests.get(url, stream=True, allow_redirects=True)
# get status code
print (gzFile + " status code: " + str(reqFile.status_code))
# write response
with open(gzFile, 'wb') as file:
file.write(reqFile.content)
with gzip.open(gzFile) as openGz:
nvdDF = pd.read_json(openGz)
for x in nvdDF['CVE_Items']:
cveId = str(x['cve']['CVE_data_meta']['ID'])
cveSrcUrl = 'https://cve.mitre.org/cgi-bin/cvename.cgi?name=' + cveId
try:
cweId = str(x['cve']['problemtype']['problemtype_data'][0]['description'][0]['value'])
except:
cweId = 'NONE_PROVIDED'
try:
reference = str(x['cve']['references']['reference_data'][0]['url'])
except:
reference = 'NONE_PROVIDED'
try:
description = str(x['cve']['description']['description_data'][0]['value'])
except:
description = 'NONE_PROVIDED'
try:
cvssV2Version = str('CVSSv' + x['impact']['baseMetricV2']['cvssV2']['version'])
cvssV2AccessVector = str(x['impact']['baseMetricV2']['cvssV2']['accessVector'])
cvssV2AccessComplexity = str(x['impact']['baseMetricV2']['cvssV2']['accessComplexity'])
cvssV2Authentication = str(x['impact']['baseMetricV2']['cvssV2']['authentication'])
cvssV2ConfidentialityImpact = str(x['impact']['baseMetricV2']['cvssV2']['confidentialityImpact'])
cvssV2IntegrityImpact = str(x['impact']['baseMetricV2']['cvssV2']['integrityImpact'])
cvssV2AvailabilityImpact = str(x['impact']['baseMetricV2']['cvssV2']['availabilityImpact'])
cvssV2Score = float(x['impact']['baseMetricV2']['cvssV2']['baseScore'])
cvssV2Severity = str(x['impact']['baseMetricV2']['severity'])
except:
cvssV2Version = 'NONE_PROVIDED'
cvssV2AccessVector = 'Unknown'
cvssV2AccessComplexity = 'Unknown'
cvssV2Authentication = 'Unknown'
cvssV2ConfidentialityImpact = 'Unknown'
cvssV2IntegrityImpact = 'Unknown'
cvssV2AvailabilityImpact = 'Unknown'
cvssV2Score = float(0.0)
cvssV2Severity = 'Unknown'
try:
cvssV3Version = str('CVSSv' + x['impact']['baseMetricV3']['cvssV3']['version'])
cvssV3AttackVector = str(x['impact']['baseMetricV3']['cvssV3']['attackVector'])
cvssV3AttackComplexity = str(x['impact']['baseMetricV3']['cvssV3']['attackComplexity'])
cvssV3PrivilegesRequired = str(x['impact']['baseMetricV3']['cvssV3']['privilegesRequired'])
cvssV3UserInteraction = str(x['impact']['baseMetricV3']['cvssV3']['userInteraction'])
cvssV3Scope = str(x['impact']['baseMetricV3']['cvssV3']['scope'])
cvssV3ConfidentialityImpact = str(x['impact']['baseMetricV3']['cvssV3']['confidentialityImpact'])
cvssV3IntegrityImpact = str(x['impact']['baseMetricV3']['cvssV3']['integrityImpact'])
cvssV3AvailabilityImpact = str(x['impact']['baseMetricV3']['cvssV3']['availabilityImpact'])
cvssV3Score = float(x['impact']['baseMetricV3']['cvssV3']['baseScore'])
cvssV3Severity = str(x['impact']['baseMetricV3']['cvssV3']['baseSeverity'])
except:
cvssV3Version = 'NONE_PROVIDED'
cvssV3AttackVector = 'Unknown'
cvssV3AttackComplexity = 'Unknown'
cvssV3PrivilegesRequired = 'Unknown'
cvssV3UserInteraction = 'Unknown'
cvssV3Scope = 'Unknown'
cvssV3ConfidentialityImpact = 'Unknown'
cvssV3IntegrityImpact = 'Unknown'
cvssV3AvailabilityImpact = 'Unknown'
cvssV3Score = float(0.0)
cvssV3Severity = 'Unknown'
try:
nvdJson.append({
'CveId': cveId,
'CveSourceUrl': cveSrcUrl,
'CweId': cweId,
'Reference': reference,
'Description': description,
'CvssV2Version': cvssV2Version,
'CvssV2AccessVector': cvssV2AccessVector,
'CvssV2AccessComplexity': cvssV2AccessComplexity,
'CvssV2Authentication': cvssV2Authentication,
'CvssV2ConfidentialityImpact': cvssV2ConfidentialityImpact,
'CvssV2IntegrityImpact': cvssV2IntegrityImpact,
'CvssV2AvailabilityImpact': cvssV2AvailabilityImpact,
'CvssV2Score': cvssV2Score,
'CvssV2Severity': cvssV2Severity,
'CvssV3Version': cvssV3Version,
'CvssV3AttackVector': cvssV3AttackVector,
'CvssV3AttackComplexity': cvssV3AttackComplexity,
'CvssV3PrivilegesRequired': cvssV3PrivilegesRequired,
'CvssV3UserInteraction': cvssV3UserInteraction,
'CvssV3Scope': cvssV3Scope,
'CvssV3ConfidentialityImpact': cvssV3ConfidentialityImpact,
'CvssV3IntegrityImpact': cvssV3IntegrityImpact,
'CvssV3AvailabilityImpact': cvssV3AvailabilityImpact,
'CvssV3Score': cvssV3Score,
'CvssV3Severity': cvssV3Severity
})
except Exception as e:
print(e)
with open("NVD_Feeds.json", "w") as file:
json.dump(nvdJson, file)
def cve_population():
table = dynamodb.Table(cveTable)
exploitDF = | pd.read_json('./Exploit_CVE.json') | pandas.read_json |
# pylint: disable=E1101
from pandas.util.py3compat import StringIO, BytesIO, PY3
from datetime import datetime
from os.path import split as psplit
import csv
import os
import sys
import re
import unittest
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
ensure_clean)
import pandas.util.testing as tm
import pandas as pd
import pandas.lib as lib
from pandas.util import py3compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
from pandas._parser import OverflowError
from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd not installed, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class ExcelTests(unittest.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self):
_skip_if_no_xlrd()
pth = os.path.join(self.dirpath, 'test.xls')
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_excel_read_buffer(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xls')
f = open(pth, 'rb')
xls = ExcelFile(f)
# it works
xls.parse('Sheet1', index_col=0, parse_dates=True)
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
xl = ExcelFile(f)
df = xl.parse('Sheet1', index_col=0, parse_dates=True)
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
xlsx = ExcelFile(pth)
df = xlsx.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xlsx file
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_specify_kind_xls(self):
_skip_if_no_xlrd()
xlsx_file = os.path.join(self.dirpath, 'test.xlsx')
xls_file = os.path.join(self.dirpath, 'test.xls')
# succeeds with xlrd 0.8.0, weird
# self.assertRaises(Exception, ExcelFile, xlsx_file, kind='xls')
# ExcelFile(open(xls_file, 'rb'), kind='xls')
# self.assertRaises(Exception, ExcelFile, open(xlsx_file, 'rb'),
# kind='xls')
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def test_excel_roundtrip_xls(self):
_skip_if_no_excelsuite()
self._check_extension('xls')
def test_excel_roundtrip_xlsx(self):
_skip_if_no_excelsuite()
self._check_extension('xlsx')
def _check_extension(self, ext):
path = '__tmp_to_excel_from_excel__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', index=False)
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='NA')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
def test_excel_roundtrip_xls_mixed(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
self._check_extension_mixed('xls')
def test_excel_roundtrip_xlsx_mixed(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
self._check_extension_mixed('xlsx')
def _check_extension_mixed(self, ext):
path = '__tmp_to_excel_from_excel_mixed__.' + ext
with ensure_clean(path) as path:
self.mixed_frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_excel_roundtrip_xls_tsframe(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
self._check_extension_tsframe('xls')
def test_excel_roundtrip_xlsx_tsframe(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
self._check_extension_tsframe('xlsx')
def _check_extension_tsframe(self, ext):
path = '__tmp_to_excel_from_excel_tsframe__.' + ext
df = tm.makeTimeDataFrame()[:5]
with ensure_clean(path) as path:
df.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(df, recons)
def test_excel_roundtrip_xls_int64(self):
_skip_if_no_excelsuite()
self._check_extension_int64('xls')
def test_excel_roundtrip_xlsx_int64(self):
_skip_if_no_excelsuite()
self._check_extension_int64('xlsx')
def _check_extension_int64(self, ext):
path = '__tmp_to_excel_from_excel_int64__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# Test np.int64, values read come back as float
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np.int64)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np.int64)
tm.assert_frame_equal(frame, recons, check_dtype=False)
def test_excel_roundtrip_xls_bool(self):
_skip_if_no_excelsuite()
self._check_extension_bool('xls')
def test_excel_roundtrip_xlsx_bool(self):
_skip_if_no_excelsuite()
self._check_extension_bool('xlsx')
def _check_extension_bool(self, ext):
path = '__tmp_to_excel_from_excel_bool__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# Test reading/writing np.bool8, roundtrip only works for xlsx
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np.bool8)
tm.assert_frame_equal(frame, recons)
def test_excel_roundtrip_xls_sheets(self):
_skip_if_no_excelsuite()
self._check_extension_sheets('xls')
def test_excel_roundtrip_xlsx_sheets(self):
_skip_if_no_excelsuite()
self._check_extension_sheets('xlsx')
def _check_extension_sheets(self, ext):
path = '__tmp_to_excel_from_excel_sheets__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = reader.parse('test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
np.testing.assert_equal(2, len(reader.sheet_names))
np.testing.assert_equal('test1', reader.sheet_names[0])
np.testing.assert_equal('test2', reader.sheet_names[1])
def test_excel_roundtrip_xls_colaliases(self):
_skip_if_no_excelsuite()
self._check_extension_colaliases('xls')
def test_excel_roundtrip_xlsx_colaliases(self):
_skip_if_no_excelsuite()
self._check_extension_colaliases('xlsx')
def _check_extension_colaliases(self, ext):
path = '__tmp_to_excel_from_excel_aliases__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(path, 'test1', header=col_aliases)
reader = ExcelFile(path)
rs = reader.parse('test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_excel_roundtrip_xls_indexlabels(self):
_skip_if_no_excelsuite()
self._check_extension_indexlabels('xls')
def test_excel_roundtrip_xlsx_indexlabels(self):
_skip_if_no_excelsuite()
self._check_extension_indexlabels('xlsx')
def _check_extension_indexlabels(self, ext):
path = '__tmp_to_excel_from_excel_indexlabels__.' + ext
with ensure_clean(path) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', cols=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path, 'test1', index_label=['test'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(
path, 'test1', index_label=['test', 'dummy', 'dummy2'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path, 'test1', index_label='test')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
# test index_labels in same row as column names
path = '%s.xls' % tm.rands(10)
with ensure_clean(path) as path:
self.frame.to_excel(path, 'test1',
cols=['A', 'B', 'C', 'D'], index=False)
# take 'A' and 'B' as indexes (they are in same row as cols 'C',
# 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons)
def test_excel_roundtrip_indexname(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
path = '%s.xls' % tm.rands(10)
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
with ensure_clean(path) as path:
df.to_excel(path)
xf = ExcelFile(path)
result = xf.parse(xf.sheet_names[0], index_col=0)
tm.assert_frame_equal(result, df)
self.assertEqual(result.index.name, 'foo')
def test_excel_roundtrip_datetime(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
# datetime.date, not sure what to test here exactly
path = '__tmp_excel_roundtrip_datetime__.xls'
tsf = self.tsframe.copy()
with ensure_clean(path) as path:
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(self.tsframe, recons)
def test_to_excel_periodindex(self):
_skip_if_no_excelsuite()
for ext in ['xls', 'xlsx']:
path = '__tmp_to_excel_periodindex__.' + ext
frame = self.tsframe
xp = frame.resample('M', kind='period')
with ensure_clean(path) as path:
xp.to_excel(path, 'sht1')
reader = ExcelFile(path)
rs = reader.parse('sht1', index_col=0, parse_dates=True)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
self._check_excel_multiindex('xls')
def test_to_excel_multiindex_xlsx(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
self._check_excel_multiindex('xlsx')
def _check_excel_multiindex(self, ext):
path = '__tmp_to_excel_multiindex__' + ext + '__.' + ext
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
with ensure_clean(path) as path:
frame.to_excel(path, 'test1', header=False)
frame.to_excel(path, 'test1', cols=['A', 'B'])
# round trip
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
df = reader.parse('test1', index_col=[0, 1], parse_dates=False)
tm.assert_frame_equal(frame, df)
self.assertEqual(frame.index.names, df.index.names)
self.frame.index = old_index # needed if setUP becomes a classmethod
def test_to_excel_multiindex_dates(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
self._check_excel_multiindex_dates('xls')
def test_to_excel_multiindex_xlsx_dates(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
self._check_excel_multiindex_dates('xlsx')
def _check_excel_multiindex_dates(self, ext):
path = '__tmp_to_excel_multiindex_dates__' + ext + '__.' + ext
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
with ensure_clean(path) as path:
tsframe.to_excel(path, 'test1', index_label=['time', 'foo'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons, check_names=False)
self.assertEquals(recons.index.names, ['time', 'foo'])
# infer index
tsframe.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(tsframe, recons)
self.tsframe.index = old_index # needed if setUP becomes classmethod
def test_to_excel_float_format(self):
_skip_if_no_excelsuite()
for ext in ['xls', 'xlsx']:
filename = '__tmp_to_excel_float_format__.' + ext
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean(filename) as filename:
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_unicode_filename(self):
_skip_if_no_excelsuite()
for ext in ['xls', 'xlsx']:
filename = u'\u0192u.' + ext
try:
f = open(filename, 'wb')
except UnicodeEncodeError:
raise nose.SkipTest('no unicode file names on this system')
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean(filename) as filename:
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_styleconverter(self):
from pandas.io.parsers import CellStyleConverter
try:
import xlwt
import openpyxl
except ImportError:
raise nose.SkipTest
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center"}}
xls_style = CellStyleConverter.to_xls(hstyle)
self.assertTrue(xls_style.font.bold)
self.assertEquals(xlwt.Borders.THIN, xls_style.borders.top)
self.assertEquals(xlwt.Borders.THIN, xls_style.borders.right)
self.assertEquals(xlwt.Borders.THIN, xls_style.borders.bottom)
self.assertEquals(xlwt.Borders.THIN, xls_style.borders.left)
self.assertEquals(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz)
xlsx_style = CellStyleConverter.to_xlsx(hstyle)
self.assertTrue(xlsx_style.font.bold)
self.assertEquals(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.top.border_style)
self.assertEquals(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.right.border_style)
self.assertEquals(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.bottom.border_style)
self.assertEquals(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.left.border_style)
self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER,
xlsx_style.alignment.horizontal)
# def test_to_excel_header_styling_xls(self):
# import StringIO
# s = StringIO.StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# raise nose.SkipTest
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# self.assertEquals(["test1"], wbk.sheet_names())
# ws = wbk.sheet_by_name('test1')
# self.assertEquals([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)],
# ws.merged_cells)
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# self.assertEquals(1, font[cell_xf.font_index].bold)
# self.assertEquals(1, cell_xf.border.top_line_style)
# self.assertEquals(1, cell_xf.border.right_line_style)
# self.assertEquals(1, cell_xf.border.bottom_line_style)
# self.assertEquals(1, cell_xf.border.left_line_style)
# self.assertEquals(2, cell_xf.alignment.hor_align)
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self):
# import StringIO
# s = StringIO.StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# raise nose.SkipTest
# if openpyxl.__version__ < '1.6.1':
# raise nose.SkipTest
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# self.assertEquals(["test1"], wbk.get_sheet_names())
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# self.assertTrue(cell.style.font.bold)
# self.assertEquals(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.top.border_style)
# self.assertEquals(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.right.border_style)
# self.assertEquals(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.bottom.border_style)
# self.assertEquals(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.left.border_style)
# self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER,
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# self.assertTrue(ws.cell(maddr).merged)
# os.remove(filename)
def test_excel_010_hemstring(self):
try:
import xlwt
import openpyxl
except ImportError:
raise nose.SkipTest
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
def roundtrip(df, header=True, parser_hdr=0):
path = '__tmp__test_xl_010_%s__.xls' % np.random.randint(1, 10000)
df.to_excel(path, header=header)
with ensure_clean(path) as path:
xf = pd.ExcelFile(path)
res = xf.parse(xf.sheet_names[0], header=parser_hdr)
return res
nrows = 5
ncols = 3
for i in range(1, 4): # row multindex upto nlevel=3
for j in range(1, 4): # col ""
df = | mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) | pandas.util.testing.makeCustomDataframe |
import csv
import pprint
import datetime
import time
import pandas as pd
## Filenames
chicago = 'chicago.csv'
new_york_city = 'new_york_city.csv'
washington = 'washington.csv'
def get_city():
'''Asks the user for a city and returns the filename for that city's bike share data.
Args:
none.
Returns:
(str) Filename for a city's bikeshare data.
'''
city = input('\nHello! Let\'s explore some US bikeshare data!\n'
'Would you like to see data for Chicago, New York, or Washington?\n')
city= city.lower()
if city == 'chicago':
print('Ok, Let\'s explore the data for Chicago!')
return chicago
elif city == 'new york':
print('Ok, Let\'s explore the data for New York!')
return new_york_city
elif city== 'washington':
print('Ok, Let\'s explore the data for Washington')
return washington
else:
print ('Sorry that is not a valid input, please make sure you are using lowercase letters.')
return get_city()
def get_time_period():
'''Asks the user for a time period and returns the specified filter.
Args:
none.
Returns:
(str) Time period for a city's bikeshare data.
'''
time_period = input('\nWould you like to filter the data by month, day, or not at'
' all? Type "none" for no time filter.\n')
if time_period == 'month':
print('Great, let\'s filter the data by month.')
return get_month()
elif time_period == 'day':
print('Great, let\'s filter the data by day.')
return get_day()
elif time_period == 'none':
print('Great, we will not filter the time period.')
return time_period
else:
print('Sorry, that is not a valid input, please try again.')
return get_time_period()
def get_month():
'''Asks the user for a month and returns the specified month.
Args:
none.
Returns:
(str): Returns the month the user chooses.
'''
month = input('\nWhich month? January, February, March, April, May, or June?\n')
month= month.lower()
if month not in ['january', 'february', 'march', 'april','may', 'june']:
print('This input is not valid.')
return get_month()
return month
def get_day():
'''Asks the user for as an integer day and returns the specified day.
Args:
none.
Returns:
(int): Returns the day of the weeek that the user chooses.
'''
day = input('\nWhich day? Please type your response as an integer. For example Mon=0, Tues=1, etc.\n')
if int(day) not in [0,1,2,3,4,5,6]:
print('This input is not valid.')
return get_day()
day = int(day)
return day
def load_df(city):
'''Loads data frame.
Args:
City
Returns:
Loads city CSV using pandas.
'''
df = | pd.read_csv(city) | pandas.read_csv |
import argparse
import pandas as pd
from util.util_funcs import load_json, load_jsonl
def main():
parser = argparse.ArgumentParser(
description="Merges table and sentence data for input to the veracity prediction model"
)
parser.add_argument(
"--tapas_csv_file",
default=None,
type=str,
help="Path to the csv file containing the tapas data",
)
parser.add_argument(
"--sentence_data_file",
default=None,
type=str,
help="Path to the jsonl file containing the sentence evidence",
)
parser.add_argument(
"--id_label_map_file",
default=None,
type=str,
help="Path to the json file containing the id label mapping",
)
parser.add_argument(
"--out_file",
default=None,
type=str,
help="Path to the output csv file to store the merged data",
)
args = parser.parse_args()
if not args.tapas_csv_file:
raise RuntimeError("Invalid tapas csv file path")
if ".csv" not in args.tapas_csv_file:
raise RuntimeError(
"The tapas csv file path should include the name of the .csv file"
)
if not args.sentence_data_file:
raise RuntimeError("Invalid sentence data file path")
if ".jsonl" not in args.sentence_data_file:
raise RuntimeError(
"The sentence data file path should include the name of the .jsonl file"
)
if not args.id_label_map_file:
raise RuntimeError("Invalid id label map file path")
if ".json" not in args.id_label_map_file:
raise RuntimeError(
"The id label map file path should include the name of the .jsonl file"
)
if not args.out_file:
raise RuntimeError("Invalid output file path")
if ".csv" not in args.out_file:
raise RuntimeError(
"The output file path should include the name of the .csv file"
)
tapas_data = pd.read_csv(args.tapas_csv_file)
tapas_data.rename(columns={"question": "claim"}, inplace=True)
tapas_data = tapas_data.drop(["annotator"], axis=1)
sentence_data = load_jsonl(args.sentence_data_file)
sent_data_table = pd.DataFrame(sentence_data)
sent_data_table.rename(columns={"id": "claim_id"}, inplace=True)
claim_id_label_map = load_json(args.id_label_map_file)
claim_id_label_map = {
"claim_id": list(claim_id_label_map.keys()),
"label": list(claim_id_label_map.values()),
}
claim_id_label_map_table = | pd.DataFrame(claim_id_label_map) | pandas.DataFrame |
import sys
import psutil
import pandas as pd
from tornado import gen
from functools import wraps
from distributed import Client, LocalCluster
from concurrent.futures import CancelledError
from ..static import DatasetStatus
from ..util import listify, logger
_cluster = None
tasks = {}
futures = {}
class StartCluster():
def __init__(self, n_cores=None):
if n_cores is None:
n_cores = psutil.cpu_count()-2
self.cluster = LocalCluster(processes=True, n_workers=1)
self.client = Client(self.cluster)
def __exit__(self, type, value, traceback):
self.cluster.close()
def _get_client():
global _cluster
if _cluster is None:
_cluster = StartCluster()
return _cluster.client
def add_async(f):
@wraps(f)
def wrapper(*args, **kwargs):
async_tasks = kwargs.pop('async_tasks', None)
if async_tasks:
client = _get_client()
future = client.submit(f, *args, **kwargs)
client.loop.add_callback(add_result_when_done, future)
futures[future.key] = future
tasks[future.key] = {
'fn': f.__name__,
'args': args,
'kwargs': kwargs,
'status': DatasetStatus.PENDING,
'result': None,
}
return future.key
else:
return f(*args, **kwargs)
return wrapper
def get_pending_tasks(**kwargs):
"""Return list of pending tasks
calls get_tasks with filter -> status=pending, passes through other kwargs
(filters={}, expand=None, as_dataframe=None, with_future=None)
"""
filters = {'status': DatasetStatus.PENDING}
if 'filters' in kwargs:
kwargs['filters'].update(filters)
else:
kwargs['filters'] = filters
return get_tasks(**kwargs)
def get_task(task_id, with_future=None):
"""Get details for a task.
Args:
task_id (string,Required):
id of a task
with_future (bool, Optional, Default=None):
If true include the task `future` objects in the returned dataframe/dictionary
"""
task = get_tasks(expand=True, with_future=with_future).get(task_id)
if task is None:
logger.error('task {} not found'.format(task_id))
return task
def get_tasks(filters=None, expand=None, as_dataframe=None, with_future=None):
"""Get all available tasks.
Args:
filters (dict, Optional, Default=None):
filter tasks by one or more of the available filters
available filters:
* `task_ids` (str or list): task id or list of task ids
* `status` (str or list): single status or list of statuses. Must be subset of
['pending', 'cancelled', 'finished', 'lost', 'error']
* `fn` (str): name of the function a task was assigned
* `args` (list): list of arguments that were passed to the task function
* `kwargs` (dict): dictionary of keyword arguments that were passed to the task function
* `result` (object): result of the task function
expand (bool, Optional, Default=None):
include details of tasks and format as a dict
as_dataframe (bool, Optional, Default=None):
include details of tasks and format as a pandas dataframe
with_future (bool, Optional, Default=None):
If true include the task `future` objects in the returned dataframe/dictionary
Returns:
tasks (list, dict, or pandas dataframe, Default=list):
all available tasks
"""
task_list = tasks
if filters:
task_ids = listify(filters.pop('task_ids', None))
if task_ids is not None:
task_list = {k: v for k, v in task_list.items() if k in task_ids}
statuses = listify(filters.pop('status', None))
if statuses is not None:
task_list = {k: v for k, v in task_list.items() if v['status'] in statuses}
for fk, fv in filters.items():
task_list = {k: v for k, v in task_list.items() if v[fk] == fv}
task_list = | pd.DataFrame.from_dict(task_list, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
import sys
# from urllib import urlopen # python2
from urllib.request import urlopen
#try:
# from rpy2.robjects.packages import importr
# try:
# biomaRt = importr("biomaRt")
# except:
# print "rpy2 could be loaded but 'biomaRt' could not be found.\nIf you want to use 'biomaRt' related functions please install 'biomaRt' in R.\n\n$ R\n> source('http://bioconductor.org/biocLite.R')\n> biocLite()\n> biocLite('biomaRt')\n> quit()"
# sys.stdout.flush()
#except:
# print "Failed to import rpy2 module.\nPlease make sure you are using the same version of R you had when AGEpy was installed."
# sys.stdout.flush()
import biomart
from biomart import BiomartServer
def organismsKEGG():
"""
Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row.
"""
organisms=urlopen("http://rest.kegg.jp/list/organism").read()
organisms=organisms.decode().split("\n")
#for o in organisms:
# print o
# sys.stdout.flush()
organisms=[ s.split("\t") for s in organisms ]
organisms=pd.DataFrame(organisms)
return organisms
def databasesKEGG(organism,ens_ids):
"""
Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensenbl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.decode().split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.decode().split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.decode().split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
print("This database does not seem to be valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organisms as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe of with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.decode().split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df
def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organisms as listed in organismsKEGG()
:returns: a Pandas dataframe of with 'ec' and 'KEGGid'.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read()
kegg_ec=kegg_ec.decode().split("\n")
final=[]
for k in kegg_ec:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['ec','KEGGid']
return df
def idsKEGG(organism):
"""
Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe of with 'gene_name' and 'KEGGid'.
"""
ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
ORG=ORG.decode().split("\n")
final=[]
for k in ORG:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['KEGGid','description']
field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
field = pd.DataFrame(field)
df = pd.concat([df[['KEGGid']],field],axis=1)
df.columns=['KEGGid','gene_name']
df=df[['gene_name','KEGGid']]
return df
def pathwaysKEGG(organism):
"""
Retrieves all pathways for a given organism.
:param organism: an organism as listed in organismsKEGG()
:returns df: a Pandas dataframe with the columns 'KEGGid','pathIDs', and 'pathName'.
:returns df_: a Pandas dataframe with a columns for 'KEGGid', and one column for each pathway with the corresponding gene ids below
"""
print("KEGG API: http://rest.kegg.jp/list/pathway/"+organism)
sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.decode().split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("KEGG API: http://rest.kegg.jp/link/"+organism+"/pathway/")
sys.stdout.flush()
kegg_paths_genes=urlopen("http://rest.kegg.jp/link/"+organism+"/pathway/").read()
kegg_paths_genes=kegg_paths_genes.decode().split("\n")
kegg_paths_genes=[ s.split("\t") for s in kegg_paths_genes ]
kegg_paths_genes=pd.DataFrame(kegg_paths_genes)
kegg_paths_genes.columns=['pathID','KEGGid']
df= | pd.merge(kegg_paths_genes,df,on=["pathID"],how="outer") | pandas.merge |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # VIP This notebook use data from `Fig2.ipynb`
# +
import os, sys, warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context(context='poster')
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['font.sans-serif'] = "Arial"
mpl.rcParams['font.monospace'] = 'Andale Mono'
# mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['mathtext.rm'] = 'Arial'
mpl.rcParams['mathtext.default'] = 'rm'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['text.usetex'] = False
mpl.rcParams['text.latex.preamble'] = [
r'\usepackage{wasysym}',
r'\usepackage{amsmath}',
r'\usepackage{amssymb}',
]
bigsize = 20
midsize = 18
smallsize = 14
hugesize = 24
# -
sys.path.append('.'); warnings.simplefilter(action='ignore', category=FutureWarning);
# +
from figdata import ref, xlsx_tab, sum_df, cell_chr_cnv_size
from figdata import sample_colors, good_pos, r_acen, chrf, bigCNV, big_cnv_cells, big_cnv_idx
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from sklearn.linear_model import LinearRegression
# -
from scipy import stats
FSAMPLE = True
FSAMPLE = False
# ---
# +
male_heatmap = pd.read_csv('male_heatmap.csv', index_col=0)
female_heatmap = pd.read_csv('female_heatmap.csv', index_col=0)
cmap_CNV = LinearSegmentedColormap.from_list('forCNV', ['blueviolet', 'deepskyblue', 'black', 'darkorange', 'orangered'])
# # Sort fraction
tmp_df = sum_df.loc[big_cnv_idx].sort_values('chrom_fraction', ascending=False)
tmp_df['chrom_fraction'] = (tmp_df['chrom_fraction']/10).round()
# # Sort chroms.
heatmap_order = (
tmp_df.set_index('Cell')
.loc[tmp_df['Cell']]
.reset_index()
.groupby('Cell',sort=False).first()
.replace({'Sample':xlsx_tab.set_index('SAMPLE')['CODE'].to_dict()})
# .sort_values(['chr_id','chrom_fraction','cnvTag','binSize','Cell'], ascending=[True, False, True, False, True])
.sort_values(
['chr_id','chrom_fraction','cnvTag','Sample','binSize','Cell'],
ascending=[True, False, True, True, False, True]
)
.index
)
# # Sort fraction
# tmp_df = cell_chr_cnv_size.sort_values('chrom_fraction', ascending=False)
# # Sort chroms.
# heatmap_order = (
# tmp_df.reset_index().set_index('Cell')
# .loc[tmp_df['Cell']]
# .reset_index()
# .groupby('Cell',sort=False).first()
# .sort_values(['chr_id','chrom_fraction','binSize','Cell'], ascending=[True, False, False, True])
# .index
# )
del(tmp_df)
# +
male_fig_df = male_heatmap.reindex(columns=big_cnv_cells).dropna(how='all',axis=1).T
male_fig_df.loc[:,male_fig_df.columns[male_fig_df.columns.str.contains(r'chr[X|Y]:')]] += 1
male_fig_df = male_fig_df.loc[:, ~male_fig_df.columns.str.contains('chrY')]
chr_pos = [
male_fig_df.columns.get_loc(idx)
for idx in ref.loc[male_fig_df.columns]
.reset_index()
.set_index(['space','start'])
.loc[
ref.loc[male_fig_df.columns]
.groupby('space',sort=False)
.min().reset_index()
.set_index(['space','start']).index,
'index'
].values
] + [ male_fig_df.columns.get_loc( male_fig_df.columns[-1] )-1 ]
if not FSAMPLE:
male_fig_df = male_fig_df.loc[male_fig_df.index[~male_fig_df.index.str.contains('$FSAMPLE')]]
# +
male_row_codes = xlsx_tab.set_index('SAMPLE').loc[
(
sum_df
.set_index('Cell')
.loc[male_fig_df.index,'Sample']
.reset_index()
.drop_duplicates(keep='first')
.set_index('Cell')
.reindex(index=heatmap_order)
.dropna()['Sample']
), 'CODE'
].dropna()
male_row_codes.index = (np.arange(0,male_row_codes.shape[0]))
if not FSAMPLE:
male_row_codes = male_row_codes.replace('M11','M10')
male_sample_fig_df = pd.concat(
[
pd.Series(np.arange(0,male_row_codes.shape[0]), name='y'),
male_row_codes.rename('x').str.replace('M','').astype(int),
], axis=1
)
# +
female_fig_df = female_heatmap.reindex(columns=big_cnv_cells).dropna(how='all',axis=1).T
female_fig_df.loc[:,female_fig_df.columns[female_fig_df.columns.str.contains(r'chrY:')]] += 2
female_fig_df = female_fig_df.loc[:, ~female_fig_df.columns.str.contains('chrY')]
female_row_codes = xlsx_tab.set_index('SAMPLE').loc[
(
sum_df
.set_index('Cell')
.loc[female_fig_df.index,'Sample']
.reset_index()
.drop_duplicates(keep='first')
.set_index('Cell')
.reindex(index=heatmap_order)
.dropna()['Sample']
), 'CODE'
].dropna()
female_row_codes.index = (np.arange(0,female_row_codes.shape[0]))
female_sample_fig_df = pd.concat(
[
pd.Series(np.arange(0,female_row_codes.shape[0]), name='y'),
female_row_codes.rename('x').str.replace('F','').astype(int),
], axis=1
)
# -
# ---
def male_heatmap_plot(ax1, ax2):
# ax1.scatter(
# male_sample_fig_df['x'], male_sample_fig_df['y'],
# s = 3, marker = 's',
# color = pd.Series(sample_colors)[male_sample_fig_df['x']-1],
# )
ax1.hlines(
male_sample_fig_df['y'],
male_sample_fig_df['x']-0.3, male_sample_fig_df['x']+0.3,
color = pd.Series(sample_colors)[male_sample_fig_df['x']-1],
lw=1
)
sns.heatmap(
male_fig_df.reindex(index=heatmap_order).dropna(),
cmap = cmap_CNV,
ax = ax2,
cbar = None,
vmax=4, vmin=0, center=2,
xticklabels=[], yticklabels=[],
rasterized=True,
)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_ylabel('Male Cell (n = ' + str(male_fig_df.shape[0]) + ')' , fontsize=midsize)
ax1.set_xlim(0,male_sample_fig_df['x'].max()+1)
ax2.vlines(chr_pos, ax2.get_ylim()[0], ax2.get_ylim()[1], lw=1, color='lightgrey')
ax2.set_ylabel('')
ax2.set_xticks([])
ax2.set_yticks([])
return
def female_heatmap_plot(ax1, ax2, ax3):
# ax1.scatter(
# female_sample_fig_df['x'], female_sample_fig_df['y'],
# s = 3, marker = 's',
# color = pd.Series(sample_colors[11:])[female_sample_fig_df['x']-1],
# )
ax1.hlines(
female_sample_fig_df['y'],
female_sample_fig_df['x']-0.3, female_sample_fig_df['x']+0.3,
color = pd.Series(sample_colors[11:])[female_sample_fig_df['x']-1],
lw=1
)
sns.heatmap(
female_fig_df.reindex(index=heatmap_order).dropna(),
cmap = cmap_CNV,
ax = ax2,
cbar_ax = ax3,
cbar_kws={
'ticks' : np.arange(0,5),
'boundaries' : np.arange(-0.5,5.5,1),
'pad' : 0.01,
'orientation': 'horizontal',
},
vmax=4, vmin=0, center=2,
xticklabels=[], yticklabels=[],
rasterized=True,
)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_ylabel('Female Cell (n = ' + str(female_fig_df.shape[0]) + ')' , fontsize=midsize)
ax1.set_xlim(0,female_sample_fig_df['x'].max()+1)
ax2.vlines(chr_pos, ax2.get_ylim()[0], ax2.get_ylim()[1], lw=1, color='lightgrey')
ax2.set_ylabel('')
ax2.set_xticks([])
ax2.set_yticks([])
return
# +
def sample_code_plot(ax, FSAMPLE=True):
# fig, ax = plt.subplots(figsize=(6,2))
if not FSAMPLE:
male_code = xlsx_tab.query('GENDER=="male" & SAMPLE!="$FSAMPLE"')['CODE'].replace('M11','M10')
else:
male_code = xlsx_tab.query('GENDER=="male"')['CODE']
female_code = xlsx_tab.query('GENDER=="female"')['CODE']
ax.scatter(
x = np.arange(male_code.shape[0]).reshape(1, male_code.shape[0]),
y = [1]*male_code.shape[0],
s = 200, marker = 's',
c=list( | pd.Series(sample_colors[:male_code.shape[0]]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.append('..')
# In[2]:
import os
import gc
import yaml
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from joblib import Parallel, delayed
from utils import optimize_dtypes, get_files, int16_repr, downcast_dtypes
# In[3]:
BASE_PATH = '../../'
CONFIG_DIR = os.path.join(BASE_PATH, 'config')
STORAGE_DIR = os.path.join(BASE_PATH, 'storage')
config = yaml.load(open(os.path.join(CONFIG_DIR, 'env.yml')),
Loader=yaml.FullLoader)
# In[4]:
model = 'unplugged'
model_storage = os.path.join(STORAGE_DIR, model)
output_storage = os.path.join(STORAGE_DIR, 'output')
# In[5]:
filepath = os.path.join(output_storage, f'{model}.parquet')
df = pd.read_parquet(filepath)
# In[6]:
converted_int = downcast_dtypes(df, ['int'], 'unsigned')
converted_float = downcast_dtypes(df, ['float'], 'float')
converted_bool = df.select_dtypes(include=['bool'])
converted_object = df.select_dtypes(include=['object'])
df.loc[:, converted_int.columns] = converted_int
df.loc[:, converted_float.columns] = converted_float
df.loc[:, converted_bool.columns] = converted_bool.astype(np.uint8)
df.loc[:, converted_object.columns] = converted_object.astype('category')
# In[7]:
mappings = {'id': 'uint32', 'device_id': 'uint32', 'battery_level': 'uint8', 'timezone': 'category', 'memory_active': 'uint32',
'memory_inactive': 'uint32', 'memory_free': 'uint32', 'memory_user': 'uint32', 'health': 'category', 'voltage': 'float32',
'temperature': 'float32', 'usage': 'uint8', 'up_time': 'float32', 'sleep_time': 'float32', 'wifi_signal_strength': 'int16',
'wifi_link_speed': 'int16', 'screen_on': 'uint8', 'screen_brightness': 'int16', 'roaming_enabled': 'uint8', 'bluetooth_enabled': 'uint8',
'location_enabled': 'uint8', 'power_saver_enabled': 'uint8', 'nfc_enabled': 'uint8', 'developer_mode': 'uint8', 'free': 'uint32',
'total': 'uint32', 'free_system': 'uint32', 'total_system': 'uint32', 'wifi_enabled': 'uint8', 'mobile_enabled': 'uint8',
'wifi_active': 'uint8', 'mobile_active': 'uint8', 'profile': 'uint16'}
# df = df.astype(mappings)
# In[8]:
df = df.rename({'usage': 'cpu_usage'}, axis=1)
# In[9]:
model = 'app_processes'
model_storage = os.path.join(STORAGE_DIR, model)
app_files = get_files(os.path.join(model_storage, '*.parquet'))
# In[10]:
mappings = {'sample_id': 'uint32', 'domain': 'category', 'application_label': 'category',
'is_system_app': 'uint8'}
subset = ['domain', 'battery_level', 'health', 'voltage', 'temperature', 'cpu_usage',
'screen_on', 'screen_brightness', 'roaming_enabled', 'bluetooth_enabled',
'location_enabled', 'power_saver_enabled', 'developer_mode',
'wifi_enabled', 'wifi_active', 'mobile_active']
# In[11]:
pages = []
min_id = df['id'].min()
found_start = False
for i, f in enumerate(app_files[:100]):
page = pd.read_parquet(f) if found_start else pd.read_parquet(f, columns=['sample_id'])
if page['sample_id'].max() < min_id:
continue
else:
page = pd.read_parquet(f)
found_start = True
print(f'Entry {i}')
page = page.dropna()
page = page.astype(mappings)
page = pd.merge(page, df, left_on='sample_id', right_on='id', sort=False, copy=False)
pages.append(page.drop_duplicates(subset=subset))
# In[ ]:
environments = | pd.DataFrame() | pandas.DataFrame |
import io
import requests
import pandas as pd
from bokeh.models import ColumnDataSource, HoverTool, ResizeTool, SaveTool
from bokeh.models.widgets import TextInput, Button
from bokeh.plotting import figure, curdoc
from bokeh.layouts import row, widgetbox
TICKER = ""
base = "https://api.iextrading.com/1.0/"
data = ColumnDataSource(dict(time=[], display_time=[], price=[]))
def get_last_price(symbol):
payload = {
"format": "csv",
"symbols": symbol
}
endpoint = "tops/last"
raw = requests.get(base + endpoint, params=payload)
raw = io.BytesIO(raw.content)
prices_df = pd.read_csv(raw, sep=",")
prices_df["time"] = | pd.to_datetime(prices_df["time"], unit="ms") | pandas.to_datetime |
import json
import boto3
import logging
import pandas as pd
import glob
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#https://stackoverflow.com/questions/43355074/read-a-csv-file-from-aws-s3-using-boto-and-pandas
def flight_data_df_from_response(response):
initial_df = | pd.read_csv(response['Body']) | pandas.read_csv |
import pandas as pd
import numpy as np
import openml
from pandas.api.types import is_numeric_dtype
from sklearn.model_selection import cross_validate, train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import f1_score, mean_squared_error
from sklearn.pipeline import Pipeline
from statistics import stdev
from warnings import filterwarnings, resetwarnings
from time import time
from datetime import datetime
from os import mkdir, listdir
from shutil import rmtree
import concurrent
import matplotlib.pyplot as plt
import seaborn as sns
from multiprocessing import Process, Queue
def get_single_dataset(q, dataset_did, dataset_name):
dataset = openml.datasets.get_dataset(dataset_did)
print(f" Loaded {dataset_name} from openml.org")
q.put(dataset)
class DatasetsTester:
"""
Tool to compare predictors (classifiers or regressors) on a set of datasets collected from openml.org.
This simplifies automatically comparing the performance of predictors on potentially large numbers
of datasets, thereby supporting more thorough and accurate testing of predictors.
"""
# have the directories and problem type set here
def __init__(self, problem_type, path_local_cache=""):
"""
problem_type: str
Either "classification" or "regression"
All estimators will be compared using the same metric, so it is necessary that all
datasets used are of the same type.
path_local_cache: str
Folder identify the local cache of datasets, stored in .csv format.
"""
self.problem_type = problem_type
self.path_local_cache = path_local_cache
self.openml_df = None
def check_problem_type(self):
problem_type_okay = self.problem_type in ["classification", "regression", "both"]
if not problem_type_okay:
print("problem_type must be one of: 'classification', 'regression', 'both'")
return problem_type_okay
def find_by_name(self, names_arr):
"""
Identifies, but does not collect, the set of datasets meeting the specified set of names.
Parameters
----------
names_arr: array of dataset names
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of names.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
self.openml_df = self.openml_df[self.openml_df.name.isin(names_arr)]
return self.openml_df
def find_by_tag(self, my_tag):
"""
Identifies, but does not collect, the set of datasets attached to the specified tag.
Parameters
----------
my_tag: the dataset tag
Returns
-------
dataframe with a row for each dataset on openml meeting the specified tag.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(tag=my_tag, output_format="dataframe")
return self.openml_df
def find_datasets(self,
use_cache=True,
min_num_classes=2,
max_num_classes=10,
min_num_minority_class=5,
max_num_minority_class=np.inf,
min_num_features=0,
max_num_features=100,
min_num_instances=500,
max_num_instances=5000,
min_num_numeric_features=0,
max_num_numeric_features=50,
min_num_categorical_features=0,
max_num_categorical_features=50):
"""
Identifies, but does not collect, the set of datasets meeting the specified set of names.
This, find_by_name(), or find_by_tag() must be called to identify the potential set of datasets to be collected.
Parameters
----------
All other parameters are direct checks of the statistics about each dataset provided by openml.org.
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of criteria.
"""
if not self.check_problem_type():
return None
if self.problem_type == "classification" and (min_num_classes <= 0 or max_num_classes <= 0):
print("For classification datasets, both min_num_classes and max_num_classes must be specified.")
return None
read_dataset_list = False # Set True if manage to read from cache. Otherwise read from openml.org.
if use_cache and self.path_local_cache != "":
try:
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df = pd.read_csv(path_to_file)
read_dataset_list = True
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
if not read_dataset_list:
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
if use_cache and self.path_local_cache != "":
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df.to_csv(path_to_file)
# Filter out datasets where some key attributes are unspecified
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.NumberOfFeatures) == False) &
(np.isnan(self.openml_df.NumberOfInstances) == False) &
(np.isnan(self.openml_df.NumberOfInstancesWithMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfNumericFeatures) == False) &
(np.isnan(self.openml_df.NumberOfSymbolicFeatures) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfFeatures >= min_num_features) &
(self.openml_df.NumberOfFeatures <= max_num_features) &
(self.openml_df.NumberOfInstances >= min_num_instances) &
(self.openml_df.NumberOfInstances <= max_num_instances) &
(self.openml_df.NumberOfNumericFeatures >= min_num_numeric_features) &
(self.openml_df.NumberOfNumericFeatures <= max_num_numeric_features) &
(self.openml_df.NumberOfSymbolicFeatures >= min_num_categorical_features) &
(self.openml_df.NumberOfSymbolicFeatures <= max_num_categorical_features)
]
if self.problem_type == "classification":
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.MajorityClassSize) == False) &
(np.isnan(self.openml_df.MaxNominalAttDistinctValues) == False) &
(np.isnan(self.openml_df.MinorityClassSize) == False) &
(np.isnan(self.openml_df.NumberOfClasses) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfClasses >= min_num_classes) &
(self.openml_df.NumberOfClasses <= max_num_classes) &
(self.openml_df.MinorityClassSize >= min_num_minority_class) &
(self.openml_df.MinorityClassSize <= max_num_minority_class)
]
if self.problem_type == "regression":
self.openml_df = self.openml_df[self.openml_df.NumberOfClasses == 0]
return self.openml_df
def collect_data(self,
max_num_datasets_used=-1,
method_pick_sets="pick_random",
shuffle_random_state=0,
exclude_list=None,
use_automatic_exclude_list=False,
max_cat_unique_vals=20,
keep_duplicated_names=False,
check_local_cache=False,
check_online=True,
save_local_cache=False,
preview_data=False,
one_hot_encode=True,
fill_nan_and_inf_zero=True,
verbose=False):
"""
This method collects the data from openml.org, unless check_local_cache is True and the dataset is available
in the local folder. This will collect the specified subset of datasets identified by the most recent call
to find_by_name() or find_datasets(). This allows users to call those methods until a suitable
collection of datasets have been identified.
Parameters
----------
max_num_datasets_used: integer
The maximum number of datasets to collect.
method_pick_sets: str
If only a subset of the full set of matches are to be collected, this identifies if those
will be selected randomly, or simply using the first matches
shuffle_random_state: int
Where method_pick_sets is "pick_random", this is used to shuffle the order of the datasets
exclude_list: array
list of names of datasets to exclude
use_automatic_exclude_list: bool
If set True, any files that can't be loaded will be appended to a list and subsequent calls will not attempt
to load them. This may be set to save time. However, if there are errors simply due to internet problems or
temporary issues, this may erroneously exclude some datasets.
max_cat_unique_vals: int
As categorical columns are one-hot encoded, it may not be desirable to one-hot encode categorical
columns with large numbers of unique values. Columns with a greater number of unique values than
max_cat_unique_vals will be dropped.
keep_duplicated_names: bool
If False, for each set of datasets with the same name, only the one with the highest
version number will be used. In some cases, different versions of a dataset are significantly different.
save_local_cache: bool
If True, any collected datasets will be saved locally in path_local_cache
check_local_cache: bool
If True, before collecting any datasets from openml.org, each will be checked to determine if
it is already stored locally in path_local_cache
check_online: bool
If True, openml.org may be checked for the dataset, unless check_local_cache is True and the dataset has
been cached.
preview_data: bool
Indicates if the first rows of each collected dataset should be displayed
one_hot_encode: bool
If True, categorical columns are one-hot encoded. This is necessary for many types of predictor, but
may be done elsewhere, for example in a pipeline passed to the run_tests() function.
fill_nan_and_inf_zero: bool
If True, all instances of NaN, inf and -inf are replaced with 0.0. Replacing these values with something
valid is necessary for many types of predictor, butmay be done elsewhere, for example in a pipeline passed
to the run_tests() function.
verbose: bool
If True, messages will be displayed indicating errors collecting any datasets.
Returns
-------
dataset_collection: dictionary containing: index in this collection, dataset_name, version, X, y
This method will attempt to collect as many datasets as specified, even where additional datasets must
be examined.
"""
def append_auto_exclude_list(did):
if not use_automatic_exclude_list:
return
auto_exclude_list.append(did)
def read_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "":
return
try:
path_to_file = self.path_local_cache + "/exclude_list.csv"
auto_list_df = pd.read_csv(path_to_file)
except Exception as e:
print(f" Error reading file: {e}")
return
auto_exclude_list = auto_list_df['List'].tolist()
def save_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "" or len(auto_exclude_list) == 0:
return
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/exclude_list.csv"
pd.DataFrame({'List': auto_exclude_list}).to_csv(path_to_file)
assert method_pick_sets in ['pick_first', 'pick_random']
q = Queue()
if self.openml_df is None or len(self.openml_df) == 0:
print("Error. No datasets specified. Call find_datasets() or find_by_name() before collect_data().")
return None
if not keep_duplicated_names:
self.openml_df = self.openml_df.drop_duplicates(subset=["name"], keep="last")
self.dataset_collection = []
#if max_num_datasets_used > -1 and max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
if -1 < max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
openml_subset_df = self.openml_df.sample(frac=1, random_state=shuffle_random_state)
else:
openml_subset_df = self.openml_df
auto_exclude_list = []
read_auto_exclude_list()
usable_dataset_idx = 0
for dataset_idx in range(len(openml_subset_df)):
if (max_num_datasets_used > -1) and (len(self.dataset_collection) >= max_num_datasets_used):
break
dataset_did = int(openml_subset_df.iloc[dataset_idx].did)
dataset_name = openml_subset_df.iloc[dataset_idx]['name']
dataset_version = openml_subset_df.iloc[dataset_idx]['version']
if not exclude_list is None and dataset_name in exclude_list:
continue
if dataset_did in auto_exclude_list:
continue
print(f"Collecting {usable_dataset_idx}: {dataset_name}")
dataset_df = None
dataset_source = ""
if check_local_cache:
try:
path_to_file = self.path_local_cache + "/" + dataset_name + '.csv'
X_with_y = pd.read_csv(path_to_file)
dataset_df = X_with_y.drop("y", axis=1)
y = X_with_y["y"]
dataset_source = "cache"
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
dataset_df = None
if not check_online and dataset_df is None:
continue
if dataset_df is None:
p = Process(target=get_single_dataset, name="get_single_dataset", args=(q, dataset_did, dataset_name))
p.start()
p.join(timeout=20)
if q.empty():
print(f" Unable to collect {dataset_name} from openml.org")
append_auto_exclude_list(dataset_did)
continue
dataset = q.get()
try:
X, y, categorical_indicator, attribute_names = dataset.get_data(
dataset_format="dataframe",
target=dataset.default_target_attribute
)
except Exception as e:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Error: {e}")
append_auto_exclude_list(dataset_did)
continue
if X is None or y is None:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. X or y is None")
append_auto_exclude_list(dataset_did)
continue
dataset_df = pd.DataFrame(X, columns=attribute_names)
if len(dataset_df) != len(y):
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Number rows in X: {len(X)}. Number rows in y: {len(y)}")
append_auto_exclude_list(dataset_did)
continue
if preview_data:
print(dataset_df.head())
if save_local_cache:
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
X_with_y = dataset_df.copy()
X_with_y['y'] = y
X_with_y.to_csv(self.path_local_cache + "/" + dataset_name + '.csv', index=False)
if (self.problem_type == "regression") and (is_numeric_dtype(y) == False):
continue
if dataset_source == "cache":
print(f" Reading from local cache: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
else:
print(f" Loading dataset from openml: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
dataset_df = self.__clean_dataset(dataset_df, max_cat_unique_vals, one_hot_encode,
fill_nan_and_inf_zero)
self.dataset_collection.append({'Index': usable_dataset_idx,
'Dataset_name': dataset_name,
'Dataset_version': dataset_version,
'X': dataset_df,
'y': y})
usable_dataset_idx += 1
save_auto_exclude_list()
def __clean_dataset(self, X, max_cat_unique_vals, one_hot_encode, fill_nan_and_inf_zero):
# The categorical_indicator provided by openml isn't 100% reliable, so we also check panda's is_numeric_dtype
categorical_indicator = [False] * len(X.columns)
for c in range(len(X.columns)):
if not is_numeric_dtype(X[X.columns[c]]):
categorical_indicator[c] = True
# Remove any NaN or inf values
if fill_nan_and_inf_zero:
for c_idx, col_name in enumerate(X.columns):
if categorical_indicator[c_idx] == True:
if hasattr(X[col_name], "cat"):
X[col_name] = X[col_name].cat.add_categories("").fillna("")
else:
X[col_name] = X[col_name].fillna("")
else:
X[col_name] = X[col_name].fillna(0.0)
# One-hot encode the categorical columns
if one_hot_encode:
new_df = | pd.DataFrame() | pandas.DataFrame |
"""
This script is for analysing the outputs from the implementation of DeepAR in GluonTS
"""
import os, time
from pathlib import Path
import streamlit as st
import pandas as pd
import numpy as np
from gluonts.model.predictor import Predictor
from gluonts.dataset.common import ListDataset
from gluonts.transform import FieldName
from gluonts.evaluation.backtest import make_evaluation_predictions
import autodraft.visualization as viz
import autodraft.gluonts as glu
import autodraft.api as nhl
from bokeh.sampledata.perceptions import probly
# @st.cache
def load_model(file_path):
model = Predictor.deserialize(Path(file_path))
return model
@st.cache
def get_data(path='../../data/input/full_dataset_4_seasons.csv'):
data = pd.read_csv(path)
return data
# @st.cache
# def load_predictions(path='/home/ubuntu/AutoDraft/data/deepar_truncated_results_ne100_lre-4_bs64.csv'):
# data = pd.read_csv(path, index_col=2)
# return data
@st.cache
def load_predictions(path='../../data/output/deepar_truncated_results_unit_s_ne300_lr1e-3_bs64_nl3_cl3.csv'):
data = pd.read_csv(path, index_col=2)
model_name = path.split('/')[-1].split('.')[0]
return data, model_name
@st.cache
def load_joe():
joe = pd.read_csv('../../data/input/joe_schmo_4_seasons.csv')
return joe
@st.cache
def get_roster(path='../../data/input/full_roster_4_seasons.csv'):
data = | pd.read_csv(path) | pandas.read_csv |
import torch
import os
import sys
import pandas as pd
class WeeBitDataset(torch.utils.data.Dataset):
def __init__(self, datapath):
self.dirs = os.listdir(datapath)
self.data_dict = {'WRLevel2': [], 'WRLevel4': [], 'WRLevel3': [], 'BitGCSE': [], 'BitKS3': []}
for dir in self.dirs:
if dir != '.DS_Store':
files = os.listdir('WeeBit-TextOnly/' + dir)
for file in files:
if file != '.DS_Store':
f = open('WeeBit-TextOnly/' + dir + '/' + file, 'rb')
text = f.read()
text = text.decode('utf-8', errors='ignore').replace('\n', ' ')
self.data_dict[dir].append(text)
f.close()
self.text_to_label = {'WRLevel2': 0, 'WRLevel3': 1, 'WRLevel4': 2, 'BitKS3': 3, 'BitGCSE': 4}
self.df = | pd.DataFrame(columns=['text', 'label']) | pandas.DataFrame |
# %%
import os
from google.cloud.bigquery.client import Client
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import typing
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_score
from collections import Counter
# %%
# set Google BigQuery client
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'C:\\Users\\Francesco\\fiery-rarity-322109-6ba6fa8a811c.json'
bq_client = Client()
# set seaborn plotting theme
sns.set_theme()
# set colour palette
COLORS_PALETTE = ['#00388F', '#FFB400', '#FF4B00', '#65B800', '#00B1EA']
sns.set_palette(sns.color_palette(COLORS_PALETTE))
# %%
# run query
address_clustering = bq_client.query('''
select *
from `fiery-rarity-322109.ethereum.address_classification`
where rank <= 1000 and date_month >= '2017-01-01'
''').to_dataframe()
# %%
class SklearnWrapper:
def __init__(self, transformation: typing.Callable):
self.transformation = transformation
self._group_transforms = []
# Start with -1 and for each group up the pointer by one
self._pointer = -1
def _call_with_function(self, df: pd.DataFrame, function: str):
# If pointer >= len we are making a new apply, reset _pointer
if self._pointer >= len(self._group_transforms):
self._pointer = -1
self._pointer += 1
return pd.DataFrame(
getattr(self._group_transforms[self._pointer], function)(df.values),
columns=df.columns,
index=df.index,
)
def fit(self, df):
self._group_transforms.append(self.transformation.fit(df.values))
return self
def transform(self, df):
return self._call_with_function(df, "transform")
def fit_transform(self, df):
self.fit(df)
return self.transform(df)
def inverse_transform(self, df):
return self._call_with_function(df, "inverse_transform")
# %%
# Create scaler outside the class
scaler = SklearnWrapper(StandardScaler())
# Fit and transform data (holding state)
df_scale = address_clustering.loc[: , ~address_clustering.columns.isin(['address', 'rank'])].groupby("date_month").apply(scaler.fit_transform)
df_scale = df_scale.loc[: , ~df_scale.columns.isin(['date_month'])]
# %%
# Run t-SNE
tsne = TSNE(n_components=3, verbose=1, random_state=42) #, perplexity=80, n_iter=5000, learning_rate=200
tsne_scale_results = tsne.fit_transform(df_scale)
tsne_df_scale = | pd.DataFrame(tsne_scale_results, columns=['t-SNE 1', 't-SNE 2', 't-SNE 3']) | pandas.DataFrame |
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, Sequence
import numpy as np
import pandas as pd
import scipy.optimize
from invoice_net.parsers import (
parses_as_full_date,
parses_as_amount,
parses_as_invoice_number,
)
from invoice_net.data_handler import DataHandler
def __inner_filter_out_mistakes(
tokens: Iterable[str],
filter_func: Callable[[str], Any],
ignore_exceptions: bool = False,
) -> np.ndarray:
mask = []
for token in tokens:
try:
mask.append(bool(filter_func(token)))
except Exception:
if ignore_exceptions:
mask.append(False)
else:
raise
return np.array(mask)
def _filter_out_mistakes(token_predictions: pd.DataFrame) -> pd.DataFrame:
"""Filter out obvious mistakes, like Foo bar -> date prediction"""
filters_table: Dict[str, Callable[[str], Any]] = defaultdict(
lambda: lambda x: x
)
filters_table["document_date"] = parses_as_full_date
filters_table["document_id"] = parses_as_invoice_number
filters_table["amount_total"] = parses_as_amount
groups = []
for prediction, group in token_predictions.groupby("pred"):
groups.append(
group[
__inner_filter_out_mistakes(
group.word, filters_table[prediction]
)
]
)
return pd.concat(groups)
def _get_token_predictions(
predictions: np.ndarray, raw_text: Sequence[str], file_names: pd.Series
) -> pd.DataFrame:
"""Take model predictions and flatten to prediction per token."""
assert predictions.shape[0] == len(raw_text) == len(file_names), (
f"Number of samples does not match; ({predictions.shape[0]}, "
f"{len(raw_text)}, {len(file_names)})"
)
assert predictions.ndim == 3
candidates = np.where(predictions > 0.5)
tokens = [line.split() for line in raw_text]
tmp = []
for sample_idx, token_idx, class_idx in zip(*candidates):
# if prediction is not for padding text
if len(tokens[sample_idx]) > token_idx:
tmp.append(
{
"word": tokens[sample_idx][token_idx],
"pred": class_idx,
"confidence": predictions[sample_idx, token_idx, class_idx],
"file_name": file_names.iloc[sample_idx],
}
)
return pd.DataFrame.from_records(tmp)
def hungarian_prediction(token_predictions):
predictions = defaultdict(dict)
for file_name, df in token_predictions.groupby("file_name"):
hungarian_table = pd.pivot_table(
df,
values=["cost"],
index=["word"],
columns=["pred"],
aggfunc=np.min,
fill_value=1,
)
row_idxs, col_idxs = scipy.optimize.linear_sum_assignment(
hungarian_table
)
for row_idx, col_idx in zip(row_idxs, col_idxs):
col_name = hungarian_table.columns[col_idx][1]
predictions[file_name][col_name] = (
hungarian_table.iloc[row_idx].name,
1 - hungarian_table.iloc[row_idx, col_idx],
)
predictions_df = | pd.DataFrame(predictions) | pandas.DataFrame |
"""
Common routines to work with raw MS data from metabolomics experiments.
Functions
---------
detect_features(path_list) : Perform feature detection on several samples.
feature_correspondence(feature_data) : Match features across different samples
using a combination of clustering algorithms.
"""
import pandas as pd
import numpy as np
from .fileio import MSData
from .container import DataContainer
from .lcms import Roi
from . import validation
from pathlib import Path
from sklearn.cluster import DBSCAN
from sklearn import mixture
from scipy.optimize import linear_sum_assignment
from typing import Optional, Tuple, List, Dict, Union
from IPython.display import clear_output
__all__ = ["detect_features", "feature_correspondence", "make_data_container"]
def detect_features(path: Union[Path, List[str]], separation: str = "uplc",
instrument: str = "qtof", roi_params: Optional[dict] = None,
smoothing_strength: Optional[float] = 1.0,
noise_params: Optional[dict] = None,
baseline_params: Optional[dict] = None,
find_peaks_params: Optional[dict] = None,
descriptors: Optional[dict] = None,
filters: Optional[dict] = None,
verbose: bool = True
) -> Tuple[Dict[str, List[Roi]], pd.DataFrame]:
"""
Perform feature detection on LC-MS centroid samples.
Parameters
----------
path: Path or List[str]
Path can be a list of strings of absolute path representations to mzML
files in centroid mode or a Path object. Path objects can be used in
two ways: It can point to a mzML file or to a directory. in the second
case all mzML files inside the directory will be analyzed.
separation: {"uplc", "hplc"}
Analytical platform used for separation. Used to set default the values
of `detect_peak_params`, `roi_params` and `filter_params`.
instrument: {"qtof". "orbitrap"}
MS instrument used for data acquisition. Used to set default value
of `roi_params`.
roi_params: dict, optional
parameters to pass to :py:meth:`tidyms.MSData.make_roi`
smoothing_strength: positive number, optional
Width of a gaussian window used to smooth the ROI. If None, no
smoothing is applied.
find_peaks_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.detect_peaks`
descriptors : dict, optional
descriptors to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
filters : dict, optional
filters to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
noise_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_noise`
baseline_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_baseline`
descriptors : dict, optional
pass custom descriptors to :py:func:`tidyms.peaks.get_peak_descriptors`
filters : dict, optional
pass custom filters to :py:func:`tidyms.peaks.get_peak_descriptors`
verbose: bool
Returns
-------
roi_dict: dict
dictionary of sample names to a list of ROI.
feature_table: DataFrame
A Pandas DataFrame where each row is a feature detected in a sample and
each column is a feature descriptor. By default the following
descriptors are computed:
mz
weighted average of the m/z in the peak region.
mz std
standard deviation of the m/z in the peak region.
rt
weighted average of the retention time in the peak region.
width
Chromatographic peak width.
height
Height of the chromatographic peak minus the baseline.
area
Area of the chromatographic peak. minus the baseline area.
sample
The sample name where the feature was detected.
Also, two additional columns have information to search each feature
in its correspondent Roi:
roi_index :
index in the list of ROI where the feature was detected.
peak_index :
index of the peaks attribute of each ROI associated to the feature.
Notes
-----
Features are detected as follows:
1. Default parameters are set based on the values of the parameters
`instrument` and `separation`.
2. Regions of interest (ROI) are detected in each sample. See the
documentation of :py:meth:`tidyms.fileio.MSData.make_roi` for a detailed
description of how ROI are created from raw data.
3. Features (chromatographic peaks) are detected on each ROI. See
:py:meth:`tidyms.lcms.Chromatogram.find_peaks` for a detailed
description of how peaks are detected and how descriptors are computed.
See Also
--------
fileio.MSData.make_roi : Finds ROIs in a mzML sample.
lcms.ROI.find_peaks : Detect peaks and compute peak estimators for a ROI.
"""
# parameter validation
# validation.validate_detect_peaks_params(detect_peak_params)
validation.validate_descriptors(descriptors)
validation.validate_filters(filters)
if roi_params is None:
roi_params = dict()
path_list = _get_path_list(path)
roi_dict = dict()
ft_table_list = list()
n_samples = len(path_list)
for k, sample_path in enumerate(path_list):
sample_name = sample_path.stem
sample_path_str = str(sample_path)
ms_data = MSData(sample_path_str, ms_mode="centroid",
instrument=instrument, separation=separation)
k_roi = ms_data.make_roi(**roi_params)
if verbose:
clear_output(wait=True)
msg = "Processing sample {} ({}/{})."
msg = msg.format(sample_name, k + 1, n_samples)
print(msg)
print("Searching features in {} ROI...".format(len(k_roi)), end=" ")
k_table = _build_feature_table(k_roi,
smoothing_strength=smoothing_strength,
descriptors=descriptors,
filters=filters,
noise_params=noise_params,
baseline_params=baseline_params,
find_peaks_params=find_peaks_params)
if verbose:
msg = "Found {} features".format(k_table.shape[0])
print(msg)
k_table["sample"] = sample_name
roi_dict[sample_name] = k_roi
ft_table_list.append(k_table)
feature_table = pd.concat(ft_table_list).reset_index(drop=True)
feature_table["roi index"] = feature_table["roi index"].astype(int)
feature_table["peak index"] = feature_table["peak index"].astype(int)
return roi_dict, feature_table
def _get_path_list(path: Union[str, List[str], Path]) -> List[Path]:
if isinstance(path, str):
path = Path(path)
if isinstance(path, list):
path_list = [Path(x) for x in path]
for p in path_list:
# check if all files in the list exists
if not p.is_file():
msg = "{} doesn't exist".format(p)
raise ValueError(msg)
else:
if path.is_dir():
path_list = list(path.glob("*.mzML"))
elif path.is_file():
path_list = [path]
else:
msg = ("Path must be a string or Path object pointing to a "
"directory with mzML files or a list strings with the "
"absolute path to mzML")
raise ValueError(msg)
return path_list
def _build_feature_table(roi: List[Roi],
smoothing_strength: Optional[float] = 1.0,
descriptors: Optional[dict] = None,
filters: Optional[dict] = None,
noise_params: Optional[dict] = None,
baseline_params: Optional[dict] = None,
find_peaks_params: Optional[dict] = None
) -> pd.DataFrame:
"""
Builds a DataFrame with feature descriptors.
Parameters
----------
roi : List[Roi]
find_peaks_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.detect_peaks`
smoothing_strength: positive number, optional
Width of a gaussian window used to smooth the signal. If None, no
smoothing is applied.
descriptors : dict, optional
descriptors to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
filters : dict, optional
filters to pass to :py:func:`tidyms.peaks.get_peak_descriptors`
noise_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_noise`
baseline_params : dict, optional
parameters to pass to :py:func:`tidyms.peaks.estimate_baseline`
Returns
-------
DataFrame
"""
roi_index_list = list()
peak_index_list = list()
mz_mean_list = list()
mz_std_list = list()
descriptors_list = list()
for roi_index, k_roi in enumerate(roi):
k_roi.fill_nan()
k_params = k_roi.find_peaks(smoothing_strength=smoothing_strength,
descriptors=descriptors,
filters=filters,
noise_params=noise_params,
baseline_params=baseline_params,
find_peaks_params=find_peaks_params)
n_features = len(k_params)
descriptors_list.extend(k_params)
k_mz_mean, k_mz_std = k_roi.get_peaks_mz()
roi_index_list.append([roi_index] * n_features)
peak_index_list.append(range(n_features))
mz_mean_list.append(k_mz_mean)
mz_std_list.append(k_mz_std)
roi_index_list = np.hstack(roi_index_list)
peak_index_list = np.hstack(peak_index_list)
mz_mean_list = np.hstack(mz_mean_list)
mz_std_list = np.hstack(mz_std_list)
ft_table = pd.DataFrame(data=descriptors_list)
ft_table = ft_table.rename(columns={"loc": "rt"})
ft_table["mz"] = mz_mean_list
ft_table["mz std"] = mz_std_list
ft_table["roi index"] = roi_index_list
ft_table["peak index"] = peak_index_list
ft_table = ft_table.dropna(axis=0)
ft_table["roi index"] = ft_table["roi index"].astype(int)
ft_table["peak index"] = ft_table["peak index"].astype(int)
return ft_table
def feature_correspondence(feature_data: pd.DataFrame, mz_tolerance: float,
rt_tolerance: float, min_fraction: float = 0.2,
min_likelihood: float = 0.0):
r"""
Match features across different samples.
Feature matching is done using the DBSCAN algorithm and Gaussian mixture
models. After performing feature correspondence, features that come from the
same species are clustered together.
Parameters
----------
feature_data: DataFrame
Feature descriptors obtained from detect_features function
mz_tolerance: float
Maximum distance in m/z between two features in a cluster.
rt_tolerance: float
Maximum distance in rt between two features in a cluster.
min_fraction: float
Minimum fraction of samples forming a cluster.
min_likelihood: float
Minimum likelihood required to recover a missing value. Lower values
will recover more features, but with lower confidence.
Returns
-------
cluster: Series
The cluster number for each feature.
See Also
--------
detect_features
make_data_container
Notes
-----
The correspondence algorithm is as follows:
1. Features ares clustered using m/z and rt information with the DBSCAN
algorithm. Because the dispersion in the m/z and r/t dimension is
independent the Chebyshev distance is used to make the clusters.
`rt_tolerance` and `mz_tolerance` are used to build the :math:`\epsilon`
parameter of the model. rt is scaled using these two parameters to have
the same tolerance in both dimensions in the following way:
.. math::
rt_{scaled} = rt * \frac{mz_{tolerance}}{rt_{tolerance}}
The min_samples parameter is defined using from the minimum_dr
(minimum detection rate) and the total number of samples in the
data. This step gives us a matching of the features, but different
species can be clustered together if they are close, or some features
can be considered as noise and removed. These cases are analyzed in the
following steps.
2. In this step the possibility that more than one species is present in a
cluster is explored. The number of species is estimated computing the
number of repeated features, :math:`n_{repeated}` in the cluster (that
is, how many features come from only one sample, hoy many come from
two, etc...). The fraction of repeated samples is computed using the
total number of samples and then the number of species,
:math:`n_{species}` is found as the maximum of repetitions with a
fraction greater than `min_fraction`. Using :math:`n_{species}`, Each
cluster is fit to a gaussian mixture model. Once again, because
dispersion in rt and m/z is orthogonal, we used diagonal covariances
matrices in the GMM. After this step, for each cluster, subclusters
may be generated if :math:`n_{species}` is greater than one.
3. Each subcluster is analyzed then to remove repeated features. To
remove repeated features the log-likelihood is evaluated for
features coming from the same sample. The feature with the greatest
likelihood is kept and the others are flagged as noise.
4. The final step is to search for missing features (a feature is
missing if some of the samples are not present in a subcluster).
This is done by searching for features that come from missing samples
in the features flagged as noise by DBSCAN (or in the previous
step). If a feature from a missing sample has a log-likelihood
greater than `min_likelihood` then is added to the subcluster. If
more than one feature is possible the one with the greatest
likelihood is chosen.
"""
# sample names are used to search for missing samples.
sample_names = feature_data["sample"].unique()
# DBSCAN clustering
min_samples = int(sample_names.size * min_fraction + 1)
cluster = _make_initial_cluster(feature_data, mz_tolerance, rt_tolerance,
min_samples)
# split feature data into clustered data and noise
data = feature_data[cluster != -1]
noise = feature_data[cluster == -1]
# cluster number is converted to a string. This makes easier to assign
# subclusters using the notation 0-0, 0-1, etc...
# TODO : maybe there's a better solution to this and it's not necessary
# to convert values to str.
cluster = cluster.astype(str)
features_per_cluster = _estimate_n_species_per_cluster(data, cluster)
for name, group in data.groupby(cluster):
n_ft = features_per_cluster[name]
# Here each cluster is split into subclusters, repeated values are
# removed and missing values are searched in noise.
# Each change is made changing the values in cluster.
subcluster = _process_cluster(group, noise, cluster, sample_names, name,
n_species=n_ft,
min_likelihood=min_likelihood)
cluster[subcluster.index] = subcluster
# map cluster to numbers again
cluster_value = np.sort(cluster.unique())
n_cluster = cluster_value.size
has_noise = "-1" in cluster_value
# set a feature code for each feature
if has_noise:
cluster_names = _make_feature_names(n_cluster - 1)
cluster_names = ["noise"] + cluster_names
else:
cluster_names = _make_feature_names(n_cluster)
cluster_mapping = dict(zip(cluster_value, cluster_names))
cluster = cluster.map(cluster_mapping)
return cluster
def make_data_container(feature_data: pd.DataFrame, cluster: pd.Series,
sample_metadata: pd.DataFrame,
fill_na: bool = True) -> DataContainer:
"""
Organizes the detected and matched features into a DataContainer.
Parameters
----------
feature_data: DataFrame
DataFrame obtained from detect_features function.
cluster: pd.Series
Series obtained from feature_correspondence function.
sample_metadata: DataFrame
DataFrame with information from each analyzed sample. The index must
be the sample names used in feature_data. A column named "class", with
the class name of each sample is required. For further data processing
run order information in a column named "order" and analytical batch
information in a column named "batch" are recommended.
fill_na: bool, True
If True fill missing values in the data matrix with zeros.
Returns
-------
DataContainer
"""
# remove noise
feature_data["cluster"] = cluster
not_noise = cluster != "noise"
feature_data = feature_data[not_noise]
# compute aggregate statistics for each feature -> feature metadata
estimators = {"mz": ["mean", "std", "min", "max"],
"rt": ["mean", "std", "min", "max"]}
feature_metadata = feature_data.groupby("cluster").agg(estimators)
feature_metadata.columns = _flatten_column_multindex(feature_metadata)
feature_metadata.index.name = "feature"
# make data matrix
data_matrix = feature_data.pivot(index="sample", columns="cluster",
values="area")
data_matrix.columns.name = "feature"
if fill_na:
data_matrix = data_matrix.fillna(0)
# add samples without features as nan rows
missing_index = sample_metadata.index.difference(data_matrix.index)
# TODO: manage data inputting
missing = pd.DataFrame(data=0, index=missing_index,
columns=data_matrix.columns)
data_matrix = data_matrix.append(missing)
data_matrix = data_matrix.loc[sample_metadata.index, :]
dc = DataContainer(data_matrix, feature_metadata, sample_metadata)
return dc
def _make_feature_names(n_features: int):
max_ft_str_length = len(str(n_features))
def ft_formatter(x):
return "FT" + str(x + 1).rjust(max_ft_str_length, "0")
ft_names = [ft_formatter(x) for x in range(n_features)]
return ft_names
def _flatten_column_multindex(df: pd.DataFrame):
columns = df.columns
level_0 = columns.get_level_values(0)
level_1 = columns.get_level_values(1)
col_name_map = {"mzmean": "mz", "mzstd": "mz std", "mzmin": "mz min",
"mzmax": "mz max", "rtmean": "rt", "rtstd": "rt std",
"rtmin": "rt min", "rtmax": "rt max"}
new_names = [col_name_map[x + y] for x, y in zip(level_0, level_1)]
return new_names
def _make_initial_cluster(feature_data: pd.DataFrame, mz_tolerance: float,
rt_tolerance: float, min_samples: int = 8):
"""
First guess of correspondence between features using DBSCAN algorithm.
Auxiliary function to feature_correspondence.
Parameters
----------
feature_data : DataFrame
DataFrame obtained from `detect_features` function.
mz_tolerance : float
Used to build epsilon parameter of DBSCAN
rt_tolerance : float
Used to build epsilon parameter of DBSCAN.
min_samples : int
parameter to pass to DBSCAN
Returns
-------
cluster : Series
The assigned cluster by DBSCAN
"""
ft_points = feature_data.loc[:, ["mz", "rt"]].copy()
ft_points["rt"] = ft_points["rt"] * mz_tolerance / rt_tolerance
dbscan = DBSCAN(eps=mz_tolerance, min_samples=min_samples,
metric="chebyshev")
dbscan.fit(ft_points)
cluster = pd.Series(data=dbscan.labels_, index=feature_data.index)
return cluster
def _estimate_n_species_per_cluster(df: pd.DataFrame, cluster: pd. Series,
min_dr: float = 0.2):
"""
Estimates the number of features that forms a cluster.
The number of features is estimated as follows:
1. The number of features per sample is counted and normalized
to the total number of features.
2. The number of features in a cluster will be the maximum
normalized number of features per sample greater than the minimum
detection rate.
Parameters
----------
df: DataFrame
Feature data obtained from feature_correspondence function
min_dr: float, 0.2
Minimum detection rate.
"""
# sample_per_cluster counts the number of features that come from the same
# sample and express it as a fraction of the total number features
# the number of features in a cluster is the maximum number of samples
# in a cluster above the minimum detection rate.
def find_n_cluster(x):
return x.index[np.where(x > min_dr)[0]][-1]
sample_per_cluster = (df["sample"].groupby(cluster)
.value_counts()
.unstack(-1)
.fillna(0)
.astype(int)
.apply(lambda x: x.value_counts(), axis=1)
.fillna(0))
sample_per_cluster = sample_per_cluster / df["sample"].unique().size
features_per_cluster = sample_per_cluster.apply(find_n_cluster, axis=1)
return features_per_cluster
def _make_gmm(ft_data: pd.DataFrame, n_feature: int, cluster_name: str):
"""
fit a gaussian model and set subcluster names for each feature. Auxiliary
function to process cluster.
Parameters
----------
ft_data : DataFrame
The mz and rt columns of the cluster DataFrame
n_feature : int
Number of features estimated in the cluster.
cluster_name: str
Returns
-------
gmm : GaussianMixtureModel fitted with cluster data
score: The log-likelihood of each feature.
subcluster : pd.Series with subcluster labels.
"""
gmm = mixture.GaussianMixture(n_components=n_feature,
covariance_type="diag")
gmm.fit(ft_data.loc[:, ["mz", "rt"]])
# scores = pd.Series(data=gmm.score_samples(ft_data), index=ft_data.index)
ft_data["score"] = gmm.score_samples(ft_data.loc[:, ["mz", "rt"]])
# get index of features in the cases where the number of features is greater
# than the number of components in the gmm
noise_index = (ft_data
.groupby("sample")
.filter(lambda x: x.shape[0] > n_feature))
if not noise_index.empty:
noise_index = (noise_index
.groupby("sample")
.apply(lambda x: _noise_ind(x, n_feature))
.droplevel(0)
.index)
else:
noise_index = noise_index.index
noise = | pd.Series(data="-1", index=noise_index) | pandas.Series |
#Tools for mesh-based operations
#Smoothing,
import nibabel as nb
import numpy as np
import subprocess
import os
import potpourri3d as pp3d
import meld_classifier.paths as paths
# import paths as paths
def find_nearest_multi(array, value):
new_array = np.array([abs(x - value) for x in array])
min_array = new_array.min()
idx = [i for i, val in enumerate(new_array) if val == min_array]
return idx
def calibrate_smoothing(coords, faces, start_v=125000, n_iter=70, cortex_mask=None):
"""find the calibratin curve for smoothing of a surface mesh"""
print("Need to calibrate the smoothing curve for this surface mesh")
# initialise a null array with only one non-null vertices in the middle of the cortex
input_array = np.zeros(len(coords))
input_array[start_v] = 1
# find neighbours and include cortex mask
neighbours = get_neighbours_from_tris(faces)
old_array = input_array.copy()
if cortex_mask is not None:
for v, n in enumerate(neighbours):
n = np.array(n)
neighbours[v] = n[cortex_mask[n]]
else:
cortex_mask = np.ones(len(neighbours), dtype=bool)
# preload geodesic distance solver
solver = pp3d.MeshHeatMethodDistanceSolver(coords, faces)
# create an array of geodesic distance from the mask (vertices > 0) to outward
distance = solver.compute_distance_multisource([start_v])
# smooth the array at each iteration and find the full-width-at-half-maximum of non-null area
fwhm_values = []
for iteration in np.arange(n_iter):
new_array = smooth_array(old_array,neighbours,n_iter=1,cortex_mask=cortex_mask)
# This is the smoothing function
old_array = new_array.copy()
# threshold at half maximum value
max_pic = old_array.max()
edge_v = find_nearest_multi(old_array, max_pic / 2)
fwhm_value = distance[edge_v].mean() * 2
fwhm_values.append(fwhm_value)
fwhm_values[0] = 0
# fit polynome to the mode to find calibration curve
x = np.arange(n_iter)
y = fwhm_values
model = np.poly1d(np.polyfit(x, y, 2))
line = np.linspace(0, n_iter, 100)
try:
plt.plot(line, model(line))
plt.savefig(os.path.join(paths.BASE_PATH, "Images", "calibration_curve_smoothing.png"))
except:
pass
print("End of calibration")
return line, model(line)
def smooth_array(input_array, neighbours, n_iter=70, cortex_mask=None):
"""smooth a matrix of surface data surface data
input_array: variable to be smoothed n_vert x n_subs
neighbours: list of neighbours cohort.neighbours works
n_iter - number of iterations 5mm fwhm = 18 iterations
10mm fwhm = 70 iterations
cortex_mask: binary mask of cortex eg cohort.cortex_mask"""
dim = len(input_array.shape)
if dim==1:
input_array=input_array.reshape(-1,1)
old_array = input_array.T
if cortex_mask is not None:
for v, n in enumerate(neighbours):
n = np.array(n)
neighbours[v] = n[cortex_mask[n]]
else:
cortex_mask = np.ones(len(neighbours), dtype=bool)
#neighbours mask & array
neighbours_array = np.zeros((len(neighbours),7),dtype=int)
neighbours_mask=np.ones((len(neighbours),7),dtype=bool)
#create masked array
for ni,n in enumerate(neighbours):
neighbours_array[ni,:len(neighbours[ni])]=neighbours[ni]
neighbours_mask[ni,:len(neighbours[ni])]=False
neighbours_array[ni,len(neighbours[ni])]=ni
neighbours_mask[ni,len(neighbours[ni])]=False
size=input_array.shape[1]
for iteration in np.arange(n_iter):
arr=np.ma.masked_array(old_array[:,neighbours_array],np.tile(neighbours_mask,(size,1,1)))
new_array = arr.mean(axis=2)
old_array = new_array
if dim==1:
return np.squeeze(new_array.data)
return new_array.data.T
def save_mgh(filename, array, demo):
"""save mgh file using nibabel and imported demo mgh file"""
rand_int = np.random.randint(100)
mmap = np.memmap("/tmp/tmp" + str(rand_int), dtype="float32", mode="w+", shape=demo.get_data().shape)
mmap[:, 0, 0] = array[:]
output = nb.MGHImage(mmap, demo.affine, demo.header)
nb.save(output, filename)
def smoothing_fs(overlay, fwhm, subject="fsaverage_sym", hemi="lh"):
subjects_dir = paths.BASE_PATH
os.environ["SUBJECTS_DIR"] = paths.BASE_PATH
"""smooth surface overlay on fsaverage_sym"""
tmpdir = "/tmp/" + str(np.random.randint(1000000))
os.mkdir(tmpdir)
dum = nb.load(os.path.join(subjects_dir, subject, "surf", hemi + ".white.avg.area.mgh"))
save_mgh(os.path.join(tmpdir, hemi + ".tmp.mgh"), overlay, dum)
subprocess.call(
"mris_fwhm --s "
+ subject
+ " --hemi "
+ hemi
+ " --cortex --smooth-only --fwhm "
+ str(fwhm)
+ " --i "
+ os.path.join(tmpdir, hemi + ".tmp.mgh")
+ " --o "
+ os.path.join(tmpdir, hemi + ".sm_tmp.mgh"),
shell=True,
)
overlay_smoothed = load_mgh(os.path.join(tmpdir, hemi + ".sm_tmp.mgh"))
subprocess.call("rm -r " + tmpdir, shell=True)
return overlay_smoothed
def load_mgh(filename):
"""import mgh file using nibabel. returns flattened data array"""
mgh_file = nb.load(filename)
mmap_data = mgh_file.get_data()
array_data = np.ndarray.flatten(mmap_data)
return array_data
# function to load mesh geometry
def load_mesh_geometry(surf_mesh):
# if input is a filename, try to load it with nibabel
if isinstance(surf_mesh, str):
if (
surf_mesh.endswith("orig")
or surf_mesh.endswith("pial")
or surf_mesh.endswith("white")
or surf_mesh.endswith("sphere")
or surf_mesh.endswith("inflated")
):
coords, faces = nb.freesurfer.io.read_geometry(surf_mesh)
elif surf_mesh.endswith("gii"):
coords, faces = (
nb.gifti.read(surf_mesh).getArraysFromIntent(nb.nifti1.intent_codes["NIFTI_INTENT_POINTSET"])[0].data,
nb.gifti.read(surf_mesh).getArraysFromIntent(nb.nifti1.intent_codes["NIFTI_INTENT_TRIANGLE"])[0].data,
)
elif surf_mesh.endswith("vtk"):
coords, faces, _ = read_vtk(surf_mesh)
elif surf_mesh.endswith("ply"):
coords, faces = read_ply(surf_mesh)
elif surf_mesh.endswith("obj"):
coords, faces = read_obj(surf_mesh)
elif isinstance(surf_mesh, dict):
if "faces" in surf_mesh and "coords" in surf_mesh:
coords, faces = surf_mesh["coords"], surf_mesh["faces"]
else:
raise ValueError(
"If surf_mesh is given as a dictionary it must " 'contain items with keys "coords" and "faces"'
)
else:
raise ValueError(
"surf_mesh must be a either filename or a dictionary " 'containing items with keys "coords" and "faces"'
)
return {"coords": coords, "faces": faces}
# function to load mesh data
def load_mesh_data(surf_data, gii_darray=0):
# if the input is a filename, load it
if isinstance(surf_data, str):
if surf_data.endswith("nii") or surf_data.endswith("nii.gz") or surf_data.endswith("mgz"):
data = np.squeeze(nb.load(surf_data).get_data())
elif (
surf_data.endswith("curv")
or surf_data.endswith("sulc")
or surf_data.endswith("area")
or surf_data.endswith("thickness")
):
data = nb.freesurfer.io.read_morph_data(surf_data)
elif surf_data.endswith("annot"):
data = nb.freesurfer.io.read_annot(surf_data)[0]
elif surf_data.endswith("label"):
data = nb.freesurfer.io.read_label(surf_data)
# check if this works with multiple indices (if dim(data)>1)
elif surf_data.endswith("gii"):
fulldata = nb.gifti.giftiio.read(surf_data)
n_vectors = len(fulldata.darrays)
if n_vectors == 1:
data = fulldata.darrays[gii_darray].data
else:
print("Multiple data files found, output will be matrix")
data = np.zeros([len(fulldata.darrays[gii_darray].data), n_vectors])
for gii_darray in range(n_vectors):
data[:, gii_darray] = fulldata.darrays[gii_darray].data
elif surf_data.endswith("vtk"):
_, _, data = read_vtk(surf_data)
elif surf_data.endswith("txt"):
data = np.loadtxt(surf_data)
else:
raise ValueError("Format of data file not recognized.")
elif isinstance(surf_data, np.ndarray):
data = np.squeeze(surf_data)
return data
## function to write mesh data
def save_mesh_data(fname, surf_data):
if isinstance(fname, str) and isinstance(surf_data, np.ndarray):
if fname.endswith("curv") or fname.endswith("thickness") or fname.endswith("sulc"):
nb.freesurfer.io.write_morph_data(fname, surf_data)
elif fname.endswith("txt"):
np.savetxt(fname, surf_data)
elif fname.endswith("vtk"):
if "data" in surf_dict.keys():
write_vtk(fname, surf_dict["coords"], surf_dict["faces"], surf_dict["data"])
else:
write_vtk(fname, surf_dict["coords"], surf_dict["faces"])
elif fname.endswith("gii"):
print("please write lovely write gifti command")
elif fname.endswith("mgh"):
print("please write lovely write mgh command, or retry saving as .curv file")
else:
raise ValueError("fname must be a filename and surf_data must be a numpy array")
# function to read vtk files
# ideally use pyvtk, but it didn't work for our data, look into why
def read_vtk(file):
"""
Reads ASCII coded vtk files using pandas,
returning vertices, faces and data as three numpy arrays.
"""
import pandas as pd
import csv
# read full file while dropping empty lines
try:
vtk_df = pd.read_csv(file, header=None, engine="python")
except csv.Error:
raise ValueError("This vtk file appears to be binary coded currently only ASCII coded vtk files can be read")
vtk_df = vtk_df.dropna()
# extract number of vertices and faces
number_vertices = int(vtk_df[vtk_df[0].str.contains("POINTS")][0].iloc[0].split()[1])
number_faces = int(vtk_df[vtk_df[0].str.contains("POLYGONS")][0].iloc[0].split()[1])
# read vertices into df and array
start_vertices = (vtk_df[vtk_df[0].str.contains("POINTS")].index.tolist()[0]) + 1
vertex_df = pd.read_csv(
file, skiprows=range(start_vertices), nrows=number_vertices, sep="\s*", header=None, engine="python"
)
if np.array(vertex_df).shape[1] == 3:
vertex_array = np.array(vertex_df)
# sometimes the vtk format is weird with 9 indices per line, then it has to be reshaped
elif np.array(vertex_df).shape[1] == 9:
vertex_df = pd.read_csv(
file,
skiprows=range(start_vertices),
nrows=int(number_vertices / 3) + 1,
sep="\s*",
header=None,
engine="python",
)
vertex_array = np.array(vertex_df.iloc[0:1, 0:3])
vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 3:6], axis=0)
vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 6:9], axis=0)
for row in range(1, (int(number_vertices / 3) + 1)):
for col in [0, 3, 6]:
vertex_array = np.append(
vertex_array, np.array(vertex_df.iloc[row : (row + 1), col : (col + 3)]), axis=0
)
# strip rows containing nans
vertex_array = vertex_array[~np.isnan(vertex_array)].reshape(number_vertices, 3)
else:
print("vertex indices out of shape")
# read faces into df and array
start_faces = (vtk_df[vtk_df[0].str.contains("POLYGONS")].index.tolist()[0]) + 1
face_df = pd.read_csv(
file, skiprows=range(start_faces), nrows=number_faces, sep="\s*", header=None, engine="python"
)
face_array = np.array(face_df.iloc[:, 1:4])
# read data into df and array if exists
if vtk_df[vtk_df[0].str.contains("POINT_DATA")].index.tolist() != []:
start_data = (vtk_df[vtk_df[0].str.contains("POINT_DATA")].index.tolist()[0]) + 3
number_data = number_vertices
data_df = pd.read_csv(
file, skiprows=range(start_data), nrows=number_data, sep="\s*", header=None, engine="python"
)
data_array = np.array(data_df)
else:
data_array = np.empty(0)
return vertex_array, face_array, data_array
# function to read ASCII coded ply file
def read_ply(file):
import pandas as pd
import csv
# read full file and drop empty lines
try:
ply_df = pd.read_csv(file, header=None, engine="python")
except csv.Error:
raise ValueError("This ply file appears to be binary coded currently only ASCII coded ply files can be read")
ply_df = ply_df.dropna()
# extract number of vertices and faces, and row that marks the end of header
number_vertices = int(ply_df[ply_df[0].str.contains("element vertex")][0].iloc[0].split()[2])
number_faces = int(ply_df[ply_df[0].str.contains("element face")][0].iloc[0].split()[2])
end_header = ply_df[ply_df[0].str.contains("end_header")].index.tolist()[0]
# read vertex coordinates into dict
vertex_df = pd.read_csv(
file, skiprows=range(end_header + 1), nrows=number_vertices, sep="\s*", header=None, engine="python"
)
vertex_array = np.array(vertex_df)
# read face indices into dict
face_df = pd.read_csv(
file,
skiprows=range(end_header + number_vertices + 1),
nrows=number_faces,
sep="\s*",
header=None,
engine="python",
)
face_array = np.array(face_df.iloc[:, 1:4])
return vertex_array, face_array
# function to read MNI obj mesh format
def read_obj(file):
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
def indices(lst, element):
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset + 1)
except ValueError:
return result
result.append(offset)
fp = open(file, "r")
n_vert = []
n_poly = []
k = 0
Polys = []
# Find number of vertices and number of polygons, stored in .obj file.
# Then extract list of all vertices in polygons
for i, line in enumerate(fp):
if i == 0:
# Number of vertices
n_vert = int(line.split()[6])
XYZ = np.zeros([n_vert, 3])
elif i <= n_vert:
XYZ[i - 1] = list(map(float, line.split()))
elif i > 2 * n_vert + 5:
if not line.strip():
k = 1
elif k == 1:
Polys.extend(line.split())
Polys = list(map(int, Polys))
npPolys = np.array(Polys)
triangles = np.array(list(chunks(Polys, 3)))
return XYZ, triangles
# function to save mesh geometry
def save_mesh_geometry(fname, surf_dict):
# if input is a filename, try to load it with nibabel
if isinstance(fname, str) and isinstance(surf_dict, dict):
if (
fname.endswith("orig")
or fname.endswith("pial")
or fname.endswith("white")
or fname.endswith("sphere")
or fname.endswith("inflated")
):
nb.freesurfer.io.write_geometry(fname, surf_dict["coords"], surf_dict["faces"])
# save_freesurfer(fname,surf_dict['coords'],surf_dict['faces'])
elif fname.endswith("gii"):
write_gifti(fname, surf_dict["coords"], surf_dict["faces"])
elif fname.endswith("vtk"):
if "data" in surf_dict.keys():
write_vtk(fname, surf_dict["coords"], surf_dict["faces"], surf_dict["data"])
else:
write_vtk(fname, surf_dict["coords"], surf_dict["faces"])
elif fname.endswith("ply"):
write_ply(fname, surf_dict["coords"], surf_dict["faces"])
elif fname.endswith("obj"):
save_obj(fname, surf_dict["coords"], surf_dict["faces"])
# print('to view mesh in brainview, run the command:\n')
# print('average_objects ' + fname + ' ' + fname)
else:
raise ValueError("fname must be a filename and surf_dict must be a dictionary")
def write_gifti(surf_mesh, coords, faces):
coord_array = nb.gifti.GiftiDataArray(data=coords, intent=nb.nifti1.intent_codes["NIFTI_INTENT_POINTSET"])
face_array = nb.gifti.GiftiDataArray(data=faces, intent=nb.nifti1.intent_codes["NIFTI_INTENT_TRIANGLE"])
gii = nb.gifti.GiftiImage(darrays=[coord_array, face_array])
nb.gifti.write(gii, surf_mesh)
def save_obj(surf_mesh, coords, faces):
# write out MNI - obj format
n_vert = len(coords)
norms = normal_vectors(coords, faces).tolist()
XYZ = coords.tolist()
Tri = faces.tolist()
with open(surf_mesh, "w") as s:
line1 = "P 0.3 0.3 0.4 10 1 " + str(n_vert) + "\n"
s.write(line1)
k = -1
for a in XYZ:
k += 1
cor = " " + " ".join(list(map(str, XYZ[k])))
s.write("%s\n" % cor)
s.write("\n")
for k, a in enumerate(XYZ):
normal = " " + " ".join(list(map(str, norms[k])))
s.write("%s\n" % normal)
s.write("\n")
l = " " + str(len(Tri)) + "\n"
s.write(l)
s.write(" 0 1 1 1 1\n")
s.write("\n")
nt = len(Tri) * 3
Triangles = np.arange(3, nt + 1, 3)
Rounded8 = np.shape(Triangles)[0] / 8
N8 = int(8 * Rounded8)
Triangles8 = Triangles[0:N8]
RowsOf8 = np.split(Triangles8, N8 / 8)
for r in RowsOf8:
L = r.tolist()
Lint = map(int, L)
Line = " " + " ".join(map(str, Lint))
s.write("%s\n" % Line)
L = Triangles[N8:].tolist()
Lint = map(int, L)
Line = " " + " ".join(map(str, Lint))
s.write("%s\n" % Line)
s.write("\n")
ListOfTriangles = np.array(Tri).flatten()
Rounded8 = np.shape(ListOfTriangles)[0] / 8
N8 = int(8 * Rounded8)
Triangles8 = ListOfTriangles[0:N8]
ListTri8 = ListOfTriangles[0:N8]
RowsOf8 = np.split(Triangles8, N8 / 8)
for r in RowsOf8:
L = r.tolist()
Lint = map(int, L)
Line = " " + " ".join(map(str, Lint))
s.write("%s\n" % Line)
L = ListOfTriangles[N8:].tolist()
Lint = map(int, L)
Line = " " + " ".join(map(str, Lint))
s.write("%s\n" % Line)
def write_vtk(filename, vertices, faces, data=None, comment=None):
"""
Creates ASCII coded vtk file from numpy arrays using pandas.
Inputs:
-------
(mandatory)
* filename: str, path to location where vtk file should be stored
* vertices: numpy array with vertex coordinates, shape (n_vertices, 3)
* faces: numpy array with face specifications, shape (n_faces, 3)
(optional)
* data: numpy array with data points, shape (n_vertices, n_datapoints)
NOTE: n_datapoints can be =1 but cannot be skipped (n_vertices,)
* comment: str, is written into the comment section of the vtk file
Usage:
---------------------
write_vtk('/path/to/vtk/file.vtk', v_array, f_array)
"""
import pandas as pd
# infer number of vertices and faces
number_vertices = vertices.shape[0]
number_faces = faces.shape[0]
if data is not None:
number_data = data.shape[0]
# make header and subheader dataframe
header = [
"# vtk DataFile Version 3.0",
"%s" % comment,
"ASCII",
"DATASET POLYDATA",
"POINTS %i float" % number_vertices,
]
header_df = pd.DataFrame(header)
sub_header = ["POLYGONS %i %i" % (number_faces, 4 * number_faces)]
sub_header_df = pd.DataFrame(sub_header)
# make dataframe from vertices
vertex_df = pd.DataFrame(vertices)
# make dataframe from faces, appending first row of 3's (indicating the polygons are triangles)
triangles = np.reshape(3 * (np.ones(number_faces)), (number_faces, 1))
triangles = triangles.astype(int)
faces = faces.astype(int)
faces_df = pd.DataFrame(np.concatenate((triangles, faces), axis=1))
# write dfs to csv
header_df.to_csv(filename, header=None, index=False)
with open(filename, "a") as f:
vertex_df.to_csv(f, header=False, index=False, float_format="%.3f", sep=" ")
with open(filename, "a") as f:
sub_header_df.to_csv(f, header=False, index=False)
with open(filename, "a") as f:
faces_df.to_csv(f, header=False, index=False, float_format="%.0f", sep=" ")
# if there is data append second subheader and data
if data != None:
datapoints = data.shape[1]
sub_header2 = [
"POINT_DATA %i" % (number_data),
"SCALARS EmbedVertex float %i" % (datapoints),
"LOOKUP_TABLE default",
]
sub_header_df2 = pd.DataFrame(sub_header2)
data_df = pd.DataFrame(data)
with open(filename, "a") as f:
sub_header_df2.to_csv(f, header=False, index=False)
with open(filename, "a") as f:
data_df.to_csv(f, header=False, index=False, float_format="%.16f", sep=" ")
def write_ply(filename, vertices, faces, comment=None):
import pandas as pd
print("writing ply format")
# infer number of vertices and faces
number_vertices = vertices.shape[0]
number_faces = faces.shape[0]
# make header dataframe
header = [
"ply",
"format ascii 1.0",
"comment %s" % comment,
"element vertex %i" % number_vertices,
"property float x",
"property float y",
"property float z",
"element face %i" % number_faces,
"property list uchar int vertex_indices",
"end_header",
]
header_df = pd.DataFrame(header)
# make dataframe from vertices
vertex_df = | pd.DataFrame(vertices) | pandas.DataFrame |
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
#Code starts here
data = | pd.read_csv(path) | pandas.read_csv |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
| pd.Index(['a', 'b'], dtype='object') | pandas.Index |
import pandas as pd
from typing import Tuple, Optional, List, Union
from ..tools import _to_list_if_str
DfListTuple = Tuple[pd.DataFrame, Optional[list]]
def fixed_effects_reg_df_and_cols_dict(df, fe_vars):
fe_vars = _to_list_if_str(fe_vars)
fe_cols_dict = {}
for fe_var in fe_vars:
df, cols = _fixed_effects_reg_df_and_cols(df, fe_var)
fe_cols_dict[fe_var] = cols
return df, fe_cols_dict
def _fixed_effects_reg_df_and_cols(df, fe_var):
dummies = _get_dummy_df(df, fe_var)
dummy_cols = [col for col in dummies.columns]
fe_df = pd.concat([df, dummies], axis=1)
if fe_var in fe_df.columns:
fe_df.drop(fe_var, axis=1, inplace=True)
return fe_df, dummy_cols
def _get_dummy_df(df: pd.DataFrame, fe_var: str) -> pd.DataFrame:
dummy_calc_df, index_cols = _get_dummy_calc_df(df, fe_var)
dummies = pd.get_dummies(dummy_calc_df[fe_var].astype(str))
if index_cols is not None:
# meed to add index back to dummy df
dummies = | pd.concat([dummy_calc_df[index_cols], dummies], axis=1) | pandas.concat |
# coding=utf-8
import pandas as pd
import re
import os
import json
from datetime import datetime
class dataset_object:
"""
This class allow you to store the data and the category of these data
"""
def __init__(self, dataset,name):
self.dataset, self.name = dataset, name
class file_object:
"""
This class allow you to store detailed informations concerning the raw files
"""
def __init__(self, name, category, extension, path):
self.name, self.category, self.extension, self.path = name, category, extension, path
def load_data (list_of_file_object):
"""
Description :
This function allow you to load data from a list of file_object
and store those data in a dataset_object instance with the category of file
those data have been loaded from. Then store these object in a dictionary where the
keys is the category of the file where the data are from.
Based on the extension of the files :
Loading the data from the file
If the are more one file, the respective dataframe are concatenate
Storing the in a dictionary with the category as the key
Args:
list_of_file_object: list of file_object
Returns:
output: dict, key : string
value : dataset_object
"""
dict_ds = {}
for f in list_of_file_object:
name_and_ext = re.split(r'\.',f.name)
if(f.extension == "json"):
if f.category in dict_ds:
ds = pd.read_json(f.path + f.name)
ds_concat = pd.concat([dict_ds[f.category].dataset,ds])
dict_ds[f.category] = dataset_object(ds_concat,name_and_ext[0])
else:
ds = pd.read_json(f.path + f.name)
dict_ds[f.category] = dataset_object(ds,name_and_ext[0])
elif(f.extension == "csv"):
if f.category in dict_ds:
ds = pd.read_csv(f.path + f.name)
ds_concat = | pd.concat([dict_ds[f.category].dataset,ds]) | pandas.concat |
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import pickle
from sklearn.decomposition import IncrementalPCA, MiniBatchDictionaryLearning
import gc
def load_subject(subject_filename):
with open(subject_filename, 'rb') as f:
subject_data = pickle.load(f)
return subject_data
class ImageLoader():
def __init__(self, transforms=None):
self.transforms = transforms
pass
def transform(self, X, y=None):
X = load_subject(X)
if self.transforms is not None:
X = self.transforms(image=X)['image']
return X
def main():
parser = argparse.ArgumentParser(description='train pca and dl features')
parser.add_argument('--data-path', default='./data/raw',
help='path to original data, default ./data/raw')
parser.add_argument('--imgs-path', default='./data/imgs',
help='path to resaved images, default ./data/imgs')
parser.add_argument('--path-to-save', default='./data/features',
help='path to save features, default ./data/features')
parser.add_argument('--path-to-save-model', default='./models/pca',
help='path to save models, default ./models/pca')
args = parser.parse_args()
data_path = args.data_path
imgs_path = args.imgs_path
path_to_save = args.path_to_save
path_to_save_model = args.path_to_save_model
for _path in [path_to_save, path_to_save_model,
os.path.join(path_to_save, '100dl_feats'),
os.path.join(path_to_save, '200pca_feats')]:
if not os.path.exists(_path):
os.makedirs(_path)
loading = pd.read_csv(os.path.join(data_path, 'loading.csv'), index_col = ['Id'])
# creates pathes to all images
img_path = pd.DataFrame(index=loading.index, columns=['path'])
for index in img_path.index:
path = str(index) + '.npy'
img_path.loc[index, 'path'] = os.path.join(imgs_path, path)
# start train and inference of pca feats
print('PCA started. ~13 hours')
for k in range(0, 6):
##fit
pca = IncrementalPCA(n_components=200)
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 200 == 0:
batch = np.array(batch)
pca.partial_fit(batch)
del batch
gc.collect()
batch = []
##save pca
_p = os.path.join(path_to_save_model, f'200pca_3d_k{k}.pickle')
with open(_p, 'wb') as f:
pickle.dump(pca, f)
##transform
res = []
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 200 == 0:
batch = np.array(batch)
res.append(pca.transform(batch))
del batch
gc.collect()
batch = []
lb = len(batch)
if lb > 0:
batch = np.array(batch)
if lb == 1:
res.append(pca.transform(batch.reshape(1, -1)))
else:
res.append(pca.transform(batch))
##save df
res = np.array(res)
df_res = pd.DataFrame(np.vstack(res), index=loading.index, columns=[f'200PCA_k{k}_' + str(i) for i in range(200)])
_p = os.path.join(path_to_save, f'200pca_feats/200pca_3d_k{k}.csv')
df_res.to_csv(_p)
print('Dictionary learning started. ~47 hours')
n_k = 100
for k in range(0, 6):
##fit
pca = MiniBatchDictionaryLearning(n_components=n_k, random_state=0, n_iter=10, batch_size=n_k)
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 100 == 0:
batch = np.array(batch)
pca.partial_fit(batch)
del batch
gc.collect()
batch = []
##save pca
_p = os.path.join(path_to_save_model, f'dl_3d_k{k}.pickle')
with open(_p, 'wb') as f:
pickle.dump(pca, f)
##transform
res = []
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 100 == 0:
batch = np.array(batch)
res.append(pca.transform(batch))
del batch
gc.collect()
batch = []
lb = len(batch)
if lb > 0:
batch = np.array(batch)
if lb == 1:
res.append(pca.transform(batch.reshape(1, -1)))
else:
res.append(pca.transform(batch))
##save df
res = np.array(res)
df_res = pd.DataFrame(np.vstack(res), index=loading.index, columns=[f'dl_k{k}_' + str(i) for i in range(n_k)])
_p = os.path.join(path_to_save, f'100dl_feats/dl_3d_k{k}.csv')
df_res.to_csv(_p)
#resave results
_p = os.path.join(path_to_save, '100dl_feats/dl_3d_k0.csv')
data_pca = pd.read_csv(_p)
for i in range(1, 6):
_p = os.path.join(path_to_save, '100dl_feats/dl_3d_k{}.csv'.format(i))
part = pd.read_csv(_p)
del part['Id']
data_pca = | pd.concat((data_pca, part), axis=1) | pandas.concat |
"""
Created on Jan 09 2021
<NAME> and <NAME>
database analysis from
https://data.gov.il/dataset/covid-19
Israel sities coordinates data
https://data-israeldata.opendata.arcgis.com/
"""
import json
import requests
import sys
import extract_israel_data
from Utils import *
import time
import pandas as pd
import os
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots
import datetime
import numpy as np
import warnings
plt.style.use('default')
warnings.filterwarnings("ignore")
line_statistic_plot_log=None
line_statistic_plot_fix_date=False
# data line plot
def line_statistic_plot(db, base, fields, title, ylabel, legend, text, save_name, log=None, fix_date=False):
f, ax = plt.subplots(figsize=(18, 6))
date = db[base]
date = pd.to_datetime(date)
len_data = len(date)
colors = plotly.colors.qualitative.Dark24 # ['blue', 'green', 'magenta', 'black', 'red', 'cyan', 'yellow']
sum_case = []
for cnt in range(len(fields)):
case = fields[cnt]
sum_case.append(db[case].max())
plt.plot(date, db[case], zorder=1, color=colors[cnt], linewidth=3)
plt.title(title, fontsize=20)
plt.ylabel(ylabel, fontsize=16)
plt.legend(legend, fontsize=14)
if fix_date:
datemin = pd.to_datetime('2020-03-01')
datemax = pd.to_datetime('2021-03-01')
else:
datemin = date.min()
datemax = date.max()
ax.set_xlim(datemin, datemax)
ax.grid(True)
# rotate and align the tick labels so they look better
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.fmt_xdata = formatter
f.autofmt_xdate()
if log:
ax.set_yscale('log')
if text is not None:
tline = 0.25*max(sum_case)
for kk in range(len(text)):
plt.plot((text[kk], text[kk]), (0, tline), '-k', linewidth=3)
plt.text(text[kk], 1.1*tline, text[kk].strftime('%d/%m/%y'), horizontalalignment='center', fontweight='bold', fontsize=14)
save_string = save_name + datemax.strftime('%d%m%y') + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
# Begin
full_data_file = os.path.join(os.getcwd(), time.strftime("%d%m%Y"), time.strftime("%d%m%Y") + '_loaded_files.csv')
if os.path.exists(full_data_file):
files_db = pd.read_csv(full_data_file, encoding="ISO-8859-8")
first_plt = False
else:
os.makedirs(os.path.join(os.getcwd(), time.strftime("%d%m%Y")), exist_ok=True)
# Extract Data from Israel Dataset COVID-19
files_db = extract_israel_data.extract_israel_data()
first_plt = True
# Print LOG to file
stdoutOrigin = sys.stdout
fout = open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), 'israel_status_log.txt'), 'a')
sys.stdout = MyWriter(sys.stdout, fout)
text = None
# text = pd.date_range('2020-04-01', '2021-04-01', freq="MS")
# Isolation
isolated = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('isolation').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('isolation').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'date'
isolated[base] = pd.to_datetime(isolated[base])
isolated = isolated.sort_values([base])
for key in isolated.keys():
try:
isolated.loc[isolated[key].str.contains('15>') != False, key] = 15
isolated[key] = isolated[key].astype(int)
except:
pass
iso1 = isolated.new_contact_with_confirmed.astype(int).sum()
iso2 = isolated.new_from_abroad.astype(int).sum()
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - isolated persons, total ' + str(iso1+iso2) + ', now ' +\
str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
ylabel = 'Number of individuals'
legend = ('Isolated due to contact with confirmed, total ' + str(iso1), 'Isolated due to arrived from abroad, total ' + str(iso2))
save_name = 'israelIsolatedPersons_'
fields = ['isolated_today_contact_with_confirmed', 'isolated_today_abroad']
# plot Isolated Total
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot isolated daily
fields = ['new_contact_with_confirmed', 'new_from_abroad']
save_name = 'israelIsolatedPersons_Daily_'
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - Daily isolated persons, total ' + str(iso1+iso2) + ', now ' +\
str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del isolated
###################################################################################################################
# Medical Staff
coronaMediaclStaffD = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('medical_staff').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('medical_staff').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'Date'
coronaMediaclStaffD[base] = pd.to_datetime(coronaMediaclStaffD[base])
coronaMediaclStaffD = coronaMediaclStaffD.sort_values([base])
for key in coronaMediaclStaffD.keys():
try:
coronaMediaclStaffD.loc[coronaMediaclStaffD[key].str.contains('<15') != False, key] = 15
coronaMediaclStaffD[key] = coronaMediaclStaffD[key].astype(int)
except:
pass
ylabel = 'Number of individuals'
title = 'Israel - medical staff confirmed (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
save_name = 'coronaMediaclStaffConfirmed_'
fields = ['confirmed_cases_physicians', 'confirmed_cases_nurses', 'confirmed_cases_other_healthcare_workers']
legend = ['Confirmed physicians', 'Confirmed nurses', 'Confirmed other healthcare workers']
# plot coronaMediaclStaffConfirmed Total
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot coronaMediaclStaffIsolated daily
title = 'Israel - medical staff in isolation (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
fields = ['isolated_physicians', 'isolated_nurses', 'isolated_other_healthcare_workers']
legend = ['Isolated physicians', 'Isolated nurses', 'Isolated other healthcare workers']
save_name = 'coronaMediaclStaffIsolated_'
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del coronaMediaclStaffD
###################################################################################################################
# Hospitalization
hospitalization = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('hospitalization').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('hospitalization').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'תאריך'
hospitalization[base] = pd.to_datetime(hospitalization[base])
hospitalization = hospitalization.sort_values([base])
for key in hospitalization.keys():
try:
hospitalization.loc[hospitalization[key].str.contains('15>') != False, key] = 15
hospitalization.loc[hospitalization[key].str.contains('<15') != False, key] = 15
hospitalization[key] = hospitalization[key].astype(int)
except:
pass
ylabel = 'Number of individuals [persons]'
title = 'Israel - Critical conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalized_'
fields = ['מונשמים', 'חולים קשה', 'מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalized Total
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInAge_'
fields = ['גיל ממוצע מונשמים', 'גיל ממוצע חולים קשה', 'גיל ממוצע מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInWomens_'
fields = ['אחוז נשים מונשמות', 'אחוז נשים חולות קשה', 'אחוז נשים מאושפזות']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Ill
title = 'Israel - ill conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['חולים קל', 'חולים בינוני', 'חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditions_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel mean Age Ill
title = 'Israel - ill conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['גיל ממוצע חולים קל', 'גיל ממוצע חולים בינוני', 'גיל ממוצע חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditionsInAge_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Women Percentage Ill
title = 'Israel - ill conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['אחוז נשים חולות קל', 'אחוז נשים חולות בינוני', 'אחוז נשים חולות קשה']
legend = ('Light ill', 'Middle ill', 'Seriously ill')
save_name = 'illConditionsInWomens_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del hospitalization
###################################################################################################################
# Recovered
recovered = pd.read_excel(files_db.current_file_path[files_db.current_file_name.str.find('recovered').values.argmax()], encoding="ISO-8859-8")
###################################################################################################################
id = files_db.current_file_name.str.find('recovered').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
recoveredMeanTime = recovered.days_between_pos_and_recovery.mean()
recoveredMedianTime = recovered.days_between_pos_and_recovery.median()
print('Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
print('Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
NN = int(recovered.days_between_pos_and_recovery.max())
hh = np.histogram(recovered.days_between_pos_and_recovery, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
# ax.set_yscale('log')
plt.plot([recoveredMedianTime, recoveredMedianTime], [0, hh[0].max()], 'k--')
plt.text(recoveredMedianTime, hh[0].max(), ' Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
plt.plot([recoveredMeanTime, recoveredMeanTime], [0, hh[0][int(recoveredMeanTime)]], 'k--')
plt.text(recoveredMeanTime, hh[0][int(recoveredMeanTime)], ' Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to recovered [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
try:
data_from = pd.to_datetime(str(files_db.last_update[id]))
plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelRecovered_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del recovered
###################################################################################################################
# Deceased
deceased = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('deceased').values.argmax()], encoding='latin-1')
###################################################################################################################
id = files_db.current_file_name.str.find('deceased').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
deceasedMeanTime = deceased.Time_between_positive_and_death.mean()
deceasedMedianTime = deceased.Time_between_positive_and_death.median()
print('Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
print('Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
NN = int(deceased.Time_between_positive_and_death.max())
hh = np.histogram(deceased.Time_between_positive_and_death, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
plt.plot([deceasedMedianTime, deceasedMedianTime], [0, hh[0].max()], 'k--')
plt.text(deceasedMedianTime, hh[0].max(), ' Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
plt.plot([deceasedMeanTime, deceasedMeanTime], [0, hh[0][int(deceasedMeanTime)]], 'k--')
plt.text(deceasedMeanTime, hh[0][int(deceasedMeanTime)], ' Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to deceased [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
try:
plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
str(int(deceased.Ventilated.sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
str(int(deceased.Ventilated.sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelDeceased_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del deceased
###################################################################################################################
plt.close('all')
# Lab Test
lab_tests = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('lab_tests').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('lab_tests').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'result_date'
# lab_tests.loc[lab_tests['result_date'].isna() != False, 'result_date'] = lab_tests.loc[lab_tests['result_date'].isna() != False, 'test_date']
lab_tests = lab_tests[lab_tests['result_date'].isna() != True]
N = len(lab_tests.corona_result)
lab_tests[base] = pd.to_datetime(lab_tests[base])
lab_tests = lab_tests.sort_values([base])
possible_results = lab_tests.corona_result.unique()
FirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != False, ['result_date', 'corona_result']].reset_index()
first_grouped = FirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
first = first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('ראשון ')
del FirstTest, first_grouped
first_positive = first.xs("ראשון חיובי", level="corona_result", axis=1).values.squeeze()
first_negative = first.xs("ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_first = first.sum(axis=1).values.squeeze()
other_first = all_first - first_negative - first_positive
NotFirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != True, ['result_date', 'corona_result']].reset_index()
not_first_grouped = NotFirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
not_first = not_first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('לא ראשון ')
del NotFirstTest, not_first_grouped
not_first_positive = not_first.xs("לא ראשון חיובי", level="corona_result", axis=1).values.squeeze()
not_first_negative = not_first.xs("לא ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_not_first = not_first.sum(axis=1).values.squeeze()
other_not_first = all_not_first - not_first_positive - not_first_negative
full_lab_data = pd.concat([first.squeeze(), not_first.squeeze()], axis=1, sort=False)
# Saving full data
full_lab_data.to_csv(os.path.join(os.getcwd(), time.strftime("%d%m%Y"),
time.strftime("%d%m%Y") + 'complete_laboratory_data.csv'), encoding="windows-1255")
dateList = pd.DataFrame(lab_tests[base].unique(), columns=['Date'])
fields = ['PositiveFirst', 'NegativeFirst', 'OtherFirst', 'PositiveNotFirst', 'NegativeNotFirst', 'OtherNotFirst']
lab_data = pd.concat([dateList, pd.DataFrame(first_positive, columns=[fields[0]]),
pd.DataFrame(first_negative, columns=[fields[1]]),
pd.DataFrame(other_first, columns=[fields[2]]),
pd.DataFrame(not_first_positive, columns=[fields[3]]),
pd.DataFrame(not_first_negative, columns=[fields[4]]),
pd.DataFrame(other_not_first, columns=[fields[5]])],
axis=1, sort=False)
title = 'Israel ' + dateList.Date.max().strftime('%d/%m/%y') + ' - count of first test per person. Total tests performed ' + str(int(N))
ylabel = 'Number of individuals'
save_name = 'israelTestPerformed_'
base = 'Date'
legend = ['Positive First test, total ' + str(int(lab_data.PositiveFirst.sum())),
'Negative First test, total ' + str(int(lab_data.NegativeFirst.sum())),
'Other First test, total ' + str(int(lab_data.OtherFirst.sum())),
'Positive not a First test, total ' + str(int(lab_data.PositiveNotFirst.sum())),
'Negative not a First test, total ' + str(int(lab_data.NegativeNotFirst.sum())),
'Other not a First test, total ' + str(int(lab_data.OtherNotFirst.sum())), ]
# plot Test Performed Total
line_statistic_plot(lab_data, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot Test Performed Total Log
save_name = 'israelTestPerformed_Logy_'
line_statistic_plot(lab_data, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del lab_tests
###################################################################################################################
# Individuals
individuals = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('tested_individuals_ver').values.argmax()])
individuals_last = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('tested_individuals_subset').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('tested_individual').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'test_date'
individuals = individuals[individuals['test_date'].isna() != True]
N = len(individuals.corona_result)
individuals[base] = pd.to_datetime(individuals[base])
individuals = individuals.sort_values([base])
individuals_last[base] = pd.to_datetime(individuals_last[base])
individuals_last = individuals_last.sort_values([base])
individuals = individuals[(individuals['test_date'] >= individuals_last['test_date'].unique().min()).values != True]
individuals = pd.concat([individuals, individuals_last])
individuals['symptoms'] = individuals.loc[:, ['cough', 'fever', 'sore_throat', 'shortness_of_breath', 'head_ache']].sum(axis=1)
possible_results = individuals.corona_result.unique()
dateList = pd.DataFrame(individuals[base].unique(), columns=['Date'])
# TestIndication
PosTest = individuals.loc[individuals['corona_result'].str.contains('חיובי') != False, ['test_date', 'test_indication']].reset_index()
posindicate = PosTest.groupby(['test_date', 'test_indication'], as_index=False).count()
posindicate = posindicate.set_index(['test_date', 'test_indication']).unstack().fillna(0).astype(int)
# plot israelPositiveTestIndication
fields = ['Abroad', 'Contact with confirmed', 'Other']
title = 'Israel (data from ' + dateList.Date.max().strftime('%d/%m/%y') + ')- Positive test indication (Total tests performed ' + str(int(N)) + ')'
ylabel = 'Number of positive tests'
save_name = 'israelPositiveTestIndication_'
Abroad = posindicate.xs('Abroad', level="test_indication", axis=1).values.squeeze()
Contact = posindicate.xs('Contact with confirmed', level="test_indication", axis=1).values.squeeze()
Other = posindicate.xs('Other', level="test_indication", axis=1).values.squeeze()
legend = ['Abroad, total ' + str(int(Abroad.sum())),
'Contact with confirmed, total ' + str(int(Contact.sum())),
'Other, total ' + str(int(Other.sum()))]
pos_indicate = pd.concat([dateList, pd.DataFrame(Abroad, columns=[fields[0]]),
pd.DataFrame(Contact, columns=[fields[1]]),
| pd.DataFrame(Other, columns=[fields[2]]) | pandas.DataFrame |
import logging
import os
import warnings
from pathlib import Path
from typing import Dict, Iterable, Union
import nibabel as nib
import numpy as np
import pandas as pd
import tqdm
from nilearn.image import resample_to_img
from nipype.interfaces.ants import ApplyTransforms
from nipype.interfaces.freesurfer import (
CALabel,
MRIsCALabel,
ParcellationStats,
SegStats,
)
from connecticity.parcellation import messages
warnings.filterwarnings("ignore")
#: Default parcellation logging configuration.
LOGGER_CONFIG = dict(
filemode="w",
format="%(asctime)s - %(message)s",
level=logging.INFO,
)
#: Command template to be used to run dwi2tensor.
DWI2TENSOR_COMMAND_TEMPLATE: str = "dwi2tensor -grad {grad} {dwi} {out_file}"
#: Custom mapping of dwi2tensor keyword arguments.
TENSOR2METRIC_KWARGS_MAPPING: Dict[str, str] = {"eval": "value"}
#: QSIPrep DWI file template.
QSIPREP_DWI_TEMPLATE = "{qsiprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_space-T1w_desc-preproc_dwi.{extension}" # noqa
#: Tensor metric file template.
TENSOR_METRICS_FILES_TEMPLATE = "{dmriprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_dir-FWD_space-anat_desc-{metric}_epiref.nii.gz" # noqa
#: Parcellated tensor metrics file template.
TENSOR_METRICS_OUTPUT_TEMPLATE = "{dmriprep_dir}/sub-{participant_label}/ses-{session}/dwi/sub-{participant_label}_ses-{session}_space-anat_desc-TensorMetrics_atlas-{parcellation_scheme}_meas-{measure}.csv" # noqa: E501
#: Command template to be used to run aparcstats2table.
APARCTSTATS2TABLE_TEMPLATE = "aparcstats2table --subjects {subjects} --parc={parcellation_scheme} --hemi={hemi} --measure={measure} --tablefile={out_file}" # noqa: E501
#: Hemisphere labels in file name templates.
HEMISPHERE_LABELS: Iterable[str] = ["lh", "rh"]
#: Surface labels in file name templates.
SURFACES: Iterable[str] = ["smoothwm", "curv", "sulc"]
#: Data types in file name templates.
DATA_TYPES: Iterable[str] = ["pial", "white"]
#: Registered file name template.
REG_FILE_NAME_TEMPLATE: str = "{hemisphere_label}.sphere.reg"
#: FreeSurfer's surfaces directory name.
SURFACES_DIR_NAME: str = "surf"
#: FreeSurfer's MRI directory name.
MRI_DIR_NAME: str = "mri"
#: FreeSurfer's labels directory name.
LABELS_DIR_NAME: str = "label"
#: FreeSurfer's stats directory name.
STATS_DIR_NAME: str = "stats"
#: mris_anatomical_stats parameter keys.
STATS_KEYS: Iterable[Union[str, bool]] = [
"brainmask",
"aseg",
"ribbon",
"wm",
"transform",
"tabular_output",
]
#: mris_anatomical_stats parameter values.
STATS_VALUES: Iterable[str] = [
"brainmask.mgz",
"aseg.presurf.mgz",
"ribbon.mgz",
"wm.mgz",
"transforms/talairach.xfm",
True,
]
STATS_MEASURES: Iterable[str] = [
"area",
"volume",
"thickness",
"thicknessstd",
"meancurv",
]
STATS_NAME_TEMPLATE: str = (
"{hemisphere_label}_{parcellation_scheme}_{measure}.csv"
)
SUBCORTICAL_STATS_NAME_TEMPLATE: str = "subcortex.{parcellation_scheme}.stats"
def generate_annotation_file(
subject_dir: Path,
hemisphere_label: str,
parcellation_scheme: str,
gcs_template: str,
):
surfaces_dir = subject_dir / SURFACES_DIR_NAME
labels_dir = subject_dir / LABELS_DIR_NAME
# Check for existing file in expected output path.
out_file_name = f"{hemisphere_label}.{parcellation_scheme}.annot"
out_file = labels_dir / out_file_name
if out_file.exists():
return out_file
# If no existing output is found, create mris_ca_label input
# configuration.
reg_file_name = REG_FILE_NAME_TEMPLATE.format(
hemisphere_label=hemisphere_label
)
reg_file = surfaces_dir / reg_file_name
curv, smoothwm, sulc = [
surfaces_dir / f"{hemisphere_label}.{surface_label}"
for surface_label in SURFACES
]
hemi_gcs = gcs_template.format(hemi=hemisphere_label)
# Log annotation file generation start.
message = messages.ANNOTATION_FILE_GENERATION_START.format(
parcellation_scheme=parcellation_scheme,
subject_label=subject_dir.name,
hemisphere_label=hemisphere_label,
)
logging.info(message)
# Create interface instance, run, and return the result.
ca_label = MRIsCALabel(
canonsurf=reg_file,
subjects_dir=subject_dir.parent,
curv=curv,
smoothwm=smoothwm,
sulc=sulc,
subject_id=subject_dir.parent.name,
hemisphere=hemisphere_label,
out_file=out_file,
classifier=hemi_gcs,
seed=42,
)
ca_label.run()
return out_file
def generate_annotations(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs_template: str,
):
"""
For a single subject, produces an annotation file, in which each cortical
surface vertex is assigned a neuroanatomical label.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme
gcs_template : str
A freesurfer's .gcs template file
Returns
-------
dict
A dictionary with keys of hemispheres and values as corresponding
*.annot* files
"""
subject_dir = freesurfer_dir / subject_label
return {
hemisphere_label: generate_annotation_file(
subject_dir, hemisphere_label, parcellation_scheme, gcs_template
)
for hemisphere_label in HEMISPHERE_LABELS
}
def generate_default_args(freesurfer_dir: Path, subject_label: str) -> dict:
"""
Gather default required arguments for nipype's implementation of
FreeSurfer's *mris_anatomical_stats*.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
Returns
-------
dict
A dictionary with keys that map to nipype's required arguements
"""
subject_dir = freesurfer_dir / subject_label
surface_dir = subject_dir / SURFACES_DIR_NAME
mri_dir = subject_dir / MRI_DIR_NAME
args = {"subject_id": subject_label, "subjects_dir": freesurfer_dir}
for hemi in HEMISPHERE_LABELS:
for datatype in DATA_TYPES:
key = f"{hemi}_{datatype}"
file_name = f"{hemi}.{datatype}"
args[key] = surface_dir / file_name
for key, value in zip(STATS_KEYS, STATS_VALUES):
args[key] = mri_dir / value
return args
def map_subcortex(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs_subcrotex: str,
):
"""
For a single subject, produces an annotation file, in which each
sub-cortical surface vertex is assigned a neuroanatomical label.
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme.
gcs_subcortex : str
A freesurfer's .gcs template file.
Returns
-------
dict
A dictionary with keys of hemispheres and values as corresponding
*.annot* files.
"""
# Check for an existing annotations file.
subject_dir = freesurfer_dir / subject_label
mri_dir = subject_dir / MRI_DIR_NAME
out_file = mri_dir / f"{parcellation_scheme}_subcortex.mgz"
if out_file.exists():
return out_file
# Create a subcortical annotations file if none was found.
target = mri_dir / "brain.mgz"
transform = mri_dir / "transforms" / "talairach.m3z"
# Log subcortical annotations file generation start.
message = messages.SUBCORTICAL_ANNOTATION_FILE_GENERATION_START.format(
parcellation_scheme=parcellation_scheme,
subject_label=subject_label,
)
logging.info(message)
# Create interface instance, run, and return result.
ca_label = CALabel(
subjects_dir=freesurfer_dir,
in_file=target,
transform=transform,
out_file=out_file,
template=gcs_subcrotex,
)
ca_label.run()
return out_file
def freesurfer_subcortical_parcellation(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs_subcortex: str,
color_table: str,
):
"""
Calculates different Freesurfer-derived metrics according to subcortical
parcellation
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme
gcs_subcortex : str
A freesurfer's .gcs template file
Returns
-------
dict
A dictionary with keys corresponding to hemisphere's metrics acoording
to *parcellation_scheme*
"""
mapped_subcortex = map_subcortex(
freesurfer_dir, subject_label, parcellation_scheme, gcs_subcortex
)
subject_dir = freesurfer_dir / subject_label
stats_dir = subject_dir / STATS_DIR_NAME
file_name = SUBCORTICAL_STATS_NAME_TEMPLATE.format(
parcellation_scheme=parcellation_scheme
)
summary_file = stats_dir / file_name
if not summary_file.exists():
ss = SegStats(
segmentation_file=mapped_subcortex,
subjects_dir=freesurfer_dir,
summary_file=summary_file,
color_table_file=color_table,
exclude_id=0,
)
ss.run()
return summary_file
def freesurfer_anatomical_parcellation(
freesurfer_dir: Path,
subject_label: str,
parcellation_scheme: str,
gcs: str,
):
"""
Calculates different Freesurfer-derived metrics according to .annot files
Parameters
----------
freesurfer_dir : Path
Path to Freesurfer's outputs directory
subject_label : str
A string representing an existing subject in *freesurfer_dir*
parcellation_scheme : str
The name of the parcellation scheme.
gcs : str
A freesurfer's .gcs template file.
Returns
-------
dict
A dictionary with keys corresponding to hemisphere's metrics acoording
to *parcellation_scheme*
"""
annotations = generate_annotations(
freesurfer_dir, subject_label, parcellation_scheme, gcs
)
args = generate_default_args(freesurfer_dir, subject_label)
stats = {}
subject_dir = freesurfer_dir / subject_label
labels_dir = subject_dir / LABELS_DIR_NAME
stats_dir = subject_dir / STATS_DIR_NAME
surfaces_dir = subject_dir / SURFACES_DIR_NAME
for hemisphere_label, annotations_path in annotations.items():
stats[hemisphere_label] = {}
out_color = labels_dir / f"aparc.annot.{parcellation_scheme}.ctab"
out_table = (
stats_dir / f"{hemisphere_label}.{parcellation_scheme}.stats"
)
args["hemisphere"] = hemisphere_label
args["in_annotation"] = annotations_path
args["thickness"] = surfaces_dir / f"{hemisphere_label}.thickness"
if not out_table.exists() or not out_color.exists():
parcstats = ParcellationStats(**args)
parcstats.run()
stats[hemisphere_label]["table"] = out_table
stats[hemisphere_label]["color"] = out_color
return stats
def group_freesurfer_metrics(
subjects: list,
destination: Path,
parcellation_scheme: str,
force=True,
):
"""
Utilizes FreeSurfer's aparcstats2table to group different
FreeSurfer-derived across subjects according to *parcellation_scheme*.
Parameters
----------
subjects : list
A list of subjects located under *SUBJECTS_DIR*
destination : Path
The destination underwhich group-wise files will be stored
parcellation_scheme : str
The parcellation scheme (subjects must have
*stats/{hemi}.{parcellation_scheme}.stats* file for this to work)
"""
destination.mkdir(exist_ok=True, parents=True)
data = {}
for hemisphere_label in HEMISPHERE_LABELS:
data[hemisphere_label] = {}
for measure in STATS_MEASURES:
out_file_name = STATS_NAME_TEMPLATE.format(
hemisphere_label=hemisphere_label,
parcellation_scheme=parcellation_scheme,
measure=measure,
)
out_file = destination / out_file_name
if not out_file.exists() or force:
cmd = APARCTSTATS2TABLE_TEMPLATE.format(
subjects=" ".join(subjects),
parcellation_scheme=parcellation_scheme,
hemi=hemisphere_label,
measure=measure,
out_file=out_file,
)
os.system(cmd)
data[hemisphere_label][measure] = out_file
return data
def parcellate_image(
atlas: Path, image: Path, parcels: pd.DataFrame, np_operation="nanmean"
) -> pd.Series:
"""
Parcellates an image according to *atlas*.
Parameters
----------
atlas : Path
A parcellation atlas in *image* space
image : Path
An image to be parcellated
parcels : pd.DataFrame
A dataframe for *atlas* parcels
Returns
-------
pd.Series
The mean value of *image* in each *atlas* parcel
"""
out = | pd.Series(index=parcels.index) | pandas.Series |
""" Module for sleep periods from accelerometer """
import datetime
import numpy as np
import pandas as pd
import LAMP
from ..feature_types import primary_feature, log
from ..raw.accelerometer import accelerometer
@primary_feature(
name="cortex.feature.sleep_periods",
dependencies=[accelerometer]
)
def sleep_periods(attach=True,
**kwargs):
"""
Generate sleep periods with given data
Args:
attach (boolean):
**kwargs:
id (string): The participant's LAMP id. Required.
start (int): The initial UNIX timestamp (in ms) of the window for which the feature
is being generated. Required.
end (int): The last UNIX timestamp (in ms) of the window for which the feature
is being generated. Required.
Returns:
"""
def _expected_sleep_period(accelerometer_data_reduced):
"""
Return the expected sleep period for a set of accelerometer data
:param accelerometer_data (list)
:return _sleep_period_expted (dict): list bed/wake timestamps and mean
accel. magnitude for expected bedtime
"""
df = pd.DataFrame.from_dict(accelerometer_data_reduced)
# If we have data, we filter to remove duplicates here
if 'timestamp' in df.columns:
df = df[df['timestamp'] != df['timestamp'].shift()]
# Creating possible times for expected sleep period, which will be checked
times = [(datetime.time(hour=h, minute=m), (datetime.datetime.combine(datetime.date.today(), datetime.time(hour=h, minute=m)) + datetime.timedelta(hours=8, minutes=0)).time()) for h in range(18, 24) for m in [0, 30] ] + [(datetime.time(hour=h, minute=m), (datetime.datetime.combine(datetime.date.today(), datetime.time(hour=h, minute=m)) + datetime.timedelta(hours=8, minutes=0)).time()) for h in range(0, 4) for m in [0, 30]]
mean_activity = float('inf')
for t0, t1 in times:
if datetime.time(hour=18, minute=0) <= t0 <= datetime.time(hour=23, minute=30):
selection = | pd.concat([df.loc[t0 <= df.time, :], df.loc[df.time <= t1, :]]) | pandas.concat |
import os
from functools import partial
import logging
import pandas as pd
import numpy as np
from scipy import stats
logger = logging.getLogger('pylearn')
def rank_varset(row, rank_coefficient=200):
khat = float(row['KHAT'])
nvar = int(row['NVAR'])
return khat - (1 - khat) * nvar / (rank_coefficient - nvar - 1)
def rank_varset_assess(assess=None, rank_coefficient=200):
rank_with = partial(rank_varset, rank_coefficient=rank_coefficient)
assess['VARSETRANK'] = assess.apply(rank_with, axis=1)
return assess
def get_xvars(dfunct=None, varset=None):
"""Function returns variables names in specified varset.
Args:
dfunct (DataFrame): DFUNCT data from lda.
varset (Int): The integer representing the varset to extract.
Returns: Pandas Series
"""
if varset:
return dfunct[dfunct['VARSET3'] == varset]['VARNAMES3'].sort_values()
else:
return dfunct['VARNAMES3'].sort_values().unique()
def get_xy_summary(xy=None, dfunct=None, yvar=None):
"""Function returns mean values for each unique variable in DFUNCT.
Args:
xy (DataFrame): The xy reference data.
dfunct (DataFrame): DFUNCT data from lda.
yvar (String): Variable Name representing the Y Class.
Returns: Pandas DataFrame
"""
xvars = get_xvars(dfunct)
data = []
for xvar in xvars:
column = xy[xvar]
summary = pd.pivot_table(xy, values=column.name, index=[yvar], aggfunc=np.mean)
data.extend([{'ATTR': column.name, yvar: idx, 'MEAN': val} for idx, val in summary.iteritems()])
return pd.DataFrame(data=data)
def get_rloadrank(xy=None, dfunct=None, yvar=None):
"""Function Extends DFUNCT with rank based relative loading per varset.
Args:
xy (DataFrame): The xy reference data.
dfunct (DataFrame): DFUNCT data from lda.
yvar (String): Variable Name representing the Y Class.
Returns: Pandas DataFrame
"""
xy_summary = get_xy_summary(xy, dfunct, yvar)
ranks = []
dfunct_by_varset = dfunct.groupby(by='VARSET3')
for vset, df in dfunct_by_varset:
vdfunct = df.copy()
vdfunct.set_index('VARNAMES3', inplace=True)
xvars = vdfunct.index.values
xyref = xy.filter(items=xvars, axis=1)
vdfunct['SD'] = xyref.std()
vdfunct['B*'] = vdfunct['DFCOEF3'] * vdfunct['SD']
vdfunct['LOADING'] = np.sum(xyref.corr().mul(vdfunct['B*'].dropna(), axis=0))
vdfunct['RL'] = vdfunct['LOADING'] / vdfunct['LOADING'].max()
for xvar in xvars:
xvar_summary = xy_summary[xy_summary['ATTR'] == xvar]
vdfunct.loc[xvar, 'Y0'] = xvar_summary[xvar_summary[yvar] == 0]['MEAN'].iat[0]
vdfunct.loc[xvar, 'Y1'] = xvar_summary[xvar_summary[yvar] == 1]['MEAN'].iat[0]
z0 = np.sum(vdfunct['DFCOEF3'].mul(vdfunct['Y0'], axis=0))
z1 = np.sum(vdfunct['DFCOEF3'].mul(vdfunct['Y1'], axis=0))
vdfunct['Z0'] = z0
vdfunct['Z1'] = z1
vdfunct['DFCOEF3_ADJ'] = vdfunct['DFCOEF3'] if z1 > z0 else vdfunct['DFCOEF3'] * -1
vdfunct['Z0_ADJ'] = np.sum(vdfunct['DFCOEF3_ADJ'].mul(vdfunct['Y0'], axis=0))
vdfunct['Z1_ADJ'] = np.sum(vdfunct['DFCOEF3_ADJ'].mul(vdfunct['Y1'], axis=0))
vdfunct['RANK'] = vdfunct['RL'].rank(ascending=False)
ranks.append(vdfunct)
labels = list(dfunct.axes[1])
labels.extend(['Y0', 'Y1', 'Z0', 'Z1','DFCOEF3_ADJ', 'Z0_ADJ', 'Z1_ADJ', 'SD', 'B*', 'LOADING', 'RL', 'RANK'])
return | pd.concat(ranks) | pandas.concat |
from typing import Any, Dict, Optional, List
import argparse
import json
import os
import re
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.types import GrainType
from azureml.automl.runtime.shared.score import scoring
GRAIN = "time_series_id"
BACKTEST_ITER = "backtest_iteration"
ACTUALS = "actual_level"
PREDICTIONS = "predicted_level"
ALL_GRAINS = "all_sets"
FORECASTS_FILE = "forecast.csv"
SCORES_FILE = "scores.csv"
PLOTS_FILE = "plots_fcst_vs_actual.pdf"
RE_INVALID_SYMBOLS = re.compile("[: ]")
def _compute_metrics(df: pd.DataFrame, metrics: List[str]):
"""
Compute metrics for one data frame.
:param df: The data frame which contains actual_level and predicted_level columns.
:return: The data frame with two columns - metric_name and metric.
"""
scores = scoring.score_regression(
y_test=df[ACTUALS], y_pred=df[PREDICTIONS], metrics=metrics
)
metrics_df = pd.DataFrame(list(scores.items()), columns=["metric_name", "metric"])
metrics_df.sort_values(["metric_name"], inplace=True)
metrics_df.reset_index(drop=True, inplace=True)
return metrics_df
def _format_grain_name(grain: GrainType) -> str:
"""
Convert grain name to string.
:param grain: the grain name.
:return: the string representation of the given grain.
"""
if not isinstance(grain, tuple) and not isinstance(grain, list):
return str(grain)
grain = list(map(str, grain))
return "|".join(grain)
def compute_all_metrics(
fcst_df: pd.DataFrame,
ts_id_colnames: List[str],
metric_names: Optional[List[set]] = None,
):
"""
Calculate metrics per grain.
:param fcst_df: forecast data frame. Must contain 2 columns: 'actual_level' and 'predicted_level'
:param metric_names: (optional) the list of metric names to return
:param ts_id_colnames: (optional) list of grain column names
:return: dictionary of summary table for all tests and final decision on stationary vs nonstaionary
"""
if not metric_names:
metric_names = list(constants.Metric.SCALAR_REGRESSION_SET)
if ts_id_colnames is None:
ts_id_colnames = []
metrics_list = []
if ts_id_colnames:
for grain, df in fcst_df.groupby(ts_id_colnames):
one_grain_metrics_df = _compute_metrics(df, metric_names)
one_grain_metrics_df[GRAIN] = _format_grain_name(grain)
metrics_list.append(one_grain_metrics_df)
# overall metrics
one_grain_metrics_df = _compute_metrics(fcst_df, metric_names)
one_grain_metrics_df[GRAIN] = ALL_GRAINS
metrics_list.append(one_grain_metrics_df)
# collect into a data frame
return pd.concat(metrics_list)
def _draw_one_plot(
df: pd.DataFrame,
time_column_name: str,
grain_column_names: List[str],
pdf: PdfPages,
) -> None:
"""
Draw the single plot.
:param df: The data frame with the data to build plot.
:param time_column_name: The name of a time column.
:param grain_column_names: The name of grain columns.
:param pdf: The pdf backend used to render the plot.
"""
fig, _ = plt.subplots(figsize=(20, 10))
df = df.set_index(time_column_name)
plt.plot(df[[ACTUALS, PREDICTIONS]])
plt.xticks(rotation=45)
iteration = df[BACKTEST_ITER].iloc[0]
if grain_column_names:
grain_name = [df[grain].iloc[0] for grain in grain_column_names]
plt.title(f"Time series ID: {_format_grain_name(grain_name)} {iteration}")
plt.legend(["actual", "forecast"])
plt.close(fig)
pdf.savefig(fig)
def calculate_scores_and_build_plots(
input_dir: str, output_dir: str, automl_settings: Dict[str, Any]
):
os.makedirs(output_dir, exist_ok=True)
grains = automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES)
time_column_name = automl_settings.get(constants.TimeSeries.TIME_COLUMN_NAME)
if grains is None:
grains = []
if isinstance(grains, str):
grains = [grains]
while BACKTEST_ITER in grains:
grains.remove(BACKTEST_ITER)
dfs = []
for fle in os.listdir(input_dir):
file_path = os.path.join(input_dir, fle)
if os.path.isfile(file_path) and file_path.endswith(".csv"):
df_iter = pd.read_csv(file_path, parse_dates=[time_column_name])
for _, iteration in df_iter.groupby(BACKTEST_ITER):
dfs.append(iteration)
forecast_df = | pd.concat(dfs, sort=False, ignore_index=True) | pandas.concat |
import time
from definitions_toxicity import ROOT_DIR
import pandas as pd
from src.preprocessing import custom_transformers as ct
from sklearn.pipeline import Pipeline
import nltk
import pickle
from src.preprocessing.text_utils import tokenize_by_sentences, fit_tokenizer, tokenize_text_with_sentences
import numpy as np
from sklearn.model_selection import train_test_split
from skmultilearn.model_selection import iterative_train_test_split
def load_text_data(data_filepath, text_column, copy_text_column=False, copy_col_name=None):
"""Data loader
:param data_filepath: file path to data
:type data_filepath: string
:param text_column: name of column with text
:type text_column: string
:param copy_text_column: whether to copy column with text into new one, defaults to False
:type copy_text_column: bool, optional
:param copy_col_name: name of new column for copying original one, defaults to None
:type copy_col_name: boolean, optional
:return: dataframe with text data
:rtype: Pandas dataframe
"""
df = pd.read_csv(ROOT_DIR + data_filepath)
if copy_text_column:
df[copy_col_name] = df[text_column].copy()
return df
def split_dataset(df, columns, labels, split='train_test', test_size=0.3, random_state=111, stratify=False, multiclass=False):
"""Data split function that can use both sklearn train_test_split and skmultilearn iterative_train_test_split for cases with imbalanced multilabel data.
:param df: dataframe that requires splitting into train, test and possibly validation sets
:type df: Pandas dataframe
:param columns: names of columns that will be left in train/test/valiation sets
:type columns: list
:param labels: name of columns that represent labels
:type labels: list
:param split: selection of how to split dataset: train_test or train_val_test, defaults to 'train_test'
:type split: str, optional
:param test_size: fraction of whole dataset to be used as test set, defaults to 0.3
:type test_size: float, optional
:param random_state: random state for splitting, defaults to 111
:type random_state: int, optional
:param stratify: whether to stratify the data, defaults to False
:type stratify: bool, optional
:param multiclass: whether dataset has multiclass labels, defaults to False
:type multiclass: bool, optional
:return: train, test and optionally validation sets
:rtype: Pandas dataframe
"""
# split on train and validation sets
assert split == 'train_test' or split == 'train_val_test', "Split attribute accepts only 'train_test' or 'train_val_test'"
strat = None
if stratify:
strat = df[labels]
if not multiclass:
x_tr, x_test, y_tr, y_test = train_test_split(df[columns],
df[labels],
test_size=test_size,
random_state=random_state,
stratify=strat)
else:
x_tr, y_tr, x_test, y_test = iterative_train_test_split(df[columns].values,
df[labels].values,
test_size=test_size)
x_tr = pd.DataFrame(x_tr, columns=columns)
y_tr = pd.DataFrame(y_tr, columns=labels)
x_test = pd.DataFrame(x_test, columns=columns)
y_test = pd.DataFrame(y_test, columns=labels)
if split == 'train_test':
return x_tr, x_test, y_tr, y_test
elif split == 'train_val_test':
if stratify:
strat = y_test
if not multiclass:
x_val, x_test, y_val, y_test = train_test_split(x_test,
y_test,
test_size=0.5,
random_state=random_state,
stratify=strat)
else:
x_val, y_val, x_test, y_test = iterative_train_test_split(x_test.values,
y_test.values,
test_size=0.5)
x_val = pd.DataFrame(x_val, columns=columns)
y_val = pd.DataFrame(y_val, columns=labels)
x_test = | pd.DataFrame(x_test, columns=columns) | pandas.DataFrame |
import warnings
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseReduceTests(BaseExtensionTests):
"""
Reduction specific tests. Generally these only
make sense for numeric/boolean operations.
"""
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
| tm.assert_almost_equal(result, expected) | pandas._testing.assert_almost_equal |
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal, assert_series_equal
import cvxpy as cvx
from zipline.optimize import MaximizeAlpha, TargetWeights
from zipline.optimize.constraints import (
Basket, CannotHold, DollarNeutral, FactorExposure, FixedWeight, Frozen,
LongOnly, MaxGrossExposure, NetExposure, NetGroupExposure, NotConstrained,
NotExceed, NotLessThan, Pair, PositionConcentration, ReduceOnly, ShortOnly)
class TestObjectives(unittest.TestCase):
def setUp(self):
stocks = [str(i).zfill(6) for i in range(1, 8)]
alphas = pd.Series([-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], index=stocks)
# 用于调整特定目标
# alphas = pd.Series([-0.2, -0.1, -0.1, 0, 0.1, 0.2, 0.3], index=stocks)
weights = pd.Series(alphas.values, index=stocks)
labels = {
'000001': 'A',
'000002': 'A',
'000003': 'B',
'000004': 'B',
'000005': 'C',
'000006': 'C',
'000007': 'D'
}
min_weights = {'A': -0.1, 'B': -0.2, 'C': -0.3, 'D': -0.4}
max_weights = {'A': 0.1, 'B': 0.2, 'C': 0.3, 'D': 0.4}
factors = ['f1', 'f2', 'f3']
data = [[3, 2, 3], [5, 1, 5], [2, 3, 1], [3, 5, 2], [4, 2, 3],
[3, 4, 4]] #, [2, 5, 3]],
loadings = pd.DataFrame(data=data, index=stocks[:6], columns=factors)
min_exposures = pd.Series([2, 1, 2], index=factors)
max_exposures = pd.Series([4, 5, 4], index=factors)
self.stocks = stocks
self.alphas = alphas
self.weights = weights
self.labels = labels
self.min_weights = min_weights
self.max_weights = max_weights
self.loadings = loadings
self.min_exposures = min_exposures
self.max_exposures = max_exposures
# 假设初始权重股票数量少于目标权重的股票数量(4只股票)
init_weights = pd.Series([-0.6, -0.1, 0.1, 0.55], index=stocks[:4])
self.init_weights = init_weights
def check(self, cons_obj, desired_value, desired_weights, init_w_s=None):
# 基础限制:单个权重不超过0.5,空头不超过-1.0,总体不超过1.5
obj = MaximizeAlpha(self.alphas)
cvx_objective = obj.to_cvxpy(None)
w = obj.new_weights
w_s = obj.new_weights_series
constraints = [
con for con in cons_obj.gen_constraints(w, w_s, init_w_s)
]
constraints += [cvx.norm(w, 1) <= 1.5, w <= 0.5, w >= -1.0]
prob = cvx.Problem(cvx_objective, constraints)
prob.solve()
# print('求解状态', prob.status)
# print('最优解', prob.value)
# print('权重序列\n', obj.new_weights_value)
# print('初始权重序列\n', init_w_s)
self.assertAlmostEqual(desired_value, prob.value, 2)
assert_series_equal(
obj.new_weights_value,
desired_weights,
check_exact=False,
check_less_precise=3) # 精度:小数点后3位
def test_MaxGrossExposure(self):
cons_obj = MaxGrossExposure(1.5)
desired_value = 0.45
desired_weights = pd.Series([-1.0] + [0.0] * 5 + [0.5], index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_NetExposure(self):
cons_obj = NetExposure(-0.1, 0.1)
desired_value = 0.43
desired_weights = pd.Series(
[-0.80] + [0.0] * 4 + [0.2, 0.5], index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_DollarNeutral(self):
cons_obj = DollarNeutral()
desired_value = 0.33
desired_weights = pd.Series(
[-0.25] + [0.0] * 2 + [0.25, 0, 0.5, 0.5], index=self.stocks)
self.check(cons_obj, desired_value, desired_weights, self.init_weights)
def test_NetGroupExposure_1(self):
"""检验输入"""
with self.assertRaises(TypeError):
NetGroupExposure('a', 'b', 'c')
# with self.assertRaises(AssertionError):
# labels = {'AAPL': 'TECH', 'MSFT': 'TECH', 'TSLA': 'CC', 'GM': 'CC'}
# # 限定不平衡
# min_exposures = {'TECH': -0.5, 'CC': -0.25}
# max_exposures = {'TECH': 0.5}
# NetGroupExposure(labels, min_exposures, max_exposures)
with self.assertRaises(AssertionError):
labels = {'AAPL': 'TECH', 'MSFT': 'TECH', 'TSLA': 'CC', 'GM': 'CC'}
min_exposures = {'TECH': -0.5, 'CC': -0.25}
max_exposures = {'TECH': 0.5, 'CC': -0.5} # 最大值小于最小值
ne = NetGroupExposure(labels, min_exposures, max_exposures)
self.check(ne, 1, 1, 1)
def test_NetGroupExposure_2(self):
"""分组净多头单边限制"""
min_weights = pd.Series(
[0.0, 0.1, 0.2, 0.3], index=self.min_weights.keys())
cons_obj = NetGroupExposure(self.labels, min_weights, self.max_weights)
desired_value = 0.21
# 精度累死
desired_weights = pd.Series(
[-0.1612, 0.1612, -0.1207, 0.2207, -0.068, 0.368, 0.400],
index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_NetGroupExposure_3(self):
"""分组净空头单边限制"""
# 最大值负数
max_weights = pd.Series([-0.05] * 4, index=self.max_weights.keys())
cons_obj = NetGroupExposure(self.labels, self.min_weights, max_weights)
desired_value = 0.08
desired_weights = pd.Series(
[-0.2908, 0.1908, -0.3606, 0.1606, -0.2485, 0.1985, -0.05],
index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_NetGroupExposure_4(self):
"""分组双边限制(下限为负,上限为正)"""
cons_obj = NetGroupExposure(self.labels, self.min_weights,
self.max_weights)
desired_value = 0.25
desired_weights = pd.Series(
[-0.2028, 0.1029, -0.284156, 0.084156, -0.062988, 0.3629, 0.4],
index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_NetGroupExposure_5(self):
"""取消单组最大或最小限制"""
min_weights = pd.Series(
[-0.3, 0.1, 0., 0.3], index=self.min_weights.keys())
max_weights = pd.Series([0.35] * 4, index=self.max_weights.keys())
# 取消A组最小限制
min_weights.loc['A'] = NotConstrained # 可多分配空头
# 取消C组最大限制
max_weights.loc['C'] = NotConstrained
cons_obj = NetGroupExposure(self.labels, min_weights, max_weights)
desired_value = 0.41
desired_weights = pd.Series(
[-1, -0.000197, -0., 0.1, 0, 0.0498, 0.35], index=self.stocks)
self.check(cons_obj, desired_value, desired_weights)
def test_PositionConcentration_0(self):
"""验证输入及配置"""
min_weights = | pd.Series([-0.2, 0.1, 0.4], index=['000001', '000003','000004']) | pandas.Series |
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
full_data = pd.read_csv(filename).drop_duplicates().dropna()
full_data = full_data.loc[full_data["Month"] >= 1]
full_data = full_data.loc[full_data["Day"] >= 1]
full_data = full_data.loc[full_data["Year"] >= 1995]
full_data = full_data.loc[full_data["Temp"] >= -10]
full_data['Date'] = | pd.to_datetime(full_data['Date'], format='%Y-%m-%d') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 19:53:58 2020
@author: chaowu
"""
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import matplotlib.cbook as cbook
import matplotlib.dates as mdates
from scipy.signal import savgol_filter
def smooth_list(l, window=3, poly=1):
return savgol_filter(l, window, poly)
df = | pd.read_csv("data/us-counties.csv") | pandas.read_csv |
import os
import pandas as pd
def _gen_photo_df_(photo_dir):
"""
"""
photo_dict = {"file_name": [], "dir_path": []}
# may have to pass photo_dir as a list to iterate through
# i.e. in case there are multiple locations
for root, _, files in os.walk(photo_dir):
for f in files:
photo_dict["file_name"].append(f)
photo_dict["dir_path"].append(root)
photo_df = | pd.DataFrame(photo_dict) | pandas.DataFrame |
import pandas as pd
import csv
import math
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#################################################################
# #
# #
# Load Data #
# #
# #
#################################################################
# Reads the data from file.
data = pd.read_csv("gcse-english-and-maths-national-data-2019-20.csv")
# Load required columns
ethnicity = data["Ethnicity"]
gender = data["Gender"]
freeSchoolMeals = data["FSM"]
senType = data["SEN_type"]
senGroup = data["SEN_grouping"]
admissionType = data["Admission_type"]
schoolCharacteristic = data["School_characteristic"]
religion = data["Religious_denomination"]
value = data["Value"]
#################################################################
# #
# #
# Menu Function #
# #
# #
#################################################################
def menu(ethnicity, gender, freeSchoolMeals, senType, senGroup, admissionType, schoolCharacteristic, religion, value):
repeat=True
while repeat==True:
menuSelect=0
print("Enter a number between 1-4 or 5 to exit:\n1:Keyword search\n2:Co Efficient\n3:Total Passing\n4:Averages")
menuSelect=input()
if menuSelect == "1":
print("Keyword Search")
keywordSearch()
repeat = True
elif menuSelect == "2":
print ("Co Efficient")
CoefficientOfDev(ethnicity, gender, freeSchoolMeals, senType, senGroup, admissionType, schoolCharacteristic, religion, value)
repeat =True
elif menuSelect == "3":
print ("Total Passing")
TotalPassing(ethnicity, gender, freeSchoolMeals, senType, senGroup, admissionType, schoolCharacteristic, religion, value)
repeat = True
elif menuSelect == "4":
print ("Averages")
averages()
repeat = True
elif menuSelect == "5":
print("menu exit")
repeat = False
else:
print ("Invalid answer, please re enter")
repeat = True
#################################################################
# #
# #
# Calculate Coefficient of Standard Deviation #
# #
# #
#################################################################
def CoefficientOfDev(ethnicity, gender, freeSchoolMeals, senType, senGroup, admissionType, schoolCharacteristic, religion, value):
# Calculates coeficient of variation.
# Creates Series
CoefVariableList = []
CoefValueList = []
# Calculated Coefficient
def myCoef(myInput):
mean = 0
total = 0
counter = 0
# Calculates total of all the values of myInput
for i, val in myInput.iteritems():
if myInput[i] != -1:
mean = mean + myInput[i]
counter = counter + 1
# Calculates mean
mean = mean/counter
# Initiates count to 0
count = 0
# Appends to myInput2
myInput2 = []
for i, val in myInput.iteritems():
myInput2.append((myInput[i]-mean) ** 2)
# Adds up total and increases count
for i, val in myInput.iteritems():
total = myInput2[i] + total
count = count + 1
# Calculates mean
next = total/count
#Calculates square root of mean
next = math.sqrt(total/count)
# Divides by mean
next = next/mean
# Returns the coefficient of standard deviation
return next
# Calculates the coefficient of standard deviation for Ethnicity
def ethnicityCoef(myVariable):
newList = []
for i, val in ethnicity.iteritems():
if ethnicity[i] == myVariable:
newList.append(value[i])
print(myVariable, end=":\t\t")
newList2 = | pd.Series(newList) | pandas.Series |
# First Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
# Read CSV File For RAW Heating And Electrical Consumption Pattern From Scraped Data
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\Final\\Code and data\\Data\\Whole_scraped_data\\Total-Load-Whole-Premise.csv")
# Delete additional datas in RAW File
# Delete Extra Columns
df.drop('Date Start',axis=1,inplace=True)
df.drop('(Weekdays) or (Weekends and Holidays)',axis=1,inplace=True)
df.drop('Demand',axis=1,inplace=True)
# Delete Extra Raws
# Create an index to filter raws faster than normal for loop!
df['keep']=np.where( df['City'] == 'City', 1, 0)
df=df[df['keep']==0]
# Delete Auxilary index Column
df.drop('keep',axis=1,inplace=True)
# Change data type to float
df[['HR1','HR2','HR3','HR4','HR5','HR6','HR7','HR8','HR9','HR10','HR11','HR12','HR13','HR14','HR15','HR16','HR17','HR18','HR19','HR20','HR21','HR22','HR23','HR24']] = df[['HR1','HR2','HR3','HR4','HR5','HR6','HR7','HR8','HR9','HR10','HR11','HR12','HR13','HR14','HR15','HR16','HR17','HR18','HR19','HR20','HR21','HR22','HR23','HR24']].astype('float64')
# Convert Sqft To Sqm Sqft/10.764=Sqm
sqft_coef = 10.76391041671
#Convert wh/Sqft To wh/Sqm
df.loc[:,'HR1':'HR24']=df.loc[:,'HR1':'HR24']/(sqft_coef)
# Create 2 Column for State And City
df['city_name'] = df.iloc[:, 0]
df['city_state'] = df.iloc[:, 0]
# Split City and State
for i in range (0,120450):
ct = df.iloc[i, 0]
ct = ct.split(',', 1)
ctname = ct[0]
ctstate = ct[1]
df.iloc[i, 28] = ctname
df.iloc[i, 29] = ctstate
# Save File step1
df.to_csv(path_or_buf="C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step1.csv")
########################################################################################################
########################################################################################################
########################################################################################################
# Read New Modified CSV File
df = pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\step1.csv")
# Delete Old Index Column
df.drop('Unnamed: 0',axis=1,inplace=True)
#Create Day index
day=[]
for i in range (1,331):
for j in range (1,366):
day.append(j)
day_no=pd.Series(day)
df['day_num']=day_no
#Load Cities List
ct = | pd.read_csv("C:\\Users\\PowerMan\\Desktop\\KASR\\Final\\Code and data\\Codes\\datamunging\\sourcecity.csv") | pandas.read_csv |
import sqlite3
import glob
import pandas as pd
import sys
import time
import datetime
import os
import numpy as np
def main():
# save_irradiance_to_pickle_agg_by_station()
# save_irradiance_to_pickle_agg_by_day()
# plot_station_data()
load_irradiance_agg_by_station()
load_irradiance_agg_by_day()
def plot_station_data():
df = pd.read_csv('TMY3_StationsMeta.csv')
df.plot(x='Longitude', y='Latitude')
def load_irradiance_agg_by_station():
df = pd.read_pickle('tmy3_agg_by_station.pkl')
df.plot(x='long', y='lat')
def load_irradiance_agg_by_day():
df = pd.read_pickle('tmy3_agg_by_day.pkl')
df.plot(x='long', y='lat')
def save_irradiance_to_pickle_agg_by_day():
start, start_readable = time.time(), datetime.datetime.now()
print(f'Started at {start_readable}')
i, num_files = 0, len(list(glob.iglob('alltmy3a/*.csv')))
df_global = pd.DataFrame(columns=['date', 'GHI (W/m^2)', 'usaf', 'name', 'state', 'lat', 'long', 'elev'])
for filepath in glob.iglob('alltmy3a/*.csv'):
df_data = | pd.read_csv(filepath, header=1, parse_dates=[[0,1]]) | pandas.read_csv |
import json
import mmap
import os
import random
import re
from collections import Counter
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from tqdm import tqdm
class SUBEVENTKG_Processor(object):
"""
对EVENTKG数据集取适用于知识表示任务的子数据集
原数据集地址链接在https://eventkg.l3s.uni-hannover.de/data.html,采用3.0版本的数据包
"""
def __init__(self,
events_nq_path,
entities_nq_path,
relations_base_nq_path,
relations_events_other_nq_path,
relations_entities_temporal_nq_path,
processed_entities_path,
processed_events_path,
processed_event_event_path,
processed_event_entity_path,
processed_entity_entity_path,
filt_event_event_path,
filt_event_entity_path,
filt_entity_entity_path,
event_node_count_path,
entity_node_count_path,
event_rdf2name_path,
entity_rdf2name_path,
relation_rdf2name_path,
event_lut_path,
entity_lut_path,
relation_lut_path,
event_degree_list,
entity_degree_list
):
"""
Args:
events_nq_path:事件节点的原始数据集
entities_nq_path:实体节点的原始数据集
relations_base_nq_path:事件-事件(无时间信息)原始数据集
relations_events_other_nq_path:事件-实体,实体-事件原始数据集
relations_entities_temporal_nq_path:实体-实体(有时间信息)原始数据集
processed_entities_path:实体的字典,rdf->count(0)
processed_events_path:事件的字典,rdf->count(0)
processed_event_event_path:转化为五元组格式的 事件-事件
processed_event_entity_path:转化为五元组格式的 事件-实体
processed_entity_entity_path:转化为五元组格式的 实体-实体
filt_event_event_path:过滤后的 事件-事件 五元组
filt_event_entity_path:过滤后的 事件-实体 五元组
filt_entity_entity_path:过滤后的 实体-实体 五元组
event_node_count_path:统计出来的事件节点个数
entity_node_count_path:统计出来的实体节点个数
event_rdf2name_path:事件rdf转name
entity_rdf2name_path:实体rdf转name
relation_rdf2name_path:关系rdfname
event_lut_path:事件查找表路径
entity_lut_path:实体查找表路径
relation_lut_path:关系查找表路径
event_degree_list:事件度的列表
entity_degree_list:实体度的列表
"""
self.raw_events_path = events_nq_path
self.raw_entities_path = entities_nq_path
self.raw_event_event_path = relations_base_nq_path
self.raw_event_entity_path = relations_events_other_nq_path
self.raw_entity_entity_path = relations_entities_temporal_nq_path
self.processed_entities_path = processed_entities_path
self.processed_events_path = processed_events_path
self.processed_event_event_path = processed_event_event_path
self.processed_event_entity_path = processed_event_entity_path
self.processed_entity_entity_path = processed_entity_entity_path
self.filt_event_event_path = filt_event_event_path
self.filt_event_entity_path = filt_event_entity_path
self.filt_entity_entity_path = filt_entity_entity_path
self.event_node_count_path = event_node_count_path
self.entity_node_count_path = entity_node_count_path
self.event_rdf2name_path = event_rdf2name_path
self.entity_rdf2name_path = entity_rdf2name_path
self.relation_rdf2name_path = relation_rdf2name_path
self.event_lut_path = event_lut_path
self.entity_lut_path = entity_lut_path
self.relation_lut_path = relation_lut_path
self.event_degree_list = event_degree_list
self.entity_degree_list = entity_degree_list
self.entity_dict = None
self.event_dict = None
self.rdf_triplets_event_event = None
self.rdf_triplets_event_entity = None
self.rdf_triplets_entity_entity = None
self.filt_triplets_event_event = None
self.filt_triplets_event_entity = None
self.filt_triplets_entity_entity = None
self.event_rdf2name_dict = dict()
self.entity_rdf2name_dict = dict()
self.relation_rdf2name_dict = dict()
self.filt_event_set = set()
self.filt_entity_set = set()
self.filt_relation_set = set()
seed = 1
random.seed(seed)
np.random.seed(seed)
def _get_num_lines(self, file_path):
"""
统计txt文件的行数
:param file_path:待统计行数的txt文件路径
"""
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
def create_entities_index(self, reprocess=True, describe=True):
"""
建立{实体rdf:count}字典,这个实体是全集
Args:
reprocess:True为重新处理
describe:True为显示数据集信息
"""
if reprocess:
self.entity_dict = dict()
print("processing entities index...")
with open(self.raw_entities_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_entities_path)):
line = line.strip().split(" ")
entity = line[0]
if entity not in self.entity_dict.keys():
self.entity_dict[entity] = 0
json.dump(self.entity_dict, open(self.processed_entities_path, "w"), indent=4, sort_keys=True)
print("processed_entities_dict has been saved in {}".format(self.processed_entities_path))
else:
if os.path.exists(self.processed_entities_path):
print("loading entities index...")
with open(self.processed_entities_path) as file:
self.entity_dict = json.load(file)
print("loading entities index succeed!")
else:
raise FileNotFoundError("processed_entities_path does not exists!")
if describe:
print("entities_dict_len", len(self.entity_dict))
def create_events_index(self, reprocess=True, describe=True):
"""
建立{事件rdf:count}字典,这个事件是全集
"""
if reprocess:
self.event_dict = dict()
print("processing events index...")
with open(self.raw_events_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_events_path)):
line = line.strip().split(" ")
event = line[0]
if event not in self.event_dict.keys():
self.event_dict[event] = 0
json.dump(self.event_dict, open(self.processed_events_path, "w"), indent=4, sort_keys=True)
print("processed_events_dict has been saved in {}".format(self.processed_events_path))
else:
if os.path.exists(self.processed_events_path):
print("loading events index...")
with open(self.processed_events_path) as file:
self.event_dict = json.load(file)
print("loading events index succeed!")
else:
raise FileNotFoundError("processed_entities_path does not exists!")
if describe:
print("events_dict_len", len(self.event_dict))
def event_event_raw2df(self, reprocess=True, describe=True):
"""
找出事件与事件的hassubevent,nextevent,previousevent三种关系,并转化成dataframe格式保存
原始格式
事件 关系 事件
存储格式
事件 关系 事件 开始时间 结束时间 (事件和事件三元组没有时间信息,表示为-1)
"""
if reprocess:
df_lines = []
with open(self.raw_event_event_path, "r", encoding="utf-8") as file:
print("processing event_event_raw2df...")
for line in tqdm(file, total=self._get_num_lines(self.raw_event_event_path)):
line = line.strip().split(" ")
if line[1] == "<http://dbpedia.org/ontology/nextEvent>" or \
line[1] == "<http://dbpedia.org/ontology/previousEvent>" or \
line[1] == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasSubEvent>":
head = line[0]
relation = line[1]
tail = line[2]
df_lines.append([head, relation, tail, -1, -1])
self.rdf_triplets_event_event = pd.DataFrame(df_lines)
self.rdf_triplets_event_event.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.rdf_triplets_event_event.to_csv(self.processed_event_event_path)
print("rdf_triplets_event_event has been saved in {}".format(self.processed_event_event_path))
else:
if os.path.exists(self.processed_event_event_path):
print("loading event_event_raw2df...")
self.rdf_triplets_event_event = pd.read_csv(self.processed_event_event_path)
print("loading event_event_raw2df succeed!")
else:
raise FileNotFoundError("processed_event_event_path does not exists!")
if describe:
print("rdf_triplets_event_event_len", len(self.rdf_triplets_event_event))
def _node_relation_datatype_raw2df(self,
reprocess=True,
describe=True,
datatype=None,
raw_data_path=None,
saved_path=None):
def init_relation_node_dict(relation_node_dict, raw_data_path):
"""嵌套字典初始化"""
with open(raw_data_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(raw_data_path)):
line = line.strip().split(" ")
relation_node = line[0]
if relation_node not in relation_node_dict.keys():
relation_node_dict[relation_node]["head"] = -1
relation_node_dict[relation_node]["relation"] = -1
relation_node_dict[relation_node]["tail"] = -1
relation_node_dict[relation_node]["start_time"] = -1
relation_node_dict[relation_node]["end_time"] = -1
return relation_node_dict
def add_value_relation_node_dict(relation_node_dict, raw_data_path):
"""嵌套字典添加值"""
with open(raw_data_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(raw_data_path)):
line = line.strip().split(" ")
relation_node = line[0]
arrow = line[1]
value = line[2]
if arrow == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#subject>":
relation_node_dict[relation_node]["head"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/roleType>":
relation_node_dict[relation_node]["relation"] = value
if arrow == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#object>":
relation_node_dict[relation_node]["tail"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasBeginTimeStamp>":
relation_node_dict[relation_node]["start_time"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasEndTimeStamp>":
relation_node_dict[relation_node]["end_time"] = value
return relation_node_dict
if reprocess:
relation_node_dict = defaultdict(dict)
print("processing {} _raw2df...".format(datatype))
relation_node_dict = init_relation_node_dict(relation_node_dict, raw_data_path)
relation_node_dict = add_value_relation_node_dict(relation_node_dict, raw_data_path)
# 嵌套字典转dataframe
df_lines = []
for key in tqdm(relation_node_dict.keys()):
df_lines.append([relation_node_dict[key]["head"],
relation_node_dict[key]["relation"],
relation_node_dict[key]["tail"],
relation_node_dict[key]["start_time"],
relation_node_dict[key]["end_time"]])
df = pd.DataFrame(df_lines)
df.columns = ["head", "relation", "tail", "start_time", "end_time"]
if datatype == "event_entity":
self.rdf_triplets_event_entity = df
if datatype == "entity_entity":
self.rdf_triplets_entity_entity = df
df.to_csv(saved_path)
print("rdf_triplets_{} has been saved in {}".format(datatype, saved_path))
else:
if os.path.exists(saved_path):
print("loading {}_raw2df...".format(datatype))
df = pd.read_csv(saved_path)
if datatype == "event_entity":
self.rdf_triplets_event_entity = df
if datatype == "entity_entity":
self.rdf_triplets_entity_entity = df
print("loading {}_raw2df succeed!".format(datatype))
else:
raise FileNotFoundError("processed_{}_path does not exists!".format(datatype))
if describe:
print("rdf_triplets_{}_len".format(datatype), len(df))
def event_entity_raw2df(self, reprocess=True, describe=True):
"""
找出事件与实体,实体与事件的所有关系,并转化成dataframe格式保存
原始格式--》嵌套字典格式--》存储格式dataframe
原始格式
relation结点 subject event/entity (头结点)
relation结点 object event/entity (尾结点)
relation结点 roleType 比如参与者(关系)
relation结点 hasbegintimestamp 时间
relation结点 hasendtimestamp 时间
嵌套字典格式
{relation结点:{头结点,关系,尾结点,开始时间,结束时间}。。。。。。。。。。}
存储格式
事件/实体 关系 实体/事件 开始时间 结束时间 如果有空值,则用-1表示,多个关系则随机选取一个
"""
self._node_relation_datatype_raw2df(reprocess=reprocess,
describe=describe,
datatype="event_entity",
raw_data_path=self.raw_event_entity_path,
saved_path=self.processed_event_entity_path)
def entity_entity_raw2df(self, reprocess=True, describe=True):
"""
找出实体与实体的所有关系,并转化成dataframe格式保存
原始格式--》嵌套字典格式--》存储格式dataframe
原始格式
relation结点 subject entity (头结点)
relation结点 object entity (尾结点)
relation结点 roleType 比如参与者(关系)
relation结点 hasbegintimestamp 时间
relation结点 hasendtimestamp 时间
嵌套字典格式
{relation结点:{头结点,关系,尾结点,开始时间,结束时间}。。。。。。。。。。}
存储格式
事件/实体 关系 实体/事件 开始时间 结束时间 如果有空值,则用-1表示,多个关系则随机选取一个
"""
self._node_relation_datatype_raw2df(reprocess=reprocess,
describe=describe,
datatype="entity_entity",
raw_data_path=self.raw_entity_entity_path,
saved_path=self.processed_entity_entity_path)
def count_event_node_num(self, reprocess=True,
describe=True,
event_event=True,
event_entity=True):
"""
统计事件节点的度
:param reprocess: True重新处理
:param describe: True展示数据描述
:param event_event: True统计event-event数据的事件节点数据量
:param event_entity: True统计event-entity数据的事件节点数据量
:return: None
"""
def count_node(rdf_triplets):
for i, row in tqdm(rdf_triplets.iterrows(), total=len(rdf_triplets)):
if row["head"] in self.event_dict.keys():
self.event_dict[row["head"]] = self.event_dict[row["head"]] + 1
if row["tail"] in self.event_dict.keys():
self.event_dict[row["tail"]] = self.event_dict[row["tail"]] + 1
def show(degree):
print("event_node degree > %d " % (degree), "num", sum(np.array(list(node_rank)) > degree), "percent",
"%.2f%%" % (sum(np.array(list(node_rank)) > degree) / event_len * 100))
if reprocess:
if event_event:
count_node(self.rdf_triplets_event_event)
if event_entity:
count_node(self.rdf_triplets_event_entity)
json.dump(self.event_dict, open(self.event_node_count_path, "w"), indent=4, sort_keys=True)
print("event_node_count has been saved in {}".format(self.event_node_count_path))
else:
if os.path.exists(self.event_node_count_path):
print("loading event_node_count...")
with open(self.event_node_count_path) as file:
self.event_dict = json.load(file)
print("loading event_node_count succeed!")
else:
raise FileNotFoundError("event_node_count_path does not exists!")
if describe:
print("top 10 event_node:")
count_node_rank_item = sorted(self.event_dict.items(), key=lambda x: x[1], reverse=True)
print(count_node_rank_item[:10])
node_rank = sorted(self.event_dict.values(), reverse=True)
event_len = sum(np.array(list(node_rank)) > 0)
print("all_event_num", event_len)
# interval=math.floor(event_len/100)
# sample_index=np.arange(100)*interval
# sample_rank=np.array(list(node_rank))[sample_index]
# print(sample_rank)
for degree in self.event_degree_list:
show(degree)
def count_entity_node_num(self,
reprocess=True,
describe=True,
entity_entity=True,
event_entity=True):
"""
统计实体节点的度
:param reprocess: True重新处理
:param describe: True展示数据描述
:param entity_entity: True统计entity-entity数据的事件节点数据量
:param event_entity: True统计event-entity数据的事件节点数据量
:return: None
"""
def count_node(rdf_triplets):
for i, row in tqdm(rdf_triplets.iterrows(), total=len(rdf_triplets)):
if row["head"] in self.entity_dict.keys():
self.entity_dict[row["head"]] = self.entity_dict[row["head"]] + 1
if row["tail"] in self.entity_dict.keys():
self.entity_dict[row["tail"]] = self.entity_dict[row["tail"]] + 1
def show(degree):
print("entity_node degree > %d " % (degree), "num", sum(np.array(list(node_rank)) > degree), "percent",
"%.2f%%" % (sum(np.array(list(node_rank)) > degree) / entity_len * 100))
if reprocess:
if entity_entity:
count_node(self.rdf_triplets_entity_entity)
if event_entity:
count_node(self.rdf_triplets_event_entity)
json.dump(self.entity_dict, open(self.entity_node_count_path, "w"), indent=4, sort_keys=True)
print("entity_node_count has been saved in {}".format(self.entity_node_count_path))
else:
if os.path.exists(self.entity_node_count_path):
print("loading entity_node_count...")
with open(self.entity_node_count_path) as file:
self.entity_dict = json.load(file)
print("loading entity_node_count succeed!")
else:
raise FileNotFoundError("entity_node_count_path does not exists!")
if describe:
print("top 10 entity_node:")
count_node_rank_item = sorted(self.entity_dict.items(), key=lambda x: x[1], reverse=True)
print(count_node_rank_item[:10])
node_rank = self.entity_dict.values()
entity_len = sum(np.array(list(node_rank)) > 0)
print("all_entity_num", entity_len)
# interval=math.floor(entity_len/100)
# sample_index=np.arange(100)*interval
# sample_rank=np.array(list(node_rank))[sample_index]
# print(sample_rank)
for degree in self.entity_degree_list:
show(degree)
def filt_event_event(self, event_degree, reprocess=True, describe=True):
"""
事件-事件 三元组过滤
:param event_degree: 保留事件节点的度大于degree 的三元组
"""
if reprocess:
filt_event_set = set([key for key, value in self.event_dict.items() if value > event_degree])
print(len(filt_event_set))
self.filt_triplets_event_event = list()
for i, row in tqdm(self.rdf_triplets_event_event.iterrows(), total=len(self.rdf_triplets_event_event)):
if row["head"] in filt_event_set and row["tail"] in filt_event_set:
self.filt_triplets_event_event.append(row[1:])
self.filt_event_set.add(row["head"])
self.filt_event_set.add(row["tail"])
self.filt_relation_set.add(row["relation"])
self.filt_triplets_event_event = pd.DataFrame(self.filt_triplets_event_event)
self.filt_triplets_event_event.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.filt_triplets_event_event.to_csv(self.filt_event_event_path)
print("filt_triplets_event_event has been saved in {}".format(self.filt_event_event_path))
else:
if os.path.exists(self.filt_event_event_path):
print("loading {}...".format(self.filt_event_event_path))
self.filt_triplets_event_event = pd.read_csv(self.filt_event_event_path)
print("loading {} succeed!".format(self.filt_triplets_event_event))
else:
raise FileNotFoundError("{} does not exists!".format(self.filt_triplets_event_event))
if describe:
print("filt_triplets_event_event_len", len(self.filt_triplets_event_event))
print("raw_triplets_event_event_len", len(self.rdf_triplets_event_event))
print("filt_percentage %.2f%%" % (
len(self.filt_triplets_event_event) * 100 / len(self.rdf_triplets_event_event)))
def filt_event_entity(self, event_degree, entity_degree, reprocess=True, describe=True):
"""
事件-实体 三元组过滤
:param event_degree: 保留事件节点的度大于degree 的三元组
"""
if reprocess:
filt_entity_set = set([key for key, value in self.entity_dict.items() if value > entity_degree])
filt_event_set = set([key for key, value in self.event_dict.items() if value > event_degree])
self.filt_triplets_event_entity = list()
for i, row in tqdm(self.rdf_triplets_event_entity.iterrows(), total=len(self.rdf_triplets_event_entity)):
if row["head"] in filt_event_set and row["tail"] in filt_entity_set:
self.filt_triplets_event_entity.append(row[1:])
self.filt_event_set.add(row["head"])
self.filt_entity_set.add(row["tail"])
self.filt_relation_set.add(row["relation"])
if row["head"] in filt_entity_set and row["tail"] in filt_event_set:
self.filt_triplets_event_entity.append(row[1:])
self.filt_event_set.add(row["tail"])
self.filt_entity_set.add(row["head"])
self.filt_relation_set.add(row["relation"])
self.filt_triplets_event_entity = pd.DataFrame(self.filt_triplets_event_entity)
self.filt_triplets_event_entity.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.filt_triplets_event_entity.to_csv(self.filt_event_entity_path)
print("filt_triplets_event_entity has been saved in {}".format(self.filt_event_entity_path))
else:
if os.path.exists(self.filt_event_entity_path):
print("loading {}...".format(self.filt_event_entity_path))
self.filt_triplets_event_entity = pd.read_csv(self.filt_event_entity_path)
print("loading {} succeed!".format(self.filt_triplets_event_entity))
else:
raise FileNotFoundError("{} does not exists!".format(self.filt_triplets_event_entity))
if describe:
print("filt_triplets_event_entity_len", len(self.filt_triplets_event_entity))
print("raw_triplets_event_entity_len", len(self.rdf_triplets_event_entity))
print("filt_percentage %.2f%%" % (
len(self.filt_triplets_event_entity) * 100 / len(self.rdf_triplets_event_entity)))
def filt_entity_entity(self, entity_degree, reprocess=True, describe=True):
"""
实体-实体 三元组过滤
:param entity_degree: 保留实体节点的度大于degree 的三元组
"""
if reprocess:
filt_entity_set = set([key for key, value in self.entity_dict.items() if value > entity_degree])
print(len(filt_entity_set))
self.filt_triplets_entity_entity = list()
for i, row in tqdm(self.rdf_triplets_entity_entity.iterrows(), total=len(self.rdf_triplets_entity_entity)):
if row["head"] in filt_entity_set and row["tail"] in filt_entity_set:
self.filt_triplets_entity_entity.append(row[1:])
self.filt_entity_set.add(row["head"])
self.filt_entity_set.add(row["tail"])
self.filt_relation_set.add(row["relation"])
self.filt_triplets_entity_entity = pd.DataFrame(self.filt_triplets_entity_entity)
self.filt_triplets_entity_entity.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.filt_triplets_entity_entity.to_csv(self.filt_entity_entity_path)
print("filt_triplets_entity_entity has been saved in {}".format(self.filt_entity_entity_path))
else:
if os.path.exists(self.filt_entity_entity_path):
print("loading {}...".format(self.filt_entity_entity_path))
self.filt_triplets_entity_entity = pd.read_csv(self.filt_entity_entity_path)
print("loading {} succeed!".format(self.filt_triplets_entity_entity))
else:
raise FileNotFoundError("{} does not exists!".format(self.filt_triplets_entity_entity))
if describe:
print("filt_triplets_entity_entity_len", len(self.filt_triplets_entity_entity))
print("raw_triplets_entity_entity_len", len(self.rdf_triplets_entity_entity))
print("filt_percentage %.2f%%" % (
len(self.filt_triplets_entity_entity) * 100 / len(self.rdf_triplets_entity_entity)))
def create_subeventkg_rdf2name_all(self, event_degree, entity_degree, reprocess=True):
"""
建立rdf转name的三个字典
"""
if reprocess:
filt_event_set = set([key for key, value in self.event_dict.items() if value > event_degree])
filt_entity_set = set([key for key, value in self.entity_dict.items() if value > entity_degree])
filt_relation_set = set()
for i, row in tqdm(self.rdf_triplets_event_event.iterrows(), total=len(self.rdf_triplets_event_event)):
if row["head"] in filt_event_set and row["tail"] in filt_event_set:
filt_relation_set.add(row[2])
for i, row in tqdm(self.rdf_triplets_event_entity.iterrows(), total=len(self.rdf_triplets_event_entity)):
if (row["head"] in filt_event_set and row["tail"] in filt_entity_set) \
or (row["head"] in filt_entity_set and row["tail"] in filt_event_set):
filt_relation_set.add(row[2])
for i, row in tqdm(self.rdf_triplets_entity_entity.iterrows(), total=len(self.rdf_triplets_entity_entity)):
if row["head"] in filt_entity_set and row["tail"] in filt_entity_set:
filt_relation_set.add(row[2])
index = 0
for event in tqdm(filt_event_set):
self.event_rdf2name_dict[event] = "Q_{}".format(index)
index = index + 1
index = 0
for entity in tqdm(filt_entity_set):
self.entity_rdf2name_dict[entity] = "E_{}".format(index)
index = index + 1
index = 0
for relation in tqdm(filt_relation_set):
self.relation_rdf2name_dict[relation] = "R_{}".format(index)
index = index + 1
json.dump(self.event_rdf2name_dict, open(self.event_rdf2name_path, "w"), indent=4, sort_keys=True)
print("event_rdf2name has been saved in {}".format(self.event_rdf2name_path))
json.dump(self.entity_rdf2name_dict, open(self.entity_rdf2name_path, "w"), indent=4, sort_keys=True)
print("entity_rdf2name has been saved in {}".format(self.entity_rdf2name_path))
json.dump(self.relation_rdf2name_dict, open(self.relation_rdf2name_path, "w"), indent=4, sort_keys=True)
print("relation_rdf2name has been saved in {}".format(self.relation_rdf2name_path))
print("event_rdf2name_dict_len", len(self.event_rdf2name_dict))
print("entity_rdf2name_dict_len", len(self.entity_rdf2name_dict))
print("relation_rdf2name_dict_len", len(self.relation_rdf2name_dict))
else:
print("loading {}...".format(self.event_rdf2name_path))
with open(self.event_rdf2name_path) as file:
self.event_rdf2name_dict = json.load(file)
print("event_rdf2name_dict_len", len(self.event_rdf2name_dict))
print("loading {}...".format(self.entity_rdf2name_path))
with open(self.entity_rdf2name_path) as file:
self.entity_rdf2name_dict = json.load(file)
print("entity_rdf2name_dict_len", len(self.entity_rdf2name_dict))
print("loading {}...".format(self.relation_rdf2name_path))
with open(self.relation_rdf2name_path) as file:
self.relation_rdf2name_dict = json.load(file)
print("relation_rdf2name_dict_len", len(self.relation_rdf2name_dict))
def create_relation_rdf2type_name(self, reprocess):
if reprocess:
self.type_name_dict = dict()
file_list = ["property_labels.nq", "type_labels_dbpedia.nq"]
for single_file in file_list:
with open(single_file, "r", encoding="utf-8") as file:
print("creating relation_rdf2name...")
for line in tqdm(file, total=self._get_num_lines(single_file)):
x = line.strip().split(" ")
if x[1] == "<http://www.w3.org/2000/01/rdf-schema#label>":
match_instance = re.findall(r"^<.*?> <.*?> (\".*?\"@en) <.*?> \D", line)
if len(match_instance) != 0:
self.type_name_dict[x[0]] = match_instance[0][1:-4]
json.dump(self.type_name_dict, open("rdf2type_name", "w"), indent=4, sort_keys=True)
else:
with open("rdf2type_name") as file:
self.type_name_dict = json.load(file)
def create_subeventkg_event_lut(self, event_degree, reprocess=True):
if reprocess:
# filt_event_set=set([key for key,value in self.event_dict.items() if value>event_degree])
filt_event_set = self.filt_event_set
event_lut = defaultdict(dict)
self.new_event_dict = dict()
index = 0
for key, value in self.event_rdf2name_dict.items():
if key in filt_event_set:
new_value = "Q_{}".format(index)
index = index + 1
self.new_event_dict[key] = new_value
event_lut[new_value]["name"] = -1
event_lut[new_value]["name_rdf"] = key
event_lut[new_value]["type"] = -1
event_lut[new_value]["type_rdf"] = -1
event_lut[new_value]["description"] = -1
# event_lut[value]["begintime"] = -1
# event_lut[value]["endtime"] = -1
print("event index num", index)
with open(self.raw_events_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_events_path)):
x = line.strip().split(" ")
if x[1] == "<http://www.w3.org/2000/01/rdf-schema#label>" and x[0] in filt_event_set:
match_instance = re.findall(r"^<.*?> <.*?> (\".*?\"@en) <.*?> \D", line)
if len(match_instance) != 0:
event_lut[self.new_event_dict[x[0]]]["name"] = match_instance[0][1:-4]
if x[1] == "<http://purl.org/dc/terms/description>" and x[0] in filt_event_set:
match_instance = re.findall(r"^<.*?> <.*?> (\".*?\"@en) <.*?> \D", line)
if len(match_instance) != 0:
event_lut[self.new_event_dict[x[0]]]["description"] = match_instance[0][1:-4]
with open("types_dbpedia.nq", "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines("types_dbpedia.nq")):
x = line.strip().split(" ")
if x[1] == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>" and x[0] in filt_event_set:
event_lut[self.new_event_dict[x[0]]]["type_rdf"] = x[2]
event_lut[self.new_event_dict[x[0]]]["type"] = self.type_name_dict[x[2]]
# with open("relations_base.nq","r",encoding="utf-8") as file:
# for line in tqdm(file, total=self._get_num_lines("relations_base.nq")):
# x=line.strip().split(" ")
# if x[1]=="<http://semanticweb.cs.vu.nl/2009/11/sem/hasBeginTimeStamp>" and x[0] in filt_event_set:
# event_lut[self.event_rdf2name_dict[x[0]]]["begintime"] = x[2][1:11]
# if x[1] == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasEndTimeStamp>" and x[0] in filt_event_set:
# event_lut[self.event_rdf2name_dict[x[0]]]["endtime"] = x[2][1:11]
json.dump(event_lut, open(self.event_lut_path, "w"), indent=4, sort_keys=True)
def create_subeventkg_entity_lut(self, entity_degree, reprocess=True):
if reprocess:
# filt_entity_set=set([key for key,value in self.entity_dict.items() if value>entity_degree])
filt_entity_set = self.filt_entity_set
entity_lut = defaultdict(dict)
self.new_entity_dict = dict()
index = 0
for key, value in self.entity_rdf2name_dict.items():
if key in filt_entity_set:
new_value = "E_{}".format(index)
index = index + 1
self.new_entity_dict[key] = new_value
entity_lut[new_value]["name"] = -1
entity_lut[new_value]["name_rdf"] = key
entity_lut[new_value]["type"] = -1
entity_lut[new_value]["type_rdf"] = -1
entity_lut[new_value]["description"] = -1
# entity_lut[value]["begintime"] = -1
# entity_lut[value]["endtime"] = -1
print("entity index num", index)
with open(self.raw_entities_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_entities_path)):
x = line.strip().split(" ")
if x[1] == "<http://www.w3.org/2000/01/rdf-schema#label>" and x[0] in filt_entity_set:
match_instance = re.findall(r"^<.*?> <.*?> (\".*?\"@en) <.*?> \D", line)
if len(match_instance) != 0:
entity_lut[self.new_entity_dict[x[0]]]["name"] = match_instance[0][1:-4]
file_list = ["types_dbpedia.nq", "types.nq"]
for single_file in file_list:
with open(single_file, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(single_file)):
x = line.strip().split(" ")
if x[1] == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>" and x[0] in filt_entity_set:
entity_lut[self.new_entity_dict[x[0]]]["type_rdf"] = x[2]
entity_lut[self.new_entity_dict[x[0]]]["type"] = self.type_name_dict[x[2]]
# with open("relations_base.nq","r",encoding="utf-8") as file:
# for line in tqdm(file, total=self._get_num_lines("relations_base.nq")):
# x=line.strip().split(" ")
# if x[1]=="<http://semanticweb.cs.vu.nl/2009/11/sem/hasBeginTimeStamp>" and x[0] in filt_entity_set:
# entity_lut[self.entity_rdf2name_dict[x[0]]]["begintime"] = x[2][1:11]
# if x[1] == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasEndTimeStamp>" and x[0] in filt_entity_set:
# entity_lut[self.entity_rdf2name_dict[x[0]]]["endtime"] = x[2][1:11]
json.dump(entity_lut, open(self.entity_lut_path, "w"), indent=4, sort_keys=True)
def create_subeventkg_relation_lut(self, reprocess=True):
if reprocess:
print("开始")
my_dict = defaultdict(dict)
for key, value in self.relation_rdf2name_dict.items():
my_dict[value]["rdf"] = key
my_dict[value]["name"] = -1
for key, value in self.relation_rdf2name_dict.items():
my_dict[value]["rdf"] = key
try:
my_dict[value]["name"] = self.type_name_dict[key]
except Exception as e:
match_instance = re.findall(r"<.*?/.*?/.*?/.*?/(.*?)>$", key)
my_dict[value]["name"] = match_instance[0]
json.dump(my_dict, open(self.relation_lut_path, "w"), indent=4, sort_keys=True)
def merge_all_data_convert_name(self, reprocess=True, event_degree=10, entity_degree=10):
filt_event_set = set([key for key, value in self.event_dict.items() if value > event_degree])
filt_entity_set = set([key for key, value in self.entity_dict.items() if value > entity_degree])
if reprocess:
df_lines = []
self.total_rdf_triplets = pd.concat([self.filt_triplets_event_event,
self.filt_triplets_event_entity,
self.filt_triplets_entity_entity], axis=0)
for i, row in tqdm(self.total_rdf_triplets.iterrows(), total=len(self.total_rdf_triplets)):
if row["head"] in filt_event_set:
new_head = self.new_event_dict[row["head"]]
if row["head"] in filt_entity_set:
new_head = self.new_entity_dict[row["head"]]
new_relation = self.relation_rdf2name_dict[row["relation"]]
if row["tail"] in filt_event_set:
new_tail = self.new_event_dict[row["tail"]]
if row["tail"] in filt_entity_set:
new_tail = self.new_entity_dict[row["tail"]]
if str(row["start_time"]) != "-1":
new_start_time = row["start_time"][1:11]
else:
new_start_time = row["start_time"]
if str(row["end_time"]) != "-1":
new_end_time = row["end_time"][1:11]
else:
new_end_time = row["end_time"]
df_lines.append([new_head, new_relation, new_tail, new_start_time, new_end_time])
self.triplets = pd.DataFrame(df_lines)
self.triplets.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.triplets.to_csv("all_triplets_data.txt", index=None)
else:
self.triplets = | pd.read_csv("all_triplets_data.txt", low_memory=False) | pandas.read_csv |
#coding=utf-8
import os
import re
import json
import time
import redis
import socket
import random
import requests
import threading
import pandas as pd
from threading import Thread
from multiprocessing import Process, Queue, Lock
agents = [ "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.209.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.208.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.206.1 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.206.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.204.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.203.2 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.203.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/4.0.202.0 Safari/525.13. ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.201.1 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.201.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.198.0 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.197.11 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.196.2 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML,like Gecko) Chrome/3.0.195.27 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.27 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.24 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.21 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.20 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.17 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.10 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.1 Safari/532.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.2 (KHTML, like Gecko) Chrome/3.0.191.3 Safari/531.2 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.0 (KHTML, like Gecko) Chrome/3.0.191.0 Safari/531.0 ",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.8 (KHTML, like Gecko) Chrome/2.0.178.0 Safari/530.8 "]
url = 'http://zhaopin.baidu.com/api/quanzhiasync?'
redis_save = redis.Redis(host='127.0.0.1', port=6379, db=0)
class Spider_Job(object):
__params = {'rn': 20,
'tid': 4139,
'ie': 'utf8',
'oe': 'utf8',
'sort_type': 1,
'query': None,
'city': None,
'detailmode': 'close'}
# 初始化
def __init__(self, url, jobs='python', areas=None, page_num=1, pro_locks=None, pro_queue=None):
self.__pro_locks = pro_locks
self.__pro_queue = pro_queue
self.__params['query'] = jobs
self.__params['city'] = areas
self.__page_total = page_num
self.__total_list = []
self.__thd_area = areas
self.__thd_jobs = jobs
self.__file_name = jobs+'-'+areas+time.strftime('%Y-%m-%d',time.localtime(time.time()))
self.url_list = [url+'pn={}'.format(nums*20) for nums in range(self.__page_total)]
# 开始多线程爬取数据
def __thd_spider_data(self, url_nums, thd_lock):
try:
# 超时时间
socket.timeout = 7
thd_request = requests.get(self.url_list[url_nums],
params=self.__params,
headers={'User-Agent':random.choice(agents)})
data_source = thd_request.content.decode('utf-8')
data_source = json.loads(data_source)
data_display = data_source['data']['main']['data']['disp_data']
except Exception as error:
print(error)
return
try:
# 遍历数据并获取分类
for line in data_display:
source_list = []
try:
source_list.append({'公司名称': line['officialname']})
except Exception as error:
print(error)
source_list.append({'公司名称': line['commonname']})
# 企业类型
try:
com_type = line['ori_employertype']
except Exception as error:
try:
source_list.append({'企业类型': line['employertype']})
except Exception as error:
print(error)
source_list.append({'企业类型': '未展示'})
else:
source_list.append({'企业类型': com_type})
# 工作经验
try:
experience = line['t_experience0']
except Exception as error:
try:
source_list.append({'工作经验': line['ori_experience']})
except Exception as error:
print(error)
source_list.append({'工作经验': '未展示'})
else:
source_list.append({'工作经验': experience})
# 薪资范围
try:
source_list.append({'薪资范围': line['t_salary0']})
except Exception as error:
try:
source_list.append({'薪资范围': line['salary']})
except Exception as error:
print(error)
source_list.append({'薪资范围': '未展示'})
try:
source_list.append({'公司规模': line['size']})
except Exception as errror:
try:
source_list.append({'公司规模': line['ori_size']})
except Exception as error:
# print(error)
source_list.append({'公司规模': '未展示'})
try:
source_list.append({'经营范围': line['industry']})
except Exception as error:
try:
source_list.append({'经营范围': line['jobfirstclass']})
except Exception as er:
try:
source_list.append({'经营范围': line['jobthirdclass']})
except Exception as er:
print(er)
source_list.append({'经营范围': '不限'})
# 公司网址
try:
source_list.append({'公司网址': line['employerurl']})
except Exception as error:
# print(error)
source_list.append({'公司网址': '未展示'})
source_list.append({'学历要求': line['ori_education']})
source_list.append({'地区': line['district']})
source_list.append({'招聘来源': line['source']})
source_list.append({'职位名称': line['title']})
source_list.append({'职位描述': line['description_jd']})
source_list.append({'开始日期': line['startdate']})
source_list.append({'结束日期': line['enddate']})
# 字典形式
dicts = {}
try:
for list_dict in source_list:
for name_key, name_value in list_dict.items():
try:
dicts[name_key] = name_value
except Exception as error:
print(error)
continue
except Exception as error:
print(error)
# 将数据放到Redis数据库中
# try:
#
# redis_key = dicts['公司名称']
# redis_save.set(redis_key,dicts)
#
# except Exception as error:
#
# print(error)
# continue
# 将数据放到队列中
try:
self.__pro_queue.put(dicts)
except Exception as error:
print(error)
continue
except Exception as error:
print(error)
else:
print('------------------------{}-{},第{}页保存完成'.format(self.__thd_jobs,
self.__thd_area,
url_nums))
# 为每个线程添加一个随机时间
# time.sleep(random.randint(0,10)*0.001)
# 根据任务数开启相应线程
def run_spider_thd(self):
thd_list = []
thd_lock = threading.Lock()
# 根据单个地区的页码数量创建相应数量的线程
for thd_st in range(self.__page_total):
try:
thd = Thread(target=self.__thd_spider_data,
args=(thd_st, thd_lock))
thd.setDaemon(True)
thd.start()
except Exception as error:
print(error)
continue
else:
thd_list.append(thd)
# 等待线程结束
for thd_end in thd_list:
thd_end.join(timeout=8)
print('\n{}-{}数据爬取完成,正在处理数据...\n'.format(self.__thd_area,self.__thd_jobs))
# 创建进程处理进行大分类爬取
class Pro_Spider(object):
def __init__(self, pro_job, area_list, pro_total_num):
self.__pro_job = pro_job
self.__pro_area_list = area_list
self.__pro_total_num = pro_total_num
# 处理全局爬取的数据
def __deal_total_data(self, deal_list, file_name, save_queue):
# 数据分类
dicts_propertys = ['公司名称', '企业类型', '公司规模',
'经营范围', '地区', '招聘来源',
'职位名称', '薪资范围', '工作经验',
'学历要求', '职位描述', '开始日期',
'结束日期', '公司网址']
# 使用列表推导式进行数据的分类处理
try:
data_dicts = {mkey:[line[mkey] for line in deal_list] for mkey in dicts_propertys}
except Exception as error:
print(error)
else:
# 导出到excel
job_file_class = 'Jobs_Data'
if not job_file_class in os.listdir('./'): os.mkdir(job_file_class)
deal_data = | pd.DataFrame.from_dict(data_dicts, orient='index') | pandas.DataFrame.from_dict |
"""
Mixture Class
=============
Mixture of expert using clustering machine learning to form local surrogate
models.
:Example:
::
>> from batman.surrogate import Mixture
>> import numpy as np
>> samples = np.array([[1., 5.], [2., 5.], [8., 5.], [9., 5.]])
>> data = np.array([[50., 51., 52.], [49., 48., 47.], [10., 11., 12,],
[9., 8., 7.]])
>> sample_new = np.array([[0.5, 5.],[10., 5.],[8.5, 5.]])
>> plabels = ['x1', 'x2']
>> corners = np.array([[1., 5.], [9., 5.]])
>> fsizes = 3
>> algo = Mixture(samples, data, plabels, corners, fsizes)
>> algo.evaluate(sample_new)
(array([[30.196, 31.196, 32.196],
[29. , 28. , 27. ],
[28.804, 27.804, 26.804]]), array([[19.999, 19.999, 19.999],
[20. , 20. , 20. ],
[19.999, 19.999, 19.999]]))
>> algo.estimate_quality()
1.0, array([0., 0.])
"""
import logging
import numpy as np
from sklearn.decomposition import PCA
import sklearn.mixture
import sklearn.cluster
import sklearn.gaussian_process
import sklearn.svm
import sklearn.naive_bayes
import sklearn.neighbors
from sklearn import preprocessing
import pandas as pd
from pandas.plotting import parallel_coordinates
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from ..misc import save_show
class Mixture:
"""Mixture class.
Unsupervised machine learning separate the DoE into clusters, supervised
machine learning classify new sample to a cluster and local models
predict the new sample."""
logger = logging.getLogger(__name__)
def __init__(self, samples, data, corners, fsizes=None, pod=None,
standard=True, local_method=None, pca_percentage=0.8,
clusterer='cluster.KMeans(n_clusters=2)',
classifier='gaussian_process.GaussianProcessClassifier()'):
"""Cluster data and fit local models.
1. If :attr:`data` is not scalar, compute PCA on :attr:`data`.
2. Cluster data.
3. Each sample is affiliated to a cluster.
4. Fit a classifier to handle new samples.
5. A local model for each cluster is built.
If :attr:`local_method` is not None, set as list of dict with options.
Ex: `[{'kriging': {**args}}]`
:param array_like sample: Sample of parameters of Shape
(n_samples, n_params).
:param array_like data: Sample of realization which corresponds to the
sample of parameters :attr:`sample` (n_samples, n_features).
:param array_like corners: Hypercube ([min, n_features],
[max, n_features]).
:param int fsizes: Number of components of output features.
:param dict pod: Whether to compute POD or not in local models.
- **tolerance** (float) -- Basis modes filtering criteria.
- **dim_max** (int) -- Number of basis modes to keep.
:param bool standard: Whether to standardize data before clustering.
:param lst(dict) local_method: List of local surrrogate models
for clusters or None for Kriging local surrogate models.
:param float pca_percentage: Percentage of information kept for PCA.
:param str clusterer: Clusterer from sklearn (unsupervised machine
learning).
http://scikit-learn.org/stable/modules/clustering.html#clustering
:param str classifier: Classifier from sklearn (supervised machine
learning).
http://scikit-learn.org/stable/supervised_learning.html
"""
self.scaler = preprocessing.MinMaxScaler()
self.scaler.fit(np.array(corners))
samples = self.scaler.transform(samples)
corners = [[0 for i in range(samples.shape[1])],
[1 for i in range(samples.shape[1])]]
# Only do the clustering on the sensor
if fsizes is None:
self.fsizes = data.shape[1]
else:
self.fsizes = fsizes
if data.shape[1] > self.fsizes:
clust = data[:, self.fsizes:]
else:
clust = data
# Computation of PCA for vector output
if clust.shape[1] > 1:
pca = PCA(n_components=pca_percentage)
pca.fit(clust.T)
clust = pca.components_.T
if standard is True:
scaler = preprocessing.StandardScaler()
clust = scaler.fit_transform(clust)
# Acquisition of clusterer
try:
# Clusterer is already a sklearn object
self.logger.debug('Clusterer info:\n{}'
.format(clusterer.get_params))
except AttributeError:
# Instanciate clusterer from str
try:
clusterer = eval("sklearn." + clusterer,
{'__builtins__': None},
{'sklearn': __import__('sklearn'),
'sklearn.cluster': __import__('sklearn.cluster'),
'sklearn.mixture': __import__('sklearn.mixture')})
except (TypeError, AttributeError):
raise AttributeError('Clusterer unknown from sklearn.')
self.logger.debug('Clusterer info:\n{}'
.format(clusterer.get_params()))
# Clustering
try:
labels = clusterer.fit_predict(clust)
except AttributeError:
clusterer.fit(clust)
labels = clusterer.predict(clust)
self.logger.debug('Cluster of data :{}'.format(samples, labels))
self.clusters_id = np.unique(labels)
# Acqusition of Classifier
try:
# Classifier is already a sklearn object
self.logger.debug('Classifier info:\n{}'
.format(classifier.get_params))
except AttributeError:
# Instanciate classifier from str
try:
classifier = eval('ske.' + classifier,
{'__builtins__': None},
{'ske': __import__('sklearn'),
'sklearn.svm': __import__('sklearn.svm'),
'sklearn.naive_bayes': __import__('sklearn.naive_bayes'),
'sklearn.gaussian_process': __import__('sklearn.gaussian_process'),
'sklearn.neighbors': __import__('sklearn.neighbors'),
'sklearn.ensemble': __import__('sklearn.ensemble')})
except (TypeError, AttributeError):
raise AttributeError('Classifier unknown from sklearn.')
self.logger.debug('Classifier info:\n{}'
.format(classifier.get_params()))
# Classification
self.classifier = classifier
self.classifier.fit(samples, labels)
self.indice_clt = {}
self.local_models = {}
# Creation of local models
for i, k in enumerate(self.clusters_id):
idx = np.where(labels == k)[0]
self.indice_clt[k] = list(idx)
sample_ = [samples[j] for j in self.indice_clt[k]]
data_ = [data[j, :self.fsizes] for j in self.indice_clt[k]]
if pod is not None:
from otsurrogate.pod import Pod
local_pod = Pod(corners, **pod)
local_pod.fit(space=sample_, data=data_)
data_ = local_pod.VS
else:
local_pod = None
from otsurrogate.surrogate import SurrogateModel
if local_method is None:
self.local_models[k] = SurrogateModel('kriging', corners, plabels=None)
else:
method = [lm for lm in local_method[i]][0]
self.local_models[k] = SurrogateModel(method, corners, plabels=None,
**local_method[i][method])
self.local_models[k].fit(np.asarray(sample_), np.asarray(data_), pod=local_pod)
def boundaries(self, samples, plabels=None, fname=None):
"""Boundaries of clusters in the parameter space.
Plot the boundaries for 2D and 3D hypercube or parallel coordinates
plot for more than 3D to see the influence of sample variables on
cluster affiliation.
:param array_like samples: Sample features (n_samples, n_features).
:param list(str) plabels: Names of each parameters (n_features).
:param str fname: Whether to export to filename or display the figures.
:returns: figure.
:rtype: Matplotlib figure instances, Matplotlib AxesSubplot instances.
"""
samples = np.asarray(samples)
samples_ = self.scaler.transform(samples)
classif_samples = self.classifier.predict(samples_)
n_dim = samples.shape[1]
plabels = ['x' + str(i) for i in range(n_dim)]\
if plabels is None else plabels
resolution = 20
cmap = plt.cm.Set1
color_clt = Normalize(vmin=min(self.clusters_id),
vmax=max(self.clusters_id), clip=True)
markers = ['x', 'o', '+', 'h', '*', 's', 'p', '<', '>', '^', 'v']
mins = samples.min(axis=0)
maxs = samples.max(axis=0)
fig, ax = plt.subplots()
if n_dim == 1:
xx = np.linspace(mins, maxs, resolution)[:, None]
mesh = self.scaler.transform(xx)
classif = self.classifier.predict(mesh)
ax.scatter(xx, np.zeros(resolution),
alpha=0.3, c=cmap(color_clt(classif)))
for i, k in enumerate(self.clusters_id):
samples_ = samples[classif_samples == k]
ax.scatter(samples_, np.zeros(len(samples_)),
c=cmap(color_clt(k)), marker=markers[i])
ax.set_xlabel(plabels[0])
elif n_dim == 2:
xx, yy = np.meshgrid(np.linspace(mins[0], maxs[0], resolution),
np.linspace(mins[1], maxs[1], resolution))
mesh = np.stack((xx.ravel(), yy.ravel()), axis=-1)
mesh = self.scaler.transform(mesh)
classif = self.classifier.predict(mesh)
classif = classif.reshape(xx.shape)
ax.imshow(classif, extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', interpolation='gaussian')
for i, k in enumerate(self.clusters_id):
ax.scatter(samples[:, 0][classif_samples == k],
samples[:, 1][classif_samples == k],
c=cmap(color_clt(k)), marker=markers[i])
ax.set_xlabel(plabels[0])
ax.set_ylabel(plabels[1])
else:
classif_samples = classif_samples.reshape(-1, 1)
samples_ = np.concatenate((samples_, classif_samples), axis=-1)
df = pd.DataFrame(samples_, columns=plabels + ["cluster"])
ax = | parallel_coordinates(df, "cluster") | pandas.plotting.parallel_coordinates |
# Copyright 2019 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python -m unittest discover -s tests/ -p test*.py -t .
import unittest
try:
from unittest.mock import patch
except ImportError:
# py27
from mock import patch
import pandas as pd
import pytz
import numpy as np
import requests
from quantrocket import get_prices, get_prices_reindexed_like
from quantrocket.exceptions import ParameterError, MissingData, NoHistoricalData
class GetPricesTestCase(unittest.TestCase):
"""
Test cases for `quantrocket.get_prices`.
"""
def test_complain_if_invalid_timezone(self):
"""
Tests error handling when the timezone is invalid.
"""
with self.assertRaises(ParameterError) as cm:
get_prices("my-db", timezone="Timbuktu")
self.assertIn("invalid timezone: Timbuktu (see `pytz.all_timezones` for choices)", str(cm.exception))
@patch("quantrocket.price.list_realtime_databases")
@patch("quantrocket.price.list_history_databases")
@patch("quantrocket.price.list_bundles")
@patch("quantrocket.price.get_history_db_config")
@patch("quantrocket.price.download_history_file")
@patch("quantrocket.price.download_master_file")
def test_pass_args_correctly_single_db(self,
mock_download_master_file,
mock_download_history_file,
mock_get_history_db_config,
mock_list_bundles,
mock_list_history_databases,
mock_list_realtime_databases):
"""
Tests that args are correctly passed to download_history_file,
download_master_file, and get_db_config, when there is a single db.
"""
def _mock_get_history_db_config(db):
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def _mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
]))
prices.to_csv(f, index=False)
def _mock_list_history_databases():
return [
"usa-stk-1d",
"demo-stk-1min"
]
def _mock_list_realtime_databases():
return {}
def _mock_list_bundles():
return {}
mock_list_history_databases.side_effect = _mock_list_history_databases
mock_list_realtime_databases.side_effect = _mock_list_realtime_databases
mock_list_bundles.side_effect = _mock_list_bundles
mock_download_history_file.side_effect = _mock_download_history_file
mock_get_history_db_config.side_effect = _mock_get_history_db_config
get_prices(
"usa-stk-1d", start_date="2018-04-01", end_date="2018-04-03",
universes="usa-stk", sids=["FI12345","FI23456"], fields=["Close"],
exclude_universes=["usa-stk-pharm"],
exclude_sids=[99999], cont_fut=False, timezone="America/New_York")
mock_download_master_file.assert_not_called()
self.assertEqual(len(mock_get_history_db_config.mock_calls), 1)
db_config_call = mock_get_history_db_config.mock_calls[0]
_, args, kwargs = db_config_call
self.assertEqual(args[0], "usa-stk-1d")
mock_list_history_databases.assert_called_once_with()
mock_list_realtime_databases.assert_called_once_with()
mock_list_bundles.assert_called_once_with()
history_call = mock_download_history_file.mock_calls[0]
_, args, kwargs = history_call
self.assertEqual(args[0], "usa-stk-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456"])
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
self.assertListEqual(kwargs["fields"], ["Close"])
self.assertEqual(kwargs["universes"], "usa-stk")
self.assertListEqual(kwargs["exclude_sids"], [99999])
self.assertListEqual(kwargs["exclude_universes"], ["usa-stk-pharm"])
self.assertFalse(kwargs["cont_fut"])
@patch("quantrocket.price.list_realtime_databases")
@patch("quantrocket.price.list_history_databases")
@patch("quantrocket.price.list_bundles")
@patch("quantrocket.price.get_realtime_db_config")
@patch("quantrocket.price.get_history_db_config")
@patch("quantrocket.price.get_bundle_config")
@patch("quantrocket.price.download_market_data_file")
@patch("quantrocket.price.download_history_file")
@patch("quantrocket.price.download_bundle_file")
def test_pass_args_correctly_multi_db(
self,
mock_download_bundle_file,
mock_download_history_file,
mock_download_market_data_file,
mock_get_bundle_config,
mock_get_history_db_config,
mock_get_realtime_db_config,
mock_list_bundles,
mock_list_history_databases,
mock_list_realtime_databases):
"""
Tests that args are correctly passed to download_history_file,
download_master_file, and get_db_config, when there are multiple dbs,
including 2 history dbs, 1 realtime db, and 1 zipline bundle.
"""
def _mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "1 day",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def _mock_get_realtime_db_config(db):
return {
"bar_size": "1 day",
"fields": ["LastClose", "LastOpen", "VolumeClose"],
"tick_db_code": "demo-stk-taq"
}
def _mock_get_bundle_config(db):
return {
"ingest_type": "usstock",
"sids": None,
"universes": None,
"free": False,
"data_frequency": "minute",
"calendar": "XNYS",
"start_date": "2007-01-03"
}
def _mock_download_history_file(code, f, *args, **kwargs):
if code == "usa-stk-1d":
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
]))
else:
prices = pd.DataFrame(
dict(
Sid=[
"FI56789",
"FI56789",
"FI56789",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
],
Close=[
5900,
5920,
5950
]))
prices.to_csv(f, index=False)
def _mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
LastClose=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
]))
prices.to_csv(f, index=False)
def _mock_download_bundle_file(code, f, *args, **kwargs):
raise NoHistoricalData("No minute data matches the query parameters")
def _mock_list_history_databases():
return [
"usa-stk-1d",
"demo-stk-1min",
"japan-stk-1d"
]
def _mock_list_realtime_databases():
return {
"demo-stk-taq": ["demo-stk-taq-1d"]
}
def _mock_list_bundles():
return {
"usstock-1min": True
}
mock_download_history_file.side_effect = _mock_download_history_file
mock_download_bundle_file.side_effect = _mock_download_bundle_file
mock_download_market_data_file.side_effect = _mock_download_market_data_file
mock_get_history_db_config.side_effect = _mock_get_history_db_config
mock_get_realtime_db_config.side_effect = _mock_get_realtime_db_config
mock_get_bundle_config.side_effect = _mock_get_bundle_config
mock_list_bundles.side_effect = _mock_list_bundles
mock_list_history_databases.side_effect = _mock_list_history_databases
mock_list_realtime_databases.side_effect = _mock_list_realtime_databases
get_prices(
["usa-stk-1d", "japan-stk-1d", "demo-stk-taq-1d", "usstock-1min"],
start_date="2018-04-01", end_date="2018-04-03",
sids=["FI12345","FI23456","FI56789"], fields=["Close", "LastClose"],
data_frequency="daily")
mock_list_history_databases.assert_called_once_with()
mock_list_realtime_databases.assert_called_once_with()
mock_list_bundles.assert_called_once_with()
self.assertEqual(len(mock_get_history_db_config.mock_calls), 2)
db_config_call = mock_get_history_db_config.mock_calls[0]
_, args, kwargs = db_config_call
db_config_call_arg1 = args[0]
db_config_call = mock_get_history_db_config.mock_calls[1]
_, args, kwargs = db_config_call
db_config_call_arg2 = args[0]
self.assertSetEqual({db_config_call_arg1, db_config_call_arg2},
{"usa-stk-1d", "japan-stk-1d"})
self.assertEqual(len(mock_download_history_file.mock_calls), 2)
history_call = mock_download_history_file.mock_calls[0]
_, args, kwargs = history_call
self.assertEqual(args[0], "usa-stk-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456","FI56789"])
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
# only supported subset of fields is requested
self.assertListEqual(kwargs["fields"], ["Close"])
history_call = mock_download_history_file.mock_calls[1]
_, args, kwargs = history_call
self.assertEqual(args[0], "japan-stk-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456","FI56789"])
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
self.assertListEqual(kwargs["fields"], ["Close"])
self.assertNotIn("data_frequency", kwargs)
self.assertEqual(len(mock_get_realtime_db_config.mock_calls), 1)
db_config_call = mock_get_realtime_db_config.mock_calls[0]
_, args, kwargs = db_config_call
self.assertEqual(args[0], "demo-stk-taq-1d")
self.assertEqual(len(mock_download_market_data_file.mock_calls), 1)
realtime_call = mock_download_market_data_file.mock_calls[0]
_, args, kwargs = realtime_call
self.assertEqual(args[0], "demo-stk-taq-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456","FI56789"])
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
# only supported subset of fields is requested
self.assertListEqual(kwargs["fields"], ["LastClose"])
self.assertNotIn("data_frequency", kwargs)
# since we passed data_frequency, we didn't need to query the bundle config
self.assertEqual(len(mock_get_bundle_config.mock_calls), 0)
self.assertEqual(len(mock_download_bundle_file.mock_calls), 1)
zipline_call = mock_download_bundle_file.mock_calls[0]
_, args, kwargs = zipline_call
self.assertEqual(args[0], "usstock-1min")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456","FI56789"])
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
# only supported subset of fields is requested
self.assertListEqual(kwargs["fields"], ["Close"])
# data_frequency arg is passed
self.assertEqual(kwargs["data_frequency"], "daily")
@patch("quantrocket.price.download_market_data_file")
def test_maybe_pass_timezone_to_realtime_db(
self,
mock_download_market_data_file):
"""
Tests that the timezone, if passed, is appended to the start_date and/or
end_date, if passed when querying a realtime db.
"""
def mock_get_realtime_db_config(db):
return {
"bar_size": "1 day",
"fields": ["LastClose", "LastOpen", "VolumeClose"],
"tick_db_code": "demo-stk-taq"
}
def _mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
LastClose=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
]))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return []
def mock_list_realtime_databases():
return {
"demo-stk-taq": ["demo-stk-taq-1d"]
}
def mock_list_bundles():
return {}
mock_download_market_data_file.side_effect = _mock_download_market_data_file
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
# First, don't pass timezone
get_prices(
"demo-stk-taq-1d",
start_date="2018-04-01", end_date="2018-04-03")
self.assertEqual(len(mock_download_market_data_file.mock_calls), 1)
realtime_call = mock_download_market_data_file.mock_calls[0]
_, args, kwargs = realtime_call
self.assertEqual(kwargs["start_date"], "2018-04-01")
self.assertEqual(kwargs["end_date"], "2018-04-03")
# Pass timezone and start_date but not end date
get_prices(
"demo-stk-taq-1d",
start_date="2018-04-01", timezone="America/New_York")
self.assertEqual(len(mock_download_market_data_file.mock_calls), 2)
realtime_call = mock_download_market_data_file.mock_calls[1]
_, args, kwargs = realtime_call
self.assertEqual(kwargs["start_date"], "2018-04-01 America/New_York")
self.assertEqual(kwargs["end_date"], None)
# Pass timezone and start_date and end date
get_prices(
"demo-stk-taq-1d",
start_date="2018-04-01", end_date="2018-04-03 15:00:00",
timezone="America/New_York")
self.assertEqual(len(mock_download_market_data_file.mock_calls), 3)
realtime_call = mock_download_market_data_file.mock_calls[2]
_, args, kwargs = realtime_call
self.assertEqual(kwargs["start_date"], "2018-04-01 America/New_York")
self.assertEqual(kwargs["end_date"], "2018-04-03 15:00:00 America/New_York")
def test_query_eod_history_db(self):
"""
Tests maniuplation of a single EOD db.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
],
Volume=[
15000,
7800,
12400,
98000,
179000,
142500
]
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices("usa-stk-1d", fields=["Close", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz) # EOD is tz-naive
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
self.assertListEqual(
list(prices.index.get_level_values("Field")),
["Close", "Close", "Close", "Volume", "Volume", "Volume"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-03T00:00:00', "FI12345": 19.4, "FI23456": 51.59}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 15000, "FI23456": 98000},
{'Date': '2018-04-02T00:00:00', "FI12345": 7800, "FI23456": 179000},
{'Date': '2018-04-03T00:00:00', "FI12345": 12400, "FI23456": 142500}]
)
def test_query_multiple_eod_history_dbs(self):
"""
Tests maniuplation of multiple EOD dbs.
"""
def mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "1 day",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
if code == "usa-stk-1d":
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
],
Volume=[
15000,
7800,
12400,
98000,
179000,
142500
]
))
else:
prices = pd.DataFrame(
dict(
Sid=[
"FI56789",
"FI56789",
"FI56789",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
],
Close=[
5900,
5920,
5950],
Volume=[
18000,
17600,
5600
]
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices(["usa-stk-1d", "japan-stk-1d"], fields=["Close", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz) # EOD is tz-naive
self.assertListEqual(list(prices.columns), ["FI12345","FI23456","FI56789"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.5, "FI56789": 5900.0},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 52.5, "FI56789": 5920.0},
{'Date': '2018-04-03T00:00:00', "FI12345": 19.4, "FI23456": 51.59, "FI56789": 5950.0}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 15000, "FI23456": 98000, "FI56789": 18000},
{'Date': '2018-04-02T00:00:00', "FI12345": 7800, "FI23456": 179000, "FI56789": 17600},
{'Date': '2018-04-03T00:00:00', "FI12345": 12400, "FI23456": 142500, "FI56789": 5600}]
)
def test_query_eod_history_and_realtime_db(self):
"""
Tests querying of an EOD history db and EOD realtime aggregate db. This test
is to make sure we can handle combining a history db with dates like "2020-04-05"
and a 1-d realtime aggregate db with dates like "2020-04-05T00:00:00+00". It also
tests that dates are made uniform across all fields.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
],
Volume=[
15000,
7800,
12400,
98000,
179000,
142500
]
)
)
prices.to_csv(f, index=False)
def mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01T00:00:00+00",
"2018-04-02T00:00:00+00",
"2018-04-01T00:00:00+00",
"2018-04-02T00:00:00+00",
],
LastClose=[
30.50,
39.40,
79.5,
79.59,
],
LastCount=[
305,
940,
795,
959,
]
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"usa-stk-tick": ["usa-stk-tick-1d"]}
def mock_get_realtime_db_config(db):
return {
"bar_size": "1 day",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["LastClose","LastOpen","LastHigh","LastLow", "VolumeClose"]
}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch('quantrocket.price.download_market_data_file', new=mock_download_market_data_file):
prices = get_prices(["usa-stk-1d", "usa-stk-tick-1d"], fields=["Close", "LastClose"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz) # EOD is tz-naive
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-03T00:00:00', "FI12345": 19.4, "FI23456": 51.59}]
)
last_closes = prices.loc["LastClose"]
last_closes = last_closes.reset_index()
last_closes.loc[:, "Date"] = last_closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_closes.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 30.50, "FI23456": 79.50},
{'Date': '2018-04-02T00:00:00', "FI12345": 39.40, "FI23456": 79.59},
{'Date': '2018-04-03T00:00:00', "FI12345": "nan", "FI23456": "nan"}]
)
def test_consolidate_intraday_history_and_realtime_distinct_fields(self):
"""
Tests that when querying a history and real-time database with
distinct fields and overlapping dates/sids, both fields are
preserved.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "15 mins",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_get_realtime_db_config(db):
return {
"bar_size": "15 min",
"fields": ["LastClose","LastOpen","LastHigh","LastLow", "VolumeClose"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
],
Close=[
20.10,
20.50,
19.40,
18.56,
50.5,
52.5,
51.59,
54.23
],
Volume=[
15000,
7800,
12400,
14500,
98000,
179000,
142500,
124000,
]
))
prices.to_csv(f, index=False)
def mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI23456",
"FI23456",
],
Date=[
# Data is UTC but will become NY
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
],
LastClose=[
30.50,
39.40,
79.5,
79.59,
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-15min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"usa-stk-snapshot": ["usa-stk-snapshot-15min"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch('quantrocket.price.download_market_data_file', new=mock_download_market_data_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices(
["usa-stk-15min", "usa-stk-snapshot-15min"],
fields=["Close", "LastClose", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 19.4, "FI23456": 51.59},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 18.56, "FI23456": 54.23}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 15000.0, "FI23456": 98000.0},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 7800.0, "FI23456": 179000.0},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 12400.0, "FI23456": 142500.0},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 14500.0, "FI23456": 124000.0}]
)
last_closes = prices.loc["LastClose"]
last_closes = last_closes.reset_index()
last_closes.loc[:, "Date"] = last_closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_closes.fillna("nan").to_dict(orient="records"),
# Data was UTC but now NY
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": "nan", "FI23456": "nan"},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 30.5, "FI23456": 79.5},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": "nan", "FI23456": "nan"},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 39.4, "FI23456": 79.59}]
)
def test_query_single_realtime_db(self):
"""
Tests querying a single real-time aggregate database, with no history
db in the query.
"""
def mock_get_realtime_db_config(db):
return {
"bar_size": "15 min",
"fields": ["LastClose","LastOpen","LastHigh","LastLow", "VolumeClose"]
}
def mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI23456",
"FI23456",
],
Date=[
# Data is UTC but will become Mexico_City
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
],
LastClose=[
30.50,
39.40,
79.5,
79.59,
],
LastCount=[
305,
940,
795,
959,
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/Mexico_City", "America/Mexico_City"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-15min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"mexi-stk-tick": ["mexi-stk-tick-15min"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch('quantrocket.price.download_market_data_file', new=mock_download_market_data_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices("mexi-stk-tick-15min")
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
last_closes = prices.loc["LastClose"]
last_closes = last_closes.reset_index()
last_closes.loc[:, "Date"] = last_closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_closes.fillna("nan").to_dict(orient="records"),
# Data was UTC but now Mexico_City
[{'Date': '2018-04-01T00:00:00', 'Time': '14:30:00', "FI12345": 30.5, "FI23456": 79.5},
{'Date': '2018-04-02T00:00:00', 'Time': '14:30:00', "FI12345": 39.4, "FI23456": 79.59}]
)
last_counts = prices.loc["LastCount"]
last_counts = last_counts.reset_index()
last_counts.loc[:, "Date"] = last_counts.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_counts.fillna("nan").to_dict(orient="records"),
# Data was UTC but now Mexico_City
[{'Date': '2018-04-01T00:00:00', 'Time': '14:30:00', "FI12345": 305.0, "FI23456": 795.0},
{'Date': '2018-04-02T00:00:00', 'Time': '14:30:00', "FI12345": 940.0, "FI23456": 959.0}]
)
def test_query_single_zipline_bundle(self):
"""
Tests querying a single Zipline bundle, with no history
db or realtime db in the query.
"""
def mock_download_bundle_file(code, f, *args, **kwargs):
prices = pd.concat(
dict(
Close=pd.DataFrame(
dict(
FI12345=[
48.90,
49.40,
55.49,
56.78
],
FI23456=[
59.5,
59.59,
59.34,
51.56,
]
),
index=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
]),
Volume=pd.DataFrame(
dict(
FI12345=[
100,
200,
300,
400
],
FI23456=[
500,
600,
700,
800,
]
),
index=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
])
), names=["Field","Date"]
)
prices.to_csv(f)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-15min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"mexi-stk-tick": ["mexi-stk-tick-15min"]}
def mock_get_bundle_config(db):
return {
"ingest_type": "usstock",
"sids": None,
"universes": None,
"free": False,
"data_frequency": "minute",
"calendar": "XNYS",
"start_date": "2007-01-03"
}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.download_bundle_file', new=mock_download_bundle_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
with patch("quantrocket.price.get_bundle_config", new=mock_get_bundle_config):
prices = get_prices("usstock-1min", fields=["Close","Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', 'FI12345': 48.9, 'FI23456': 59.5},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', 'FI12345': 49.4, 'FI23456': 59.59},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', 'FI12345': 55.49, 'FI23456': 59.34},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', 'FI12345': 56.78, 'FI23456': 51.56}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', 'FI12345': 100, 'FI23456': 500},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', 'FI12345': 200, 'FI23456': 600},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', 'FI12345': 300, 'FI23456': 700},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', 'FI12345': 400, 'FI23456': 800}]
)
@patch("quantrocket.price.download_bundle_file")
def test_pass_data_frequency_based_on_bundle_config(self, mock_download_bundle_file):
"""
Tests that when querying a Zipline bundle and not providing a data_frequency
arg, the data frequency is determined from the bundle config.
"""
def _mock_download_bundle_file(code, f, *args, **kwargs):
prices = pd.concat(
dict(
Close=pd.DataFrame(
dict(
FI12345=[
48.90,
49.40,
55.49,
56.78
],
FI23456=[
59.5,
59.59,
59.34,
51.56,
]
),
index=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
]),
Volume=pd.DataFrame(
dict(
FI12345=[
100,
200,
300,
400
],
FI23456=[
500,
600,
700,
800,
]
),
index=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
])
), names=["Field","Date"]
)
prices.to_csv(f)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-15min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"mexi-stk-tick": ["mexi-stk-tick-15min"]}
def mock_get_bundle_config(db):
return {
"ingest_type": "usstock",
"sids": None,
"universes": None,
"free": False,
"data_frequency": "minute",
"calendar": "XNYS",
"start_date": "2007-01-03"
}
def mock_list_bundles():
return {"usstock-1min": True}
mock_download_bundle_file.side_effect = _mock_download_bundle_file
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
with patch("quantrocket.price.get_bundle_config", new=mock_get_bundle_config):
# query with no data_frequency arg
get_prices("usstock-1min")
self.assertEqual(len(mock_download_bundle_file.mock_calls), 1)
zipline_call = mock_download_bundle_file.mock_calls[0]
_, args, kwargs = zipline_call
self.assertEqual(args[0], "usstock-1min")
self.assertEqual(kwargs["data_frequency"], "minute")
# query with data_frequency arg
get_prices("usstock-1min", data_frequency='daily')
self.assertEqual(len(mock_download_bundle_file.mock_calls), 2)
zipline_call = mock_download_bundle_file.mock_calls[1]
_, args, kwargs = zipline_call
self.assertEqual(args[0], "usstock-1min")
self.assertEqual(kwargs["data_frequency"], "daily")
@patch("quantrocket.price.download_market_data_file")
@patch("quantrocket.price.download_history_file")
@patch("quantrocket.price.download_bundle_file")
def test_apply_times_filter_to_history_vs_realtime_database_vs_zipline_bundle(
self,
mock_download_bundle_file,
mock_download_history_file,
mock_download_market_data_file):
"""
Tests that the times filter is applied to a history database via the
history query but is applied to the realtime database after
converting to the exchange timezone.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "1 mins",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Wap","Open","High","Low", "Volume"]
}
def mock_get_realtime_db_config(db):
return {
"bar_size": "1 min",
"fields": ["LastClose","LastOpen","LastHigh","LastLow", "VolumeClose"]
}
def mock_get_bundle_config(db):
return {
"ingest_type": "usstock",
"sids": None,
"universes": None,
"free": False,
"data_frequency": "minute",
"calendar": "XNYS",
"start_date": "2007-01-03"
}
def _mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
],
Wap=[
20.10,
20.50,
19.40,
18.56,
50.5,
52.5,
51.59,
54.23
],
Volume=[
15000,
7800,
12400,
14500,
98000,
179000,
142500,
124000,
]
))
prices.to_csv(f, index=False)
def _mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
# 19:00:00 UTC data is returned (15:00:00 NY) but will be filtered out
"2018-04-01T19:00:00+00",
"2018-04-01T19:30:00+00",
"2018-04-02T19:00:00+00",
"2018-04-02T19:30:00+00",
"2018-04-01T19:00:00+00",
"2018-04-01T19:30:00+00",
"2018-04-02T19:00:00+00",
"2018-04-02T19:30:00+00",
],
LastClose=[
30.50,
39.40,
45.49,
46.78,
79.5,
79.59,
89.34,
81.56,
]
))
prices.to_csv(f, index=False)
def _mock_download_bundle_file(code, f, *args, **kwargs):
prices = pd.concat(
dict(
Close=pd.DataFrame(
dict(
FI12345=[
48.90,
49.40,
55.49,
56.78
],
FI23456=[
59.5,
59.59,
59.34,
51.56,
]
),
index=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
])
), names=["Field","Date"]
)
prices.to_csv(f)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-1min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"usa-stk-snapshot": ["usa-stk-snapshot-1min"]}
def mock_list_bundles():
return {"usstock-1min": True, "usstock-free-1min": True}
mock_download_history_file.side_effect = _mock_download_history_file
mock_download_market_data_file.side_effect = _mock_download_market_data_file
mock_download_bundle_file.side_effect = _mock_download_bundle_file
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
with patch("quantrocket.price.get_bundle_config", new=mock_get_bundle_config):
prices = get_prices(
["usa-stk-1min", "usa-stk-snapshot-1min", "usstock-1min"],
fields=["Close", "LastClose", "Volume", "Wap"],
times=["09:30:00", "15:30:00"],
)
self.assertEqual(len(mock_download_history_file.mock_calls), 1)
history_call = mock_download_history_file.mock_calls[0]
_, args, kwargs = history_call
self.assertEqual(args[0], "usa-stk-1min")
# only supported subset of fields is requested
self.assertSetEqual(set(kwargs["fields"]), {"Wap", "Volume"})
# times filter was passed
self.assertListEqual(kwargs["times"], ["09:30:00", "15:30:00"])
self.assertEqual(len(mock_download_market_data_file.mock_calls), 1)
realtime_call = mock_download_market_data_file.mock_calls[0]
_, args, kwargs = realtime_call
self.assertEqual(args[0], "usa-stk-snapshot-1min")
# only supported subset of fields is requested
self.assertListEqual(kwargs["fields"], ["LastClose"])
# times filter not passed
self.assertNotIn("times", list(kwargs.keys()))
self.assertEqual(len(mock_download_bundle_file.mock_calls), 1)
minute_call = mock_download_bundle_file.mock_calls[0]
_, args, kwargs = minute_call
self.assertEqual(args[0], "usstock-1min")
# only supported subset of fields is requested
self.assertSetEqual(set(kwargs["fields"]), {"Close", "Volume"})
# times filter was passed
self.assertListEqual(kwargs["times"], ["09:30:00", "15:30:00"])
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
waps = prices.loc["Wap"]
waps = waps.reset_index()
waps.loc[:, "Date"] = waps.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
waps.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 19.4, "FI23456": 51.59},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 18.56, "FI23456": 54.23}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 15000.0, "FI23456": 98000.0},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 7800.0, "FI23456": 179000.0},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 12400.0, "FI23456": 142500.0},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 14500.0, "FI23456": 124000.0}]
)
last_closes = prices.loc["LastClose"]
last_closes = last_closes.reset_index()
last_closes.loc[:, "Date"] = last_closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_closes.fillna("nan").to_dict(orient="records"),
# Data was UTC but now NY
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 'nan', "FI23456": 'nan'},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 39.4, "FI23456": 79.59},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 'nan', "FI23456": 'nan'},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 46.78, "FI23456": 81.56}]
)
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', 'FI12345': 48.9, 'FI23456': 59.5},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', 'FI12345': 49.4, 'FI23456': 59.59},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', 'FI12345': 55.49, 'FI23456': 59.34},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', 'FI12345': 56.78, 'FI23456': 51.56}]
)
def test_consolidate_overlapping_fields_and_respect_priority(self):
"""
Tests that when querying two databases with overlapping
dates/sids/fields, the value is taken from the db which was passed
first as an argument.
"""
def mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "1 day",
"universes": ["nyse-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
if code == "usa-stk-1d":
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
],
Volume=[
15000,
7800,
12400,
98000,
179000,
142500
]
))
else:
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
],
Close=[
5900,
5920,
5950],
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"nyse-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
# prioritize usa-stk-1d by passing first
prices = get_prices(["usa-stk-1d", "nyse-stk-1d"],
fields=["Close", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-03T00:00:00', "FI12345": 19.4, "FI23456": 51.59}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 15000, "FI23456": 98000},
{'Date': '2018-04-02T00:00:00', "FI12345": 7800, "FI23456": 179000},
{'Date': '2018-04-03T00:00:00', "FI12345": 12400, "FI23456": 142500}]
)
# repeat test but prioritize nyse-stk-1d by passing first
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices(["nyse-stk-1d", "usa-stk-1d"],
fields=["Close", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 5900.0, "FI23456": 50.5},
{'Date': '2018-04-02T00:00:00', "FI12345": 5920.0, "FI23456": 52.5},
{'Date': '2018-04-03T00:00:00', "FI12345": 5950.0, "FI23456": 51.59}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
# Since volume is null in nyse-stk-1d, we get the volume from usa-stk-1d
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 15000, "FI23456": 98000},
{'Date': '2018-04-02T00:00:00', "FI12345": 7800, "FI23456": 179000},
{'Date': '2018-04-03T00:00:00', "FI12345": 12400, "FI23456": 142500}]
)
def test_parse_bar_sizes(self):
"""
Tests that when querying a history and real-time database which have
different bar size strings which are nevertheless equivalent, this is
allowed.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "15 mins", # 15 mins plural (IB format)
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_get_realtime_db_config(db):
return {
"bar_size": "15 min", # 15 min Pandas format
"fields": ["LastClose","LastOpen","LastHigh","LastLow", "VolumeClose"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
"2018-04-01T09:30:00-04:00",
"2018-04-01T15:30:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T15:30:00-04:00",
],
Close=[
20.10,
20.50,
19.40,
18.56,
50.5,
52.5,
51.59,
54.23
],
Volume=[
15000,
7800,
12400,
14500,
98000,
179000,
142500,
124000,
]
))
prices.to_csv(f, index=False)
def mock_download_market_data_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
"2018-04-01T19:30:00+00",
"2018-04-02T19:30:00+00",
],
LastClose=[
30.50,
39.40,
79.5,
79.59,
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-15min",
"japan-stk-1d",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"usa-stk-snapshot": ["usa-stk-snapshot-15min"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.get_realtime_db_config', new=mock_get_realtime_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch('quantrocket.price.download_market_data_file', new=mock_download_market_data_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices(
["usa-stk-15min", "usa-stk-snapshot-15min"],
fields=["Close", "LastClose", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 19.4, "FI23456": 51.59},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 18.56, "FI23456": 54.23}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": 15000.0, "FI23456": 98000.0},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 7800.0, "FI23456": 179000.0},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": 12400.0, "FI23456": 142500.0},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 14500.0, "FI23456": 124000.0}]
)
last_closes = prices.loc["LastClose"]
last_closes = last_closes.reset_index()
last_closes.loc[:, "Date"] = last_closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
last_closes.fillna("nan").to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', 'Time': '09:30:00', "FI12345": "nan", "FI23456": "nan"},
{'Date': '2018-04-01T00:00:00', 'Time': '15:30:00', "FI12345": 30.5, "FI23456": 79.5},
{'Date': '2018-04-02T00:00:00', 'Time': '09:30:00', "FI12345": "nan", "FI23456": "nan"},
{'Date': '2018-04-02T00:00:00', 'Time': '15:30:00', "FI12345": 39.4, "FI23456": 79.59}]
)
def test_no_complain_no_data_multiple_dbs(self):
"""
Tests that if multiple dbs are queried and one lacks data but the
other has data, no error is raised.
"""
def mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "1 day",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
if code == "usa-stk-1d":
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
],
Date=[
"2018-04-01",
"2018-04-02",
"2018-04-03",
"2018-04-01",
"2018-04-02",
"2018-04-03"
],
Close=[
20.10,
20.50,
19.40,
50.5,
52.5,
51.59,
],
Volume=[
15000,
7800,
12400,
98000,
179000,
142500
]
))
prices.to_csv(f, index=False)
else:
raise NoHistoricalData("no history matches the query parameters")
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices(["usa-stk-1d", "japan-stk-1d"], fields=["Close", "Volume"])
self.assertListEqual(list(prices.index.names), ["Field", "Date"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz) # EOD is tz-naive
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
closes = prices.loc["Close"]
closes = closes.reset_index()
closes.loc[:, "Date"] = closes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.5},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 52.5},
{'Date': '2018-04-03T00:00:00', "FI12345": 19.4, "FI23456": 51.59}]
)
volumes = prices.loc["Volume"]
volumes = volumes.reset_index()
volumes.loc[:, "Date"] = volumes.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 15000, "FI23456": 98000},
{'Date': '2018-04-02T00:00:00', "FI12345": 7800, "FI23456": 179000},
{'Date': '2018-04-03T00:00:00', "FI12345": 12400, "FI23456": 142500}]
)
def test_complain_no_data_multiple_dbs(self):
"""
Tests that if multiple dbs are queried and all of them lack data, an
error is raised.
"""
def mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "1 day",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
raise NoHistoricalData("no history matches the query parameters")
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with self.assertRaises(NoHistoricalData) as cm:
get_prices(["usa-stk-1d", "japan-stk-1d"])
self.assertIn(
"no price data matches the query parameters in any of usa-stk-1d, japan-stk-1d",
str(cm.exception))
def test_complain_no_data_single_db(self):
"""
Tests that if a single db is queried and it lacks data, an
error is raised.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
raise NoHistoricalData("this error will be passed through as is")
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with self.assertRaises(NoHistoricalData) as cm:
get_prices("usa-stk-1d")
self.assertIn(
"this error will be passed through as is",
str(cm.exception))
def test_complain_if_tick_db(self):
"""
Tests error handling when multiple dbs are queried and one is a tick
db rather than a history or real-time agg db.
"""
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"japan-stk-15min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"],
"etf-taq": ["etf-taq-1h"],
}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with self.assertRaises(ParameterError) as cm:
get_prices(["usa-stk-1d", "etf-taq", "japan-stk-15min"])
self.assertIn((
"etf-taq is a real-time tick database, only history databases or "
"real-time aggregate databases are supported"
), str(cm.exception))
def test_complain_if_unknown_db(self):
"""
Tests error handling when an unknown db is queried.
"""
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"japan-stk-15min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"],
"etf-taq": ["etf-taq-1h"],
}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with self.assertRaises(ParameterError) as cm:
get_prices(["asx-stk-1d"])
self.assertIn((
"no history or real-time aggregate databases or Zipline bundles called asx-stk-1d"
), str(cm.exception))
def test_complain_if_multiple_bar_sizes(self):
"""
Tests error handling when multiple dbs are queried and they have
different bar sizes.
"""
def mock_get_history_db_config(db):
if db == "usa-stk-1d":
return {
"bar_size": "1 day",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
else:
return {
"bar_size": "30 mins",
"universes": ["japan-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"japan-stk-15min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
def mock_get_bundle_config(db):
return {
"ingest_type": "usstock",
"sids": None,
"universes": None,
"free": False,
"data_frequency": "minute",
"calendar": "XNYS",
"start_date": "2007-01-03"
}
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.get_bundle_config', new=mock_get_bundle_config):
# two history dbs
with self.assertRaises(ParameterError) as cm:
get_prices(["usa-stk-1d", "japan-stk-15min"])
self.assertIn((
"all databases must contain same bar size but usa-stk-1d, japan-stk-15min have different "
"bar sizes:"
), str(cm.exception))
# history db and zipline bundle
with self.assertRaises(ParameterError) as cm:
get_prices(["usa-stk-1d", "usstock-1min"])
self.assertIn((
"all databases must contain same bar size but usa-stk-1d, usstock-1min have different "
"bar sizes:"
), str(cm.exception))
def test_warn_if_no_history_service(self):
"""
Tests that a warning is triggered if the history service is not
available.
"""
def mock_list_history_databases():
response = requests.models.Response()
response.status_code = 502
raise requests.HTTPError(response=response)
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"],
"etf-taq": ["etf-taq-1h"],
}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with self.assertWarns(RuntimeWarning) as warning_cm:
with self.assertRaises(ParameterError) as cm:
get_prices(["asx-stk-1d"])
self.assertIn(
"Error while checking if asx-stk-1d is a history database, will assume it's not",
str(warning_cm.warning))
self.assertIn((
"no history or real-time aggregate databases or Zipline bundles called asx-stk-1d"
), str(cm.exception))
def test_warn_if_no_realtime_service(self):
"""
Tests that a warning is triggered if the realtime service is not
available.
"""
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"japan-stk-15min"
]
def mock_list_realtime_databases():
response = requests.models.Response()
response.status_code = 502
raise requests.HTTPError(response=response)
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with self.assertWarns(RuntimeWarning) as warning_cm:
with self.assertRaises(ParameterError) as cm:
get_prices(["asx-stk-1d"])
self.assertIn(
"Error while checking if asx-stk-1d is a realtime database, will assume it's not",
str(warning_cm.warning))
self.assertIn((
"no history or real-time aggregate databases or Zipline bundles called asx-stk-1d"
), str(cm.exception))
def test_warn_if_no_zipline_service(self):
"""
Tests that a warning is triggered if the zipline service is not
available.
"""
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"japan-stk-15min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"],
"etf-taq": ["etf-taq-1h"],
}
def mock_list_bundles():
response = requests.models.Response()
response.status_code = 502
raise requests.HTTPError(response=response)
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with self.assertWarns(RuntimeWarning) as warning_cm:
with self.assertRaises(ParameterError) as cm:
get_prices(["asx-stk-1d"])
self.assertIn(
"Error while checking if asx-stk-1d is a Zipline bundle, will assume it's not",
str(warning_cm.warning))
self.assertIn((
"no history or real-time aggregate databases or Zipline bundles called asx-stk-1d"
), str(cm.exception))
def test_intraday_pass_timezone(self):
"""
Tests handling of an intraday db when a timezone is specified.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "30 mins",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
],
Close=[
20.10,
20.25,
20.50,
20.38,
50.15,
50.59,
51.59,
51.67
],
Volume=[
1500,
7800,
1400,
800,
9000,
7100,
1400,
1500
]
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices(
"usa-stk-15min",
times=["09:30:00", "10:00:00"],
fields=["Close", "Volume"],
timezone="America/Chicago"
)
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
self.assertSetEqual(
set(prices.index.get_level_values("Field")),
{"Close", "Volume"})
self.assertSetEqual(
set(prices.index.get_level_values("Time")),
{"08:30:00", "09:00:00"})
closes = prices.loc["Close"]
closes_830 = closes.xs("08:30:00", level="Time")
closes_830 = closes_830.reset_index()
closes_830.loc[:, "Date"] = closes_830.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_830.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.15},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 51.59}]
)
closes_900 = closes.xs("09:00:00", level="Time")
closes_900 = closes_900.reset_index()
closes_900.loc[:, "Date"] = closes_900.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_900.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.25, "FI23456": 50.59},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.38, "FI23456": 51.67}]
)
volumes = prices.loc["Volume"]
volumes_830 = volumes.xs("08:30:00", level="Time")
volumes_830 = volumes_830.reset_index()
volumes_830.loc[:, "Date"] = volumes_830.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes_830.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 1500.0, "FI23456": 9000.0},
{'Date': '2018-04-02T00:00:00', "FI12345": 1400.0, "FI23456": 1400.0}]
)
def test_intraday_infer_timezone_from_securities(self):
"""
Tests handling of an intraday db when a timezone is not specified and
is therefore inferred from the securities master.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "30 mins",
"universes": ["usa-stk"],
"vendor": "ibkr"
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
],
Close=[
20.10,
20.25,
20.50,
20.38,
50.15,
50.59,
51.59,
51.67
],
Volume=[
1500,
7800,
1400,
800,
9000,
7100,
1400,
1500
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/New_York"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices(
"usa-stk-15min",
times=["09:30:00", "10:00:00"],
fields=["Close", "Volume"]
)
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
self.assertSetEqual(
set(prices.index.get_level_values("Field")),
{"Close", "Volume"})
self.assertSetEqual(
set(prices.index.get_level_values("Time")),
{"09:30:00", "10:00:00"})
closes = prices.loc["Close"]
closes_930 = closes.xs("09:30:00", level="Time")
closes_930 = closes_930.reset_index()
closes_930.loc[:, "Date"] = closes_930.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_930.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.15},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 51.59}]
)
closes_1000 = closes.xs("10:00:00", level="Time")
closes_1000 = closes_1000.reset_index()
closes_1000.loc[:, "Date"] = closes_1000.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_1000.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.25, "FI23456": 50.59},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.38, "FI23456": 51.67}]
)
volumes = prices.loc["Volume"]
volumes_930 = volumes.xs("09:30:00", level="Time")
volumes_930 = volumes_930.reset_index()
volumes_930.loc[:, "Date"] = volumes_930.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
volumes_930.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 1500.0, "FI23456": 9000.0},
{'Date': '2018-04-02T00:00:00', "FI12345": 1400.0, "FI23456": 1400.0}]
)
def test_complain_if_cannot_infer_timezone_from_securities(self):
"""
Tests error handling when a timezone is not specified and it cannot
be inferred from the securities master because multiple timezones are
represented.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "5 mins",
"universes": ["aapl-arb"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456"
],
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
"2018-04-01T08:30:00-05:00",
"2018-04-01T09:00:00-05:00",
"2018-04-02T08:30:00-05:00",
"2018-04-02T09:00:00-05:00",
],
Close=[
20.10,
20.25,
20.50,
20.38,
50.15,
50.59,
51.59,
51.67
],
Volume=[
1500,
7800,
1400,
800,
9000,
7100,
1400,
1500
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/Mexico_City"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min",
"aapl-arb-5min",
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {}
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
with patch("quantrocket.price.list_bundles", new=mock_list_bundles):
with self.assertRaises(ParameterError) as cm:
get_prices(
"aapl-arb-5min",
fields=["Close", "Volume"],
)
self.assertIn((
"cannot infer timezone because multiple timezones are present "
"in data, please specify timezone explicitly (timezones: America/New_York, America/Mexico_City)"
), str(cm.exception))
def test_multiple_timezones(self):
"""
Tests that multiple timezones are properly handled.
"""
def mock_get_history_db_config(db):
return {
"bar_size": "5 mins",
"universes": ["aapl-arb"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI23456",
"FI23456",
"FI23456",
"FI23456"
],
# These bars align but the times are different due to different timezones
Date=[
"2018-04-01T09:30:00-04:00",
"2018-04-01T10:00:00-04:00",
"2018-04-02T09:30:00-04:00",
"2018-04-02T10:00:00-04:00",
"2018-04-01T08:30:00-05:00",
"2018-04-01T09:00:00-05:00",
"2018-04-02T08:30:00-05:00",
"2018-04-02T09:00:00-05:00",
],
Close=[
20.10,
20.25,
20.50,
20.38,
50.15,
50.59,
51.59,
51.67
]
))
prices.to_csv(f, index=False)
def mock_download_master_file(f, *args, **kwargs):
securities = pd.DataFrame(dict(Sid=["FI12345","FI23456"],
Timezone=["America/New_York", "America/Mexico_City"]))
securities.to_csv(f, index=False)
f.seek(0)
def mock_list_history_databases():
return [
"usa-stk-1d",
"aapl-arb-5min",
"japan-stk-1d",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices(
"aapl-arb-5min",
fields=["Close"],
timezone="America/New_York"
)
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
self.assertSetEqual(
set(prices.index.get_level_values("Field")), {"Close"})
self.assertSetEqual(
set(prices.index.get_level_values("Time")),
{"09:30:00", "10:00:00"})
closes = prices.loc["Close"]
closes_930 = closes.xs("09:30:00", level="Time")
closes_930 = closes_930.reset_index()
closes_930.loc[:, "Date"] = closes_930.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_930.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.15},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 51.59}]
)
closes_1000 = closes.xs("10:00:00", level="Time")
closes_1000 = closes_1000.reset_index()
closes_1000.loc[:, "Date"] = closes_1000.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_1000.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.25, "FI23456": 50.59},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.38, "FI23456": 51.67}]
)
# repeat with a different timezone
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
with patch("quantrocket.price.download_master_file", new=mock_download_master_file):
prices = get_prices(
"aapl-arb-5min",
fields=["Close"],
timezone="America/Mexico_City"
)
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345","FI23456"])
self.assertSetEqual(
set(prices.index.get_level_values("Field")), {"Close"})
self.assertSetEqual(
set(prices.index.get_level_values("Time")),
{"08:30:00", "09:00:00"})
closes = prices.loc["Close"]
closes_830 = closes.xs("08:30:00", level="Time")
closes_830 = closes_830.reset_index()
closes_830.loc[:, "Date"] = closes_830.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_830.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.1, "FI23456": 50.15},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.5, "FI23456": 51.59}]
)
closes_900 = closes.xs("09:00:00", level="Time")
closes_900 = closes_900.reset_index()
closes_900.loc[:, "Date"] = closes_900.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
closes_900.to_dict(orient="records"),
[{'Date': '2018-04-01T00:00:00', "FI12345": 20.25, "FI23456": 50.59},
{'Date': '2018-04-02T00:00:00', "FI12345": 20.38, "FI23456": 51.67}]
)
def test_intraday_fill_missing_times(self):
"""
Tests that each date is expanded to include all times represented in
any date. (This makes the data easier to work when there early close
days or when the data is queried intraday.)
"""
def mock_get_history_db_config(db):
return {
"bar_size": "2 hours",
"universes": ["usa-stk"],
"vendor": "ibkr",
"fields": ["Close","Open","High","Low", "Volume"]
}
def mock_download_history_file(code, f, *args, **kwargs):
prices = pd.DataFrame(
dict(
Sid=[
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI12345",
"FI12345"
],
Date=[
# complete day
"2018-04-01T10:00:00-04:00",
"2018-04-01T12:00:00-04:00",
"2018-04-01T14:00:00-04:00",
# early close
"2018-04-02T10:00:00-04:00",
"2018-04-02T12:00:00-04:00",
# Complete day
"2018-04-03T10:00:00-04:00",
"2018-04-03T12:00:00-04:00",
"2018-04-03T14:00:00-04:00",
# intraday query
"2018-04-04T10:00:00-04:00",
],
Close=[
20.10,
20.25,
20.50,
20.38,
21.15,
22.59,
21.59,
20.67,
21.34
]
))
prices.to_csv(f, index=False)
def mock_list_history_databases():
return [
"usa-stk-1d",
"japan-stk-1d",
"usa-stk-2h",
"usa-stk-15min",
"demo-stk-1min"
]
def mock_list_realtime_databases():
return {"demo-stk-taq": ["demo-stk-taq-1h"]}
def mock_list_bundles():
return {"usstock-1min": True}
with patch('quantrocket.price.list_bundles', new=mock_list_bundles):
with patch('quantrocket.price.list_realtime_databases', new=mock_list_realtime_databases):
with patch('quantrocket.price.list_history_databases', new=mock_list_history_databases):
with patch('quantrocket.price.get_history_db_config', new=mock_get_history_db_config):
with patch('quantrocket.price.download_history_file', new=mock_download_history_file):
prices = get_prices(
"usa-stk-2h",
timezone="America/New_York"
)
self.assertListEqual(list(prices.index.names), ["Field", "Date", "Time"])
self.assertEqual(prices.columns.name, "Sid")
dt = prices.index.get_level_values("Date")
self.assertTrue(isinstance(dt, pd.DatetimeIndex))
self.assertIsNone(dt.tz)
self.assertListEqual(list(prices.columns), ["FI12345"])
self.assertSetEqual(
set(prices.index.get_level_values("Field")),
{"Close"})
self.assertSetEqual(
set(prices.index.get_level_values("Time")),
{"10:00:00", "12:00:00", "14:00:00"})
closes = prices.loc["Close"]
all_days = pd.DatetimeIndex(["2018-04-01", "2018-04-02", "2018-04-03", "2018-04-04"])
for time in ["10:00:00", "12:00:00", "14:00:00"]:
idx = closes.xs(time, level="Time").index
self.assertListEqual(
list(idx),
list(all_days),
msg=time
)
# replace nan with "nan" to allow equality comparisons
closes = closes.where(closes.notnull(), "nan")
closes = closes["FI12345"]
self.assertEqual(
closes.xs("14:00:00", level="Time").loc["2018-04-02"], "nan"
)
self.assertEqual(
closes.xs("12:00:00", level="Time").loc["2018-04-04"], "nan"
)
self.assertEqual(
closes.xs("14:00:00", level="Time").loc["2018-04-04"], "nan"
)
class GetPricesReindexedLikeTestCase(unittest.TestCase):
"""
Test cases for `quantrocket.get_prices_reindexed_like`.
"""
def test_complain_if_date_level_not_in_index(self):
"""
Tests error handling when reindex_like doesn't have an index named
Date.
"""
closes = pd.DataFrame(
np.random.rand(3,2),
columns=["FI12345","FI23456"],
index=pd.date_range(start="2018-01-01", periods=3, freq="D"))
with self.assertRaises(ParameterError) as cm:
get_prices_reindexed_like(closes, "custom-fundamental-1d")
self.assertIn("reindex_like must have index called 'Date'", str(cm.exception))
def test_complain_if_not_datetime_index(self):
"""
Tests error handling when the reindex_like index is named Date but is
not a DatetimeIndex.
"""
closes = pd.DataFrame(
np.random.rand(3,2),
columns=["FI12345","FI23456"],
index=pd.Index(["foo","bar","bat"], name="Date"))
with self.assertRaises(ParameterError) as cm:
get_prices_reindexed_like(closes, "custom-fundamental-1d")
self.assertIn("reindex_like must have a DatetimeIndex", str(cm.exception))
@patch("quantrocket.price.get_prices")
def test_pass_args_correctly(self, mock_get_prices):
"""
Tests that codes, sids, date ranges and and other args are correctly
passed to get_prices.
"""
closes = pd.DataFrame(
np.random.rand(6,2),
columns=["FI12345","FI23456"],
index=pd.date_range(start="2018-03-06", periods=6, freq="D", name="Date"))
def _mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-03-04"])
fields = ["EPS","Revenue"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# EPS
9,
# Revenue
10e5,
],
"FI23456": [
# EPS
19.89,
# Revenue
5e8
],
},
index=idx
)
return prices
mock_get_prices.return_value = _mock_get_prices()
get_prices_reindexed_like(
closes, "custom-fundamental-1d",
fields=["Revenue", "EPS"],
lookback_window=2,
timezone="America/New_York",
times=["11:00:00", "12:00:00"],
cont_fut=False,
data_frequency="daily")
get_prices_call = mock_get_prices.mock_calls[0]
_, args, kwargs = get_prices_call
self.assertEqual(args[0], "custom-fundamental-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456"])
self.assertEqual(kwargs["start_date"], "2018-03-04") # 2018-03-06 - 2 day lookback window
self.assertEqual(kwargs["end_date"], "2018-03-11")
self.assertEqual(kwargs["fields"], ["Revenue", "EPS"])
self.assertEqual(kwargs["timezone"], "America/New_York")
self.assertEqual(kwargs["times"], ["11:00:00", "12:00:00"])
self.assertFalse(kwargs["cont_fut"])
self.assertEqual(kwargs["data_frequency"], "daily")
# repeat with default lookback
get_prices_reindexed_like(
closes, "custom-fundamental-1d",
fields="Revenue")
get_prices_call = mock_get_prices.mock_calls[1]
_, args, kwargs = get_prices_call
self.assertEqual(args[0], "custom-fundamental-1d")
self.assertListEqual(kwargs["sids"], ["FI12345","FI23456"])
self.assertEqual(kwargs["start_date"], "2018-02-24") # 2018-03-06 - 10 day lookback window
self.assertEqual(kwargs["end_date"], "2018-03-11")
self.assertEqual(kwargs["fields"], "Revenue")
def test_tz_aware_index(self):
"""
Tests that reindex_like.index can be tz-naive or tz-aware.
"""
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-03-31", "2018-06-30"])
fields = ["EPS"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
9,
9.5,
],
"FI23456": [
19.89,
17.60
],
},
index=idx
)
return prices
with patch('quantrocket.price.get_prices', new=mock_get_prices):
# request with tz_naive
closes = pd.DataFrame(
np.random.rand(4,1),
columns=["FI12345"],
index=pd.date_range(start="2018-06-30", periods=4, freq="D", name="Date"))
data = get_prices_reindexed_like(
closes, "custom-fundamental", fields="EPS")
self.assertSetEqual(set(data.index.get_level_values("Field")), {"EPS"})
eps = data.loc["EPS"]
self.assertListEqual(list(eps.index), list(closes.index))
self.assertListEqual(list(eps.columns), list(closes.columns))
eps = eps.reset_index()
eps.loc[:, "Date"] = eps.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
eps.to_dict(orient="records"),
[{'Date': '2018-06-30T00:00:00', 'FI12345': 9.0},
{'Date': '2018-07-01T00:00:00', 'FI12345': 9.5},
{'Date': '2018-07-02T00:00:00', 'FI12345': 9.5},
{'Date': '2018-07-03T00:00:00', 'FI12345': 9.5}]
)
with patch('quantrocket.price.get_prices', new=mock_get_prices):
# request with tz aware
closes = pd.DataFrame(
np.random.rand(4,1),
columns=["FI12345"],
index=pd.date_range(start="2018-06-30", periods=4, freq="D",
tz="America/New_York",name="Date"))
data = get_prices_reindexed_like(
closes, "custom-fundamental", fields="EPS")
self.assertSetEqual(set(data.index.get_level_values("Field")), {"EPS"})
eps = data.loc["EPS"]
self.assertListEqual(list(eps.index), list(closes.index))
self.assertListEqual(list(eps.columns), list(closes.columns))
eps = eps.reset_index()
eps.loc[:, "Date"] = eps.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertListEqual(
eps.to_dict(orient="records"),
[{'Date': '2018-06-30T00:00:00-0400', 'FI12345': 9.0},
{'Date': '2018-07-01T00:00:00-0400', 'FI12345': 9.5},
{'Date': '2018-07-02T00:00:00-0400', 'FI12345': 9.5},
{'Date': '2018-07-03T00:00:00-0400', 'FI12345': 9.5}])
def test_shift(self):
"""
Tests handling of the shift parameter.
"""
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-03-31", "2018-06-30"])
fields = ["EPS"]
idx = | pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"]) | pandas.MultiIndex.from_product |
import asyncio
import sys
import random as rand
import os
from .integration_test_utils import setup_teardown_test, _generate_table_name, V3ioHeaders, V3ioError
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget, ParquetSource, \
DataframeSource, ReduceToDataFrame
import pandas as pd
import aiohttp
import pytest
import v3io
import uuid
import datetime
@pytest.fixture()
def v3io_create_csv():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
asyncio.run(_write_test_csv(file_path))
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
@pytest.fixture()
def v3io_teardown_file():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
async def _write_test_csv(file_path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
await client_session.put(f'{v3io_access._webapi_url}/{file_path}', data=data,
headers=v3io_access._get_put_file_headers, ssl=False)
finally:
await client_session.close()
async def _delete_file(path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
response = await client_session.delete(f'{v3io_access._webapi_url}/{path}',
headers=v3io_access._get_put_file_headers, ssl=False)
if response.status >= 300 and response.status != 404 and response.status != 409:
body = await response.text()
raise V3ioError(f'Failed to delete item at {path}. Response status code was {response.status}: {body}')
finally:
await client_session.close()
def test_csv_reader_from_v3io(v3io_create_csv):
controller = build_flow([
CSVSource(f'v3io:///{v3io_create_csv}', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
def test_csv_reader_from_v3io_error_on_file_not_found():
controller = build_flow([
CSVSource('v3io:///bigdatra/tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_v3io(v3io_teardown_csv):
controller = build_flow([
AsyncEmitSource(),
CSVTarget(f'v3io:///{v3io_teardown_csv}', columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.aio.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_csv.split('/', 1)
result = await v3io_client.object.get(container, path)
finally:
await v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_to_v3io(v3io_teardown_file):
asyncio.run(async_test_write_csv_to_v3io(v3io_teardown_file))
def test_write_csv_with_dict_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_infer_columns_without_header_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_to_parquet_to_v3io(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected_in_pyarrow1 = pd.DataFrame(expected, columns=columns)
expected_in_pyarrow3 = expected_in_pyarrow1.copy()
expected_in_pyarrow1['my_int'] = expected_in_pyarrow1['my_int'].astype('int32')
expected_in_pyarrow3['my_int'] = expected_in_pyarrow3['my_int'].astype('category')
controller.terminate()
controller.await_termination()
read_back_df = | pd.read_parquet(out_dir, columns=columns) | pandas.read_parquet |
"""
This script is for analysing the outputs from the implementation of DeepAR in GluonTS
"""
import os, time
from pathlib import Path
import streamlit as st
import pandas as pd
import numpy as np
from gluonts.model.predictor import Predictor
from gluonts.dataset.common import ListDataset
from gluonts.transform import FieldName
from gluonts.evaluation.backtest import make_evaluation_predictions
import autodraft.visualization as viz
import autodraft.gluonts as glu
import autodraft.api as nhl
from bokeh.sampledata.perceptions import probly
# @st.cache
def load_model(file_path):
model = Predictor.deserialize(Path(file_path))
return model
@st.cache
def get_data(path='../../data/input/full_dataset_4_seasons.csv'):
data = pd.read_csv(path)
return data
# @st.cache
# def load_predictions(path='/home/ubuntu/AutoDraft/data/deepar_truncated_results_ne100_lre-4_bs64.csv'):
# data = pd.read_csv(path, index_col=2)
# return data
@st.cache
def load_predictions(path='../../data/output/deepar_truncated_results_unit_s_ne300_lr1e-3_bs64_nl3_cl3.csv'):
data = pd.read_csv(path, index_col=2)
model_name = path.split('/')[-1].split('.')[0]
return data, model_name
@st.cache
def load_joe():
joe = pd.read_csv('../../data/input/joe_schmo_4_seasons.csv')
return joe
@st.cache
def get_roster(path='../../data/input/full_roster_4_seasons.csv'):
data = pd.read_csv(path)
return data
@st.cache
def process_data(data, roster):
train, test, targets, targets_meta, stat_cat_features, dyn_cat_features, dyn_real_features, dyn_real_features_meta = glu.prep_df(data,
roster,
column_list=['name', 'date', 'gameNumber', 'cumStatpoints'],
streamlit=True,
scale=True)
return train, test, targets, targets_meta, stat_cat_features, dyn_cat_features, dyn_real_features, dyn_real_features_meta
# @st.cache
def run_prediction(model, data):
predictions = model.predict(dataset=data)
return list(predictions)
def process_prediction(prediction):
mean = prediction.mean_ts
mean = mean.reset_index()
mean = mean.rename(columns={'index': 'predictions'})
mean = mean.reset_index()
mean = mean.rename(columns={'index': 'gameNumber'})
mean = mean.drop(columns=[0])
mean.loc[:, 'gameNumber'] = mean.loc[:, 'gameNumber'] + 1
conf = pd.DataFrame()
conf.loc[:, 'low'] = prediction.quantile('0.05')
conf.loc[:, 'high'] = prediction.quantile('0.95')
full_df = pd.concat([mean, conf], axis=1)
return full_df
def generate_prediction_df(predictions, train_data, test_data, drop=True, target='cumStatpoints', scaled=None, scaling_loc=None):
if scaled is not None:
scaling_meta = pd.read_pickle(scaling_loc)
st.write(scaling_meta)
names = test_data.loc[:, 'name'].unique()
full_predictions = pd.DataFrame()
for prediction, name in zip(predictions, names):
player_df = pd.DataFrame()
player_test_data = test_data.loc[test_data.loc[:, 'name'] == name].loc[:, ['name', 'date', target]]
player_train_data = train_data.loc[train_data.loc[:, 'name'] == name].loc[:, ['name', 'date', target]]
player_test_data.loc[:, 'date'] = pd.to_datetime(player_test_data.loc[:, 'date'])
player_train_data.loc[:, 'date'] = pd.to_datetime(player_train_data.loc[:, 'date'])
test_length = player_test_data.shape[0]
prediction_df = process_prediction(prediction)
# prediction_df.loc[:, 'name'] = name
if drop:
prediction_df = prediction_df.iloc[:test_length, :]
player_test_data.reset_index(drop=True, inplace=True)
prediction_df.reset_index(drop=True, inplace=True)
if scaled == 'ss':
scale_data = scaling_meta.loc[scaling_meta.loc[:, 'name'] == name]
for column in ['predictions', 'low', 'high']:
prediction_df.loc[:, column] = ((prediction_df.loc[:, column] * scale_data['maxabs']) \
* scale_data['std']) + scale_data['mean']
elif scaled == 'unit':
scale_data = scaling_meta.loc[scaling_meta.loc[:, 'name'] == name]
for column in ['predictions', 'low', 'high']:
prediction_df.loc[:, column] = (prediction_df.loc[:, column] - scale_data['min'].values) / scale_data['scale'].values
player_train_data.reset_index(drop=True, inplace=True)
player_test_df = pd.concat([player_test_data, prediction_df], axis=1)
player_df = | pd.concat([player_train_data, player_test_df]) | pandas.concat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.