| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 14:52:35 2019
@author: KatieSi
"""
# Import packages
import numpy as np
import pandas as pd
import pdsql
from datetime import datetime, timedelta
# Set Variables
ReportName= 'Summary Tables'
RunDate = datetime.now()
# Set Risk Parameters
SummaryTableRunDate = datetime.strptime('2019-08-08', '%Y-%m-%d').date()
# Water Use Summary
ConsentSummaryCol = [
'SummaryConsentID',
'Consent',
'Activity',
'ErrorMsg',
# 'MaxAnnualVolume', #not used previous years
# 'MaxConsecutiveDayVolume', #not used previous years
# 'NumberOfConsecutiveDays', #not used previous years
'MaxTakeRate', #not used previous years
# 'HasFlowRestrictions', #unreliable. not used previous years
# 'ComplexAllocation', #not used previous years
# 'TotalVolumeAboveRestriction',
# 'TotalDaysAboveRestriction',
# 'TotalVolumeAboveNDayVolume', #unreliable. not used previous years
# 'TotalDaysAboveNDayVolume', #unreliable. not used previous years
# 'MaxVolumeAboveNDayVolume', #unreliable. not used previous years
# 'MedianVolumeAboveNDayVolume', #unreliable. not used previous years
'PercentAnnualVolumeTaken',
'TotalTimeAboveRate', #not used previous years
'MaxRateTaken',
'MedianRateTakenAboveMaxRate' #not available in previous year
]
ConsentSummaryColNames = {
'SummaryConsentID' : 'CS_ID',
'Consent' : 'ConsentNo',
'ErrorMsg' : 'CS_ErrorMSG',
'MaxTakeRate' : 'ConsentRate',
'PercentAnnualVolumeTaken': 'CS_PercentAnnualVolume',
'TotalTimeAboveRate' : 'CS_TimeAboveRate', #not used previous years
'MaxRateTaken' : 'CS_MaxRate',
'MedianRateTakenAboveMaxRate' : 'CS_MedianRateAbove'
}
ConsentSummaryImportFilter = {
'RunDate' : SummaryTableRunDate
}
ConsentSummary_date_col = 'RunDate'
ConsentSummary_from_date = SummaryTableRunDate
ConsentSummary_to_date = SummaryTableRunDate
ConsentSummaryServer = 'SQL2012Prod03'
ConsentSummaryDatabase = 'Hilltop'
ConsentSummaryTable = 'ComplianceSummaryConsent' #change after run is finished
ConsentSummary = pdsql.mssql.rd_sql(
server = ConsentSummaryServer,
database = ConsentSummaryDatabase,
table = ConsentSummaryTable,
col_names = ConsentSummaryCol,
date_col = ConsentSummary_date_col,
from_date= ConsentSummary_from_date,
to_date = ConsentSummary_to_date
)
ConsentSummary.rename(columns=ConsentSummaryColNames, inplace=True)
ConsentSummary['ConsentNo'] = ConsentSummary['ConsentNo'].str.strip().str.upper()
ConsentSummary['Activity'] = ConsentSummary['Activity'].str.strip().str.lower()
ConsentSummary['CS_PercentMaxRate'] = (
ConsentSummary['CS_MaxRate']/ConsentSummary['ConsentRate'])*100
ConsentSummary['CS_PercentMedRate'] = (
ConsentSummary['CS_MedianRateAbove']/ConsentSummary['ConsentRate'])*100
print('\nConsentSummary Table ',
ConsentSummary.shape,'\n',
ConsentSummary.nunique(), '\n\n')
# SummaryConsent = SummaryConsent[SummaryConsent['RunDate'] == SummaryTableRunDate]
WAPSummaryCol = [
'SummaryWAPID',
'Consent',
'Activity',
'WAP',
'MeterName',
'ErrorMsg',
# 'MaxAnnualVolume', #not used previous years
# 'MaxConsecutiveDayVolume', #not used previous years
# 'NumberOfConsecutiveDays', #not used previous years
'MaxTakeRate', #not used previous years
'FromMonth',#not used previous years
'ToMonth',#not used previous years
# 'TotalVolumeAboveRestriction',
# 'TotalDaysAboveRestriction',
# 'TotalVolumeAboveNDayVolume', #unreliable. not used previous years
# 'TotalDaysAboveNDayVolume', #unreliable. not used previous years
# 'MaxVolumeAboveNDayVolume', #unreliable. not used previous years
# 'MedianVolumeTakenAboveMaxVolume', #unreliable. not used previous years
'PercentAnnualVolumeTaken',
'TotalTimeAboveRate', #not used previous years
'MaxRateTaken',#not used previous years
'MedianRateTakenAboveMaxRate', #not available in previous year
'TotalMissingRecord'#unreliable.
]
WAPSummaryColNames = {
'SummaryWAPID' : 'WS_ID',
'Consent' : 'ConsentNo',
'MaxTakeRate' : 'WAPRate',
'FromMonth' : 'WAPFromMonth',
'ToMonth' : 'WAPToMonth',
'MeterName' : 'WS_MeterName',
'ErrorMsg' : 'WS_ErrorMsg',
'PercentAnnualVolumeTaken': 'WS_PercentAnnualVolume',
'TotalTimeAboveRate' : 'WS_TimeAboveRate',
'MaxRateTaken' : 'WS_MaxRate',#not used previous years
'MedianRateTakenAboveMaxRate' : 'WS_MedianRateAbove', #not available in previous year
'TotalMissingRecord' : 'WS_TotalMissingRecord'
}
WAPSummaryImportFilter = {
'RunDate' : SummaryTableRunDate
}
WAPSummary_date_col = 'RunDate'
WAPSummary_from_date = SummaryTableRunDate
WAPSummary_to_date = SummaryTableRunDate
WAPSummaryServer = 'SQL2012Prod03'
WAPSummaryDatabase = 'Hilltop'
WAPSummaryTable = 'ComplianceSummaryWAP' #change after run is finished
WAPSummary = pdsql.mssql.rd_sql(
server = WAPSummaryServer,
database = WAPSummaryDatabase,
table = WAPSummaryTable,
col_names = WAPSummaryCol,
date_col = WAPSummary_date_col,
from_date= WAPSummary_from_date,
to_date = WAPSummary_to_date
)
WAPSummary.rename(columns=WAPSummaryColNames, inplace=True)
WAPSummary['ConsentNo'] = WAPSummary['ConsentNo'].str.strip().str.upper()
WAPSummary['Activity'] = WAPSummary['Activity'].str.strip().str.lower()
WAPSummary['WAP'] = WAPSummary['WAP'].str.strip().str.upper()
WAPSummary['WS_PercentMaxRoT'] = (
WAPSummary['WS_MaxRate']/WAPSummary['WAPRate'])*100
WAPSummary['WS_PercentMedRoT'] = (
WAPSummary['WS_MedianRateAbove']/WAPSummary.WAPRate)*100
print('\nWAPSummary Table ',
WAPSummary.shape,'\n',
WAPSummary.nunique(), '\n\n')
############################################################################
Baseline = pd.merge(Baseline, ConsentSummary,
on = ['ConsentNo','Activity'],
how = 'left')
Baseline = pd.merge(Baseline, WAPSummary,
on = ['ConsentNo','WAP','Activity','WAPFromMonth','WAPToMonth'],
how = 'left')
print('\nBaseline Table ',
Baseline.shape,'\n',
Baseline.nunique(), '\n\n')
# many examples of limits in Accela not matching those from Hilltop
# 219 records w/o ConsentSummary but with a ConsentLimit
# 2 records w/ ConsentSummary but no ConsentLimit
# 2312 records w/o WAPSummary but with a WAPLimit
# 3266 records w/ WAPSummary but no real WAPLimits
# 4027 records have the expected Summary-Limits matching
############################################################################
#SQL
# distinct Consent
# ,([master].[dbo].[fRemoveExtraCharacters](ErrorMsg)) as ErrorMessage
# ,[TotalMissingRecord]
# ,(Case when ([TotalVolumeAboveRestriction] is null or [TotalVolumeAboveRestriction] = 0 or [TotalDaysAboveRestriction] is null) then 0 else
# (case when ([TotalVolumeAboveRestriction]>100 and [TotalDaysAboveRestriction] > 2) then 100 else 1 end) end) as LFNC
# ,(case when ([PercentAnnualVolumeTaken] <=100 or [PercentAnnualVolumeTaken] is null ) then 0 else
# (case when ([PercentAnnualVolumeTaken] >200 ) then 2000 else
# (case when ([PercentAnnualVolumeTaken] >100 ) then 100 else 1 end)end)end) as AVNC
# ,(case when ([MaxVolumeAboveNDayVolume] is null or (([MaxVolumeAboveNDayVolume]+[MaxConsecutiveDayVolume])/[MaxConsecutiveDayVolume]*100)<=100) then 0 else
# (case when (([NumberOfConsecutiveDays]=1 and (([MaxVolumeAboveNDayVolume]+[MaxConsecutiveDayVolume])/[MaxConsecutiveDayVolume]*100)>105) or
# ([NumberOfConsecutiveDays]>1 and (([MaxVolumeAboveNDayVolume]+[MaxConsecutiveDayVolume])/[MaxConsecutiveDayVolume]*100)>120) ) then 100 else
# 1 end)end) as CDNC
# ,(case when ([MaxTakeRate] is null or [MaxRateTaken] is null or ([MaxRateTaken]/[MaxTakeRate])*100<=100 ) then 0 else
# (case when (([MaxRateTaken]/[MaxTakeRate])*100>105) then 100 else 1 end) end) as RoTNC
# ,(case when ([TotalMissingRecord] is null or [TotalMissingRecord] = 0) then 0 else
# (case when ([TotalMissingRecord] > 0 and [TotalMissingRecord] <= 10) then 5 else
# (case when ([TotalMissingRecord] > 100) then 10000 else 5000 end)end)end) as MRNC
# MRNC - zeros vs nulls: the SQL CASE treats 0 the same as NULL, while Python
# distinguishes them (0 is falsy, not null), so both are tested explicitly below.
HighThresholdMR = 100
LowThresholdMR = 10
HighRiskMR = 10000
MedRiskMR = 5
LowRiskMR = 0
OtherRiskMR = 5000
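# np.select mirrors the nested SQL CASE logic sketched in the comment block
# above: the conditions are evaluated in order, the first match wins, and rows
# matching none of them fall through to the default risk score.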
conditions = [
(pd.isnull(WAPSummary['WS_TotalMissingRecord'])) |
(WAPSummary['WS_TotalMissingRecord'] == 0),
(WAPSummary['WS_TotalMissingRecord'] > HighThresholdMR),
(WAPSummary['WS_TotalMissingRecord'] > 0) &
(WAPSummary['WS_TotalMissingRecord'] <= LowThresholdMR)
]
choices = [LowRiskMR, HighRiskMR, MedRiskMR]
WAPSummary['WS_MRNC'] = np.select(conditions, choices, default = OtherRiskMR)
#AVNC
HighVolumeThresholdAV = 200
LowVolumeThresholdAV = 100
HighRiskAV = 2000
MedRiskAV = 100
LowRiskAV = 0
OtherRiskAV = 1
conditions = [
(pd.isnull(ConsentSummary['CS_PercentAnnualVolume'])) |
(ConsentSummary['CS_PercentAnnualVolume'] <= LowVolumeThresholdAV),
(ConsentSummary['CS_PercentAnnualVolume'] > HighVolumeThresholdAV),
(ConsentSummary['CS_PercentAnnualVolume'] > LowVolumeThresholdAV) &
(ConsentSummary['CS_PercentAnnualVolume'] <= HighVolumeThresholdAV)
]
choices = [LowRiskAV, HighRiskAV, MedRiskAV]
ConsentSummary['CS_AVNC'] = np.select(conditions, choices, default = OtherRiskAV)
conditions = [
    (pd.isnull(WAPSummary['WS_PercentAnnualVolume']))
#!/usr/bin/env python
import pandas as pd
import numpy as np
import multiprocessing
import argparse
import operator
import os
import random
import sys
import time
import random
import subprocess
import pysam
import collections
import warnings
import math
import re
from Bio import SeqIO
base_path = os.path.split(__file__)[0]
def fragment_distribution(samfile):
all_reads = samfile.fetch()
size_freq = collections.defaultdict(int)
for read in all_reads:
if read.rnext == read.tid and read.is_paired:
size = abs(read.isize)
size_freq[size] += 1
return size_freq
def FragMAD(freq):
"""
    Calculate the median and the median absolute deviation (MAD) of the fragment size distribution.
"""
all_size = []
for key, value in freq.items():
all_size.extend([key] * int(value))
median_size = np.median(all_size)
residuals = abs(np.array(all_size) - median_size)
mad_size = 1.4826 * np.median(residuals)
return median_size, mad_size
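# A minimal usage sketch (not called anywhere in this script), assuming an
# indexed, paired-end BAM file; "bam_path" is a placeholder argument. It shows
# how the two functions above combine into the proper-pair insert-size window
# used elsewhere in this script.
def _example_insert_size_window(bam_path):
    samfile = pysam.AlignmentFile(bam_path, "rb")
    size_freq = fragment_distribution(samfile)
    # 1.4826 scales the MAD so it matches the standard deviation under normality
    mu, dev = FragMAD(size_freq)
    return mu - 3 * dev, mu + 3 * dev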
def split_sam(args):
split_command = ' '.join(['sh',
os.path.join(base_path, "split_sam.sh"),
args.assemblies,
args.bamfile,
args.output,
args.samtools])
os.system(split_command)
def seq_parse(args):
input = SeqIO.parse(args.assemblies, "fasta")
contig_seqs = {}
for record in input:
if len(record.seq) >= args.min_length:
contig_seqs[record.id] = str(record.seq)
return contig_seqs
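# kmer_parse slides a 25-bp window along each contig and looks every 25-mer up
# in the per-contig KAD pool; the k-mer length here must match the "-m 25"
# setting used for the jellyfish counts in KAD() further below.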
def kmer_parse(seq, pool):
seq_kmer = {"position": [], "KAD": []}
for i in range(len(seq)):
if seq[i:(i + 25)] in pool:
seq_kmer["KAD"].append(pool[seq[i:(i + 25)]])
seq_kmer["position"].append(i + 1)
if (i + 25) >= len(seq):
break
return seq_kmer
def KAD_window_cal(seq_kmer):
KAD_window_dict = {"start_pos": [],
"mean_KAD": [],
"abnormal_KAD_ratio": [],
"dev_KAD": []}
for i in range(300, len(seq_kmer['position']), 100):
KAD_window_dict["start_pos"].append(i)
mean_KAD = np.mean(np.abs(seq_kmer['KAD'][i:i + 100]))
KAD_window_dict["mean_KAD"].append(mean_KAD)
KAD_window_dict["abnormal_KAD_ratio"].append(
np.sum(np.abs(seq_kmer['KAD'][i:i + 100]) > 0.5) / 100)
KAD_window_dict["dev_KAD"].append(
np.sqrt(np.var(np.abs(seq_kmer['KAD'][i:i + 100]))))
return KAD_window_dict
def KAD_feature(args):
seq_data = seq_parse(args)
KAD_dict = {"contig": [],
'start_pos': [],
'mean_KAD': [],
'abnormal_KAD_ratio': [],
'dev_KAD': []}
for contig, seq in seq_data.items():
if len(seq) < args.min_length:
continue
if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig)))):
try:
KAD_data = pd.read_csv(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig))), index_col=0, sep="\t")
KAD_data = KAD_data.drop_duplicates(['k-mer'])
except BaseException:
continue
KAD_data.index = KAD_data['k-mer']
KAD_pool = KAD_data.loc[:, 'KAD'].to_dict()
seq_kmer = kmer_parse(seq, KAD_pool)
KAD_window = KAD_window_cal(seq_kmer)
KAD_dict["contig"].extend([contig] * len(KAD_window['start_pos']))
KAD_dict["start_pos"].extend(KAD_window['start_pos'])
KAD_dict["mean_KAD"].extend(KAD_window["mean_KAD"])
KAD_dict["abnormal_KAD_ratio"].extend(
KAD_window["abnormal_KAD_ratio"])
KAD_dict["dev_KAD"].extend(KAD_window["dev_KAD"])
return KAD_dict
def KAD(args, contig, file):
    if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
                                   "{}.KAD".format(str(contig)))):
return 0
contig_file = os.path.join(args.output, "temp/split/contigs/", "{}.fa".format(file))
read_file = os.path.join(args.output,
"temp/split/reads/{}.read.fa".format(str(contig)))
# kmer count
outputdir = os.path.join(args.output, "temp/KAD/temp")
contig_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.jf'.format(str(contig))),
"-s 100M -t 8",
contig_file])
contig_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.txt'.format(str(contig))),
os.path.join(outputdir, '{}.jf'.format(str(contig)))])
os.system(contig_command1)
os.system(contig_command2)
read_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.read.jf'.format(str(contig))),
"-s 100M -t 8",
read_file])
read_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.read.txt'.format(str(contig))),
os.path.join(outputdir, '{}.read.jf'.format(str(contig)))])
os.system(read_command1)
os.system(read_command2)
assembly_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.txt".format(str(contig))), sep="\t", header=None)
assembly_kmer.index = assembly_kmer[0]
try:
read_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.read.txt".format(str(contig))),
sep="\t", header=None)
read_kmer.index = read_kmer[0]
except BaseException:
# zero reads mapped to contig
return 0
shared_kmer = set(assembly_kmer.loc[assembly_kmer[1] == 1, 0]).intersection(read_kmer.index)
if len(shared_kmer) == 0:
kmer_depth = pd.value_counts(read_kmer.loc[read_kmer[1] > 5, 1]).index[0]
else:
kmer_depth = pd.value_counts(read_kmer.loc[shared_kmer, ][1]).index[0]
assembly_kmer.columns = ['k-mer', 'assembly_count']
read_kmer.columns = ['k-mer', 'read_count']
assembly_kmer.index = range(assembly_kmer.shape[0])
read_kmer.index = range(read_kmer.shape[0])
kmer_result = pd.merge(assembly_kmer, read_kmer, how='outer')
kmer_result = kmer_result.fillna(0)
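    # KAD compares the observed k-mer count in the reads with the count
    # expected from the assembly copy number at the estimated k-mer depth:
    # KAD = log2((read_count + depth) / (depth * (assembly_count + 1))).
    # Values near 0 indicate read support consistent with the assembly.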
kmer_result['KAD'] = np.log2((kmer_result['read_count'] + kmer_depth)
/ (kmer_depth * (kmer_result['assembly_count'] + 1)))
kmer_result.loc[(kmer_result['read_count'] == 1) *
(kmer_result['assembly_count'] == 0), 'KAD'] = np.nan
kmer_result = kmer_result.loc[kmer_result['KAD'] == kmer_result['KAD'], ]
kmer_result.loc[:, ['k-mer', 'KAD']].to_csv(
os.path.join(args.output, "temp/KAD/KAD_data/", "{}.KAD".format(str(contig))), sep="\t")
def fragment_coverage_cal(reads, mu, dev, length):
"""
calculate fragment coverage per contig
"""
frag_coverage = np.array([0] * length)
for read in reads:
if read.rnext == read.tid and read.is_proper_pair:
size = abs(read.isize)
if (mu - 3 * dev <= size <= mu + 3 * dev):
if read.next_reference_start < read.reference_start:
start = min(read.next_reference_start,
read.reference_start,
read.reference_end)
end = start + size
frag_coverage[start:end] += 1
return frag_coverage
def window_read_cal(reads, mu, dev):
read_dict = {"start_pos": [], "read_count": [], "proper_read_count": [], "inversion_read_count": [], "clipped_read_count": [],
"supplementary_read_count": [], "discordant_size_count": [], "discordant_loc_count": []}
read_temp = {"num_read": 0, "num_proper": 0, "num_inversion": 0, "num_clipped": 0, "num_supplementary": 0, "num_discordant_size": 0,
"num_discordant_loc": 0}
pos = 0
for read in reads:
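        # Assign each read to a 100-bp window anchored at position 300; reads
        # starting within the first 300 bp of the contig are skipped.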
new_pos = math.floor((read.reference_start - 300) / 100) * 100 + 300
if read.reference_start < 300:
continue
if pos == 0:
pos = new_pos
elif new_pos != pos:
read_dict["start_pos"].append(pos)
read_dict["read_count"].append(read_temp["num_read"])
read_dict["proper_read_count"].append(read_temp["num_proper"])
read_dict["inversion_read_count"].append(
read_temp["num_inversion"])
read_dict["clipped_read_count"].append(read_temp["num_clipped"])
read_dict["supplementary_read_count"].append(
read_temp["num_supplementary"])
read_dict["discordant_size_count"].append(
read_temp["num_discordant_size"])
read_dict["discordant_loc_count"].append(
read_temp["num_discordant_loc"])
read_temp = {"num_read": 0,
"num_proper": 0,
"num_inversion": 0,
"num_clipped": 0,
"num_supplementary": 0,
"num_discordant_size": 0,
"num_discordant_loc": 0}
pos = new_pos
read_temp["num_read"] += 1
if read.is_paired:
if read.rnext == read.tid:
if read.is_proper_pair:
read_temp["num_proper"] += 1
if (read.is_reverse + read.mate_is_reverse) != 1:
read_temp["num_inversion"] += 1
if not mu - 3 * dev <= abs(read.isize) <= mu + 3 * dev:
read_temp["num_discordant_size"] += 1
else:
read_temp["num_discordant_loc"] += 1
if read.get_cigar_stats()[0][4] > 20:
read_temp["num_clipped"] += 1
if (read.is_supplementary and read.get_cigar_stats()[0][5] > 20):
read_temp["num_supplementary"] += 1
return read_dict
def window_frag_cal(coverage):
"""
    Smooth per-base fragment coverage into 100-bp windows, starting 300 bp into the contig and stopping within 300 bp of its end.
"""
coverage = np.array(coverage)
cov = {"pos": [], "coverage": [], "deviation": []}
for i in range(300, len(coverage), 100):
start = i
end = i + 100
cov["coverage"].append(np.mean(coverage[start:end]))
cov["deviation"].append(
np.sqrt(np.var(coverage[start:end])) / np.mean(coverage[start:end]))
cov["pos"].append(start)
if len(coverage) - end <= 300:
break
return cov
def contig_pool(samfile):
contig_len = {}
for (ref, lens) in zip(samfile.references, samfile.lengths):
contig_len[ref] = lens
return contig_len
def pileup_window_cal(pileup_dict):
window_dict = {"contig": [], "start_pos": [], "correct_portion": [], "ambiguous_portion": [], "disagree_portion": [],
"deletion_portion": [], "insert_portion": [], "coverage": [], "deviation": []}
for i in range(300, len(pileup_dict['correct']), 100):
start = i
end = i + 100
total = np.sum(pileup_dict['depth'][start:end])
window_dict["contig"].append(pileup_dict["contig"][0])
window_dict["start_pos"].append(start)
window_dict["correct_portion"].append(
np.sum(pileup_dict['correct'][start:end]) / total)
window_dict["ambiguous_portion"].append(
np.sum(pileup_dict["ambiguous"][start:end]) / total)
window_dict["insert_portion"].append(
np.sum(pileup_dict['insert'][start:end]) / total)
window_dict["deletion_portion"].append(
np.sum(pileup_dict['deletion'][start:end]) / total)
window_dict["disagree_portion"].append(
np.sum(pileup_dict['disagree'][start:end]) / total)
window_dict["coverage"].append(
np.mean(pileup_dict["depth"][start:end]))
window_dict["deviation"].append(np.sqrt(np.var(
pileup_dict["depth"][start:end])) / np.mean(pileup_dict["depth"][start:end]))
if len(pileup_dict['correct']) - (i + 100) <= 300:
break
return window_dict
def read_breakpoint_per_contig(samfile, ref, lens):
reads = samfile.fetch(contig=ref)
break_count = {"breakcount": np.array([0] * lens),
"readcount": np.array( [0] * lens)}
for read in reads:
ref_end = read.reference_end
ref_start = read.reference_start
read_start = read.query_alignment_start
read_end = read.query_alignment_end
break_count["readcount"][ref_start:ref_end] += 1
if read.is_supplementary:
if re.match('^([0-9]+H)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
else:
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
if read.get_cigar_stats()[0][4] > 0:
if re.match('^([0-9]+S)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
if (read.cigarstring).endswith('S'):
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
data = pd.DataFrame(break_count)
data['position'] = data.index + 1
data['contig'] = ref
data = data.loc[data['breakcount'] > 0, ]
return data
def window_break_cal(data):
data['start_pos'] = [math.floor(x) * 100 + 300 for x in (data['position'] - 300) / 100]
data = data.loc[data['start_pos'] >= 300, ]
data['read_breakpoint_ratio'] = data['read_breakpoint_count'] / \
data['read_count']
data['index'] = data['contig'] + '_' + \
[str(int(x)) for x in data['start_pos']]
grouped = data.groupby(['index'])
read_break_ratio = pd.DataFrame(grouped['read_breakpoint_ratio'].max())
read_break_ratio['contig'] = ['_'.join(x.split("_")[:-1]) for x in read_break_ratio.index]
read_break_ratio['start_pos'] = [int(x.split("_")[-1]) for x in read_break_ratio.index]
read_break_ratio.index = range(read_break_ratio.shape[0])
return read_break_ratio
def read_breakpoint_cal(args):
if os.path.exists(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt")):
return 0
if os.path.exists(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt")):
read_breakpoint_data = pd.read_csv(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt"), sep="\t", index_col=0)
window_read_breakpoint_data = window_break_cal(read_breakpoint_data)
window_read_breakpoint_data.to_csv(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt"),sep="\t")
return 0
samfile = pysam.AlignmentFile(args.bamfile, "rb")
references = samfile.references
lengths = samfile.lengths
read_breakpoint_pool = {"contig": [],
"position": [],
"read_breakpoint_count": [],
"read_count": []}
for ref, lens in zip(references, lengths):
if lens < args.min_length:
continue
contig_break_data = read_breakpoint_per_contig(samfile, ref, lens)
if contig_break_data.shape[0] > 0:
read_breakpoint_pool["read_breakpoint_count"].extend(
list(contig_break_data['breakcount']))
read_breakpoint_pool["read_count"].extend(
list(contig_break_data['readcount']))
read_breakpoint_pool["contig"].extend(
[ref] * contig_break_data.shape[0])
read_breakpoint_pool["position"].extend(
list(contig_break_data['position']))
read_breakpoint_data = pd.DataFrame(read_breakpoint_pool)
read_breakpoint_data.to_csv(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt"), sep="\t")
window_read_breakpoint_data = window_break_cal(read_breakpoint_data)
window_read_breakpoint_data.to_csv(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt"), sep="\t")
def pileupfile_parse(args):
"""
    Parse the samtools mpileup output and compute per-window pileup features.
"""
if os.path.exists(os.path.join(args.output,
"temp/pileup/pileup_feature.txt")):
return 0
if not os.path.exists(args.pileup):
if os.path.exists(os.path.join(args.output,
"temp/pileup/contigs_pipelup.out")):
args.pileup = os.path.join(args.output, "temp/pileup/contigs_pipelup.out")
else:
if not os.path.exists(args.assemblies):
if os.path.exists(os.path.join(args.output,
"temp/contig/filtered_contigs.fa")):
args.assemblies = os.path.join(args.output, "temp/contig/filtered_contigs.fa")
else:
sys.stderr.write(f"Error: Can not find assemblies:{args.assemblies}!\n")
sys.exit(1)
os.makedirs(os.path.join(args.output, "temp/pileup"), exist_ok=True)
pileup_command = ' '.join([args.samtools,
'mpileup -C 50 -A -f',
args.assemblies,
args.bamfile,
" | awk", "'", "$3 !=", "\"N\"", "'", ">",
os.path.join(args.output, "temp/pileup/contigs_pipelup.out")])
args.pileup = os.path.join(args.output, "temp/pileup/contigs_pipelup.out")
os.system(pileup_command)
samfile = pysam.AlignmentFile(args.bamfile, "rb")
contig_len = contig_pool(samfile)
prev_contig = None
pileup_dict = {"contig": [], "correct": [], "ambiguous": [], "insert": [],
"deletion": [], "disagree": [], "depth": []}
window_pileup_dict = {"contig": [], "start_pos": [], "correct_portion": [], "ambiguous_portion": [], "disagree_portion": [],
"deletion_portion": [], "insert_portion": [], "normalized_coverage": [], "normalized_deviation": [], "mean_coverage": []}
for line in open(args.pileup, "r"):
record = line.strip().split('\t')
if contig_len[record[0]] < args.min_length:
continue
if prev_contig is None:
prev_contig = record[0]
if record[0] != prev_contig:
window_data = pileup_window_cal(pileup_dict)
mean_cov = np.mean(window_data["coverage"])
window_pileup_dict["contig"].extend(window_data["contig"])
window_pileup_dict["start_pos"].extend(window_data["start_pos"])
window_pileup_dict["correct_portion"].extend(
window_data["correct_portion"])
window_pileup_dict["ambiguous_portion"].extend(
window_data["ambiguous_portion"])
window_pileup_dict["disagree_portion"].extend(
window_data["disagree_portion"])
window_pileup_dict["deletion_portion"].extend(
window_data["deletion_portion"])
window_pileup_dict["insert_portion"].extend(
window_data["insert_portion"])
window_pileup_dict["normalized_coverage"].extend(
window_data["coverage"] / mean_cov)
window_pileup_dict["normalized_deviation"].extend(
window_data["deviation"])
window_pileup_dict["mean_coverage"].extend(
[mean_cov] * len(window_data["start_pos"]))
pileup_dict = {"contig": [],
"correct": [],
"ambiguous": [],
"insert": [],
"deletion": [],
"disagree": [],
"depth": []}
prev_contig = record[0]
pileup_dict['contig'].append(record[0])
match_detail = record[4]
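        # mpileup match-column symbols: '.' and ',' are reference matches on the
        # forward/reverse strand, '*' marks a deleted base, '+N.../-N...' introduce
        # insertions/deletions, and A/C/G/T in either case are mismatches.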
pileup_dict['correct'].append(match_detail.count('.') + match_detail.count(','))
pileup_dict['ambiguous'].append(match_detail.count('*'))
pileup_dict['insert'].append(match_detail.count("+"))
pileup_dict['deletion'].append(match_detail.count("-"))
pileup_dict['depth'].append(int(record[3]))
st = ''.join(re.split(r'[\+|\-][0-9]+[ATCGatcg]+', match_detail))
numd = st.count('a') + st.count('A') + st.count('t') + st.count('T') + \
st.count('c') + st.count('C') + st.count('g') + st.count('G')
pileup_dict['disagree'].append(numd)
if not os.path.exists(os.path.join(args.output, "temp/pileup")):
os.makedirs(os.path.join(args.output, "temp/pileup"), exist_ok=True)
data = pd.DataFrame(window_pileup_dict)
data.to_csv(os.path.join(args.output, "temp/pileup/pileup_feature.txt"), sep="\t")
return data
def read_cal(args, mu, dev):
if os.path.exists(os.path.join(args.output,
"temp/read_feature/read_feature.txt")):
return 0
samfile = pysam.AlignmentFile(args.bamfile, "rb")
references = samfile.references
lengths = samfile.lengths
read_dicts = {"contig": [], "start_pos": [], "read_count": [], "proper_read_count": [], "inversion_read_count": [],
"clipped_read_count": [], "supplementary_read_count": [], "discordant_size_count": [], "discordant_loc_count": [], "length": []}
for ref, lens in zip(references, lengths):
if lens < args.min_length:
continue
contig_reads = samfile.fetch(ref)
read_dict = window_read_cal(contig_reads, mu, dev)
read_dicts["start_pos"].extend(read_dict["start_pos"])
read_dicts["contig"].extend([ref] * len(read_dict["start_pos"]))
read_dicts["read_count"].extend(read_dict["read_count"])
read_dicts["proper_read_count"].extend(read_dict["proper_read_count"])
read_dicts["inversion_read_count"].extend(
read_dict["inversion_read_count"])
read_dicts["clipped_read_count"].extend(
read_dict["clipped_read_count"])
read_dicts["supplementary_read_count"].extend(
read_dict["supplementary_read_count"])
read_dicts["discordant_size_count"].extend(
read_dict["discordant_size_count"])
read_dicts["discordant_loc_count"].extend(
read_dict["discordant_loc_count"])
read_dicts["length"].extend([lens] * len(read_dict["start_pos"]))
data = pd.DataFrame(read_dicts)
data.to_csv(os.path.join(args.output,
"temp/read_feature/read_feature.txt"), sep="\t")
def fragment_cal(args, mu, dev):
if os.path.exists(os.path.join(args.output,
"temp/coverage/fragment_coverage.txt")):
return 0
samfile = pysam.AlignmentFile(args.bamfile, "rb")
references = samfile.references
lengths = samfile.lengths
frag_dict = {
"contig": [],
"start_pos": [],
"normalized_fragment_coverage": [],
"normalized_fragment_deviation": []}
for ref, lens in zip(references, lengths):
if lens < args.min_length:
continue
reads = samfile.fetch(ref)
frag_coverage = fragment_coverage_cal(reads, mu, dev, lens)
fragcov = window_frag_cal(frag_coverage)
frag_dict["contig"].extend([ref] * len(fragcov['pos']))
frag_dict["start_pos"].extend(fragcov["pos"])
frag_dict["normalized_fragment_coverage"].extend(
fragcov["coverage"] / np.mean(fragcov["coverage"]))
frag_dict["normalized_fragment_deviation"].extend(fragcov["deviation"])
data = pd.DataFrame(frag_dict)
data.to_csv(os.path.join(args.output,
"temp/coverage/fragment_coverage.txt"), sep="\t")
def KAD_cal(args):
if os.path.exists(os.path.join(args.output,
"temp/KAD/KAD_window_data.txt")):
return 0
contig_data = pd.read_csv(os.path.join(args.output,
"temp/split/contig_name.txt"), header=None)
split_data = pd.read_csv(os.path.join(args.output,
"temp/split/split_file_name.txt"), header=None)
    data = pd.concat([contig_data, split_data], axis=1)
"""
Sklearn dependent models
Decision Tree, Elastic Net, Random Forest, MLPRegressor, KNN, Adaboost
"""
import datetime
import random
import numpy as np
import pandas as pd
from autots.models.base import ModelObject, PredictionObject
from autots.tools.probabilistic import Point_to_Probability
from autots.tools.seasonal import date_part, seasonal_int
from autots.tools.window_functions import window_maker, last_window
def rolling_x_regressor(
df,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = None,
min_rolling_periods: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
ewm_var_alpha: float = None,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
holiday: bool = False,
holiday_country: str = 'US',
polynomial_degree: int = None,
window: int = None,
):
"""
Generate more features from initial time series.
macd_periods ignored if mean_rolling is None.
Returns a dataframe of statistical features. Will need to be shifted by 1 or more to match Y for forecast.
"""
X = df.copy()
if str(mean_rolling_periods).isdigit():
temp = df.rolling(int(mean_rolling_periods), min_periods=1).median()
X = pd.concat([X, temp], axis=1)
if str(macd_periods).isdigit():
temp = df.rolling(int(macd_periods), min_periods=1).median() - temp
X = pd.concat([X, temp], axis=1)
if str(std_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(std_rolling_periods, min_periods=1).std()], axis=1)
if str(max_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(max_rolling_periods, min_periods=1).max()], axis=1)
if str(min_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(min_rolling_periods, min_periods=1).min()], axis=1)
if str(quantile90_rolling_periods).isdigit():
X = pd.concat(
[X, df.rolling(quantile90_rolling_periods, min_periods=1).quantile(0.9)],
axis=1,
)
if str(quantile10_rolling_periods).isdigit():
X = pd.concat(
[X, df.rolling(quantile10_rolling_periods, min_periods=1).quantile(0.1)],
axis=1,
)
if str(ewm_alpha).replace('.', '').isdigit():
X = pd.concat(
[X, df.ewm(alpha=ewm_alpha, ignore_na=True, min_periods=1).mean()], axis=1
)
if str(ewm_var_alpha).replace('.', '').isdigit():
X = pd.concat(
[X, df.ewm(alpha=ewm_var_alpha, ignore_na=True, min_periods=1).var()],
axis=1,
)
if str(additional_lag_periods).isdigit():
X = pd.concat([X, df.shift(additional_lag_periods)], axis=1).fillna(
method='bfill'
)
if abs_energy:
X = pd.concat([X, df.pow(other=([2] * len(df.columns))).cumsum()], axis=1)
if str(rolling_autocorr_periods).isdigit():
temp = df.rolling(rolling_autocorr_periods).apply(
lambda x: x.autocorr(), raw=False
)
X = pd.concat([X, temp], axis=1).fillna(method='bfill')
if add_date_part in ['simple', 'expanded', 'recurring']:
date_part_df = date_part(df.index, method=add_date_part)
date_part_df.index = df.index
        X = pd.concat(
            [
                X,
                date_part_df,
            ],
            axis=1,
        )
if holiday:
from autots.tools.holiday import holiday_flag
X['holiday_flag_'] = holiday_flag(X.index, country=holiday_country)
X['holiday_flag_future_'] = holiday_flag(
X.index.shift(1, freq=pd.infer_freq(X.index)), country=holiday_country
)
if str(polynomial_degree).isdigit():
polynomial_degree = abs(int(polynomial_degree))
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(polynomial_degree)
X = pd.DataFrame(poly.fit_transform(X))
# unlike the others, this pulls the entire window, not just one lag
if str(window).isdigit():
# we already have lag 1 using this
for curr_shift in range(1, window):
X = pd.concat([X, df.shift(curr_shift)], axis=1).fillna(method='bfill')
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(method='ffill').fillna(method='bfill')
X.columns = [str(x) for x in range(len(X.columns))]
return X
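# A minimal usage sketch (illustrative only, not part of the library API),
# using an invented toy series: it builds the rolling feature matrix and
# aligns it with a one-step-ahead target, the shift the docstring above says
# the caller must perform.
def _example_rolling_x_alignment():
    idx = pd.date_range("2020-01-01", periods=60, freq="D")
    toy = pd.DataFrame({"series_a": np.arange(60.0)}, index=idx)
    X = rolling_x_regressor(toy, mean_rolling_periods=7)
    # drop the last feature row and the first target row so the features at
    # time t line up with the value at t + 1
    X_train = X.iloc[:-1]
    Y_train = toy.iloc[1:]
    return X_train, Y_train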
def rolling_x_regressor_regressor(
df,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = None,
min_rolling_periods: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
ewm_var_alpha: float = None,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
holiday: bool = False,
holiday_country: str = 'US',
polynomial_degree: int = None,
window: int = None,
future_regressor=None,
):
"""Adds in the future_regressor."""
X = rolling_x_regressor(
df,
mean_rolling_periods=mean_rolling_periods,
macd_periods=macd_periods,
std_rolling_periods=std_rolling_periods,
max_rolling_periods=max_rolling_periods,
min_rolling_periods=min_rolling_periods,
ewm_var_alpha=ewm_var_alpha,
quantile90_rolling_periods=quantile90_rolling_periods,
quantile10_rolling_periods=quantile10_rolling_periods,
additional_lag_periods=additional_lag_periods,
ewm_alpha=ewm_alpha,
abs_energy=abs_energy,
rolling_autocorr_periods=rolling_autocorr_periods,
add_date_part=add_date_part,
holiday=holiday,
holiday_country=holiday_country,
polynomial_degree=polynomial_degree,
window=window,
)
if future_regressor is not None:
X = pd.concat([X, future_regressor], axis=1)
return X
def retrieve_regressor(
regression_model: dict = {
"model": 'Adaboost',
"model_params": {
'n_estimators': 50,
'base_estimator': 'DecisionTree',
'loss': 'linear',
'learning_rate': 1.0,
},
},
verbose: int = 0,
verbose_bool: bool = False,
random_seed: int = 2020,
n_jobs: int = 1,
multioutput: bool = True,
):
"""Convert a model param dict to model object for regression frameworks."""
model_class = regression_model['model']
model_param_dict = regression_model.get("model_params", {})
if model_class == 'ElasticNet':
if multioutput:
from sklearn.linear_model import MultiTaskElasticNet
regr = MultiTaskElasticNet(
alpha=1.0, random_state=random_seed, **model_param_dict
)
else:
from sklearn.linear_model import ElasticNet
regr = ElasticNet(alpha=1.0, random_state=random_seed, **model_param_dict)
return regr
elif model_class == 'DecisionTree':
from sklearn.tree import DecisionTreeRegressor
regr = DecisionTreeRegressor(random_state=random_seed, **model_param_dict)
return regr
elif model_class == 'MLP':
from sklearn.neural_network import MLPRegressor
regr = MLPRegressor(
random_state=random_seed, verbose=verbose_bool, **model_param_dict
)
return regr
elif model_class == 'KerasRNN':
from autots.models.dnn import KerasRNN
regr = KerasRNN(verbose=verbose, random_seed=random_seed, **model_param_dict)
return regr
elif model_class == 'Transformer':
from autots.models.dnn import Transformer
regr = Transformer(verbose=verbose, random_seed=random_seed, **model_param_dict)
return regr
elif model_class == 'KNN':
from sklearn.neighbors import KNeighborsRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
KNeighborsRegressor(**model_param_dict),
n_jobs=n_jobs,
)
else:
regr = KNeighborsRegressor(**model_param_dict, n_jobs=n_jobs)
return regr
elif model_class == 'HistGradientBoost':
try:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
except Exception:
pass
from sklearn.ensemble import HistGradientBoostingRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
HistGradientBoostingRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
**model_param_dict,
)
)
else:
regr = HistGradientBoostingRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
**model_param_dict,
)
return regr
elif model_class == 'LightGBM':
from lightgbm import LGBMRegressor
regr = LGBMRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
n_jobs=n_jobs,
**model_param_dict,
)
if multioutput:
from sklearn.multioutput import RegressorChain
return RegressorChain(regr)
else:
return regr
elif model_class == 'Adaboost':
from sklearn.ensemble import AdaBoostRegressor
if regression_model["model_params"]['base_estimator'] == 'SVR':
from sklearn.svm import LinearSVR
svc = LinearSVR(verbose=verbose, random_state=random_seed, max_iter=1500)
regr = AdaBoostRegressor(
base_estimator=svc,
n_estimators=regression_model["model_params"]['n_estimators'],
loss=regression_model["model_params"]['loss'],
learning_rate=regression_model["model_params"]['learning_rate'],
random_state=random_seed,
)
elif regression_model["model_params"]['base_estimator'] == 'LinReg':
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
regr = AdaBoostRegressor(
base_estimator=linreg,
n_estimators=regression_model["model_params"]['n_estimators'],
loss=regression_model["model_params"]['loss'],
learning_rate=regression_model["model_params"]['learning_rate'],
random_state=random_seed,
)
else:
regr = AdaBoostRegressor(random_state=random_seed, **model_param_dict)
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(regr, n_jobs=n_jobs)
else:
return regr
elif model_class == 'xgboost':
import xgboost as xgb
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
xgb.XGBRegressor(verbosity=verbose, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = xgb.XGBRegressor(
verbosity=verbose, **model_param_dict, n_jobs=n_jobs
)
return regr
elif model_class == 'SVM':
from sklearn.svm import LinearSVR
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
LinearSVR(verbose=verbose_bool, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = LinearSVR(verbose=verbose_bool, **model_param_dict)
return regr
elif model_class == 'BayesianRidge':
from sklearn.linear_model import BayesianRidge
regr = BayesianRidge(**model_param_dict)
if multioutput:
from sklearn.multioutput import RegressorChain
return RegressorChain(regr)
else:
return regr
elif model_class == "ExtraTrees":
from sklearn.ensemble import ExtraTreesRegressor
return ExtraTreesRegressor(
n_jobs=n_jobs, random_state=random_seed, **model_param_dict
)
elif model_class == "RadiusNeighbors":
from sklearn.neighbors import RadiusNeighborsRegressor
regr = RadiusNeighborsRegressor(n_jobs=n_jobs, **model_param_dict)
return regr
elif model_class == "PoissonRegresssion":
from sklearn.linear_model import PoissonRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
PoissonRegressor(fit_intercept=True, max_iter=200, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = PoissonRegressor(**model_param_dict)
return regr
elif model_class == 'RANSAC':
from sklearn.linear_model import RANSACRegressor
return RANSACRegressor(random_state=random_seed, **model_param_dict)
else:
regression_model['model'] = 'RandomForest'
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(
random_state=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**model_param_dict,
)
return regr
# models that can more quickly handle many X/Y obs, with modest number of features
sklearn_model_dict = {
'RandomForest': 0.02,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05, # was slow, LinearSVR seems much faster
'BayesianRidge': 0.05,
'xgboost': 0.01,
'KerasRNN': 0.02,
'Transformer': 0.02,
'HistGradientBoost': 0.03,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.02,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
multivariate_model_dict = {
'RandomForest': 0.02,
# 'ElasticNet': 0.05,
'MLP': 0.03,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05,
# 'BayesianRidge': 0.05,
'xgboost': 0.01,
'KerasRNN': 0.01,
'HistGradientBoost': 0.03,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.02,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
# these should train quickly with low dimensional X/Y, and not mind being run multiple in parallel
univariate_model_dict = {
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.03,
'Adaboost': 0.05,
'SVM': 0.05,
'BayesianRidge': 0.03,
'HistGradientBoost': 0.02,
'LightGBM': 0.01,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.05,
'RANSAC': 0.02,
}
# for high dimensionality, many-feature X, many-feature Y, but with moderate obs count
rolling_regression_dict = {
'RandomForest': 0.02,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05,
'KerasRNN': 0.02,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.01,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
# models where we can be sure the model isn't sharing information across multiple Y's...
no_shared_model_dict = {
'KNN': 0.1,
'Adaboost': 0.1,
'SVM': 0.1,
'xgboost': 0.1,
'HistGradientBoost': 0.1,
'PoissonRegresssion': 0.05,
}
# these are models that are relatively fast with large multioutput Y, small n obs
datepart_model_dict: dict = {
'RandomForest': 0.05,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'Adaboost': 0.05,
'SVM': 0.05,
'KerasRNN': 0.05,
'Transformer': 0.05,
'ExtraTrees': 0.07,
'RadiusNeighbors': 0.05,
}
def generate_regressor_params(
    model_dict=None,
):
    """Generate new parameters for input to regressor."""
    if model_dict is None:
        model_dict = sklearn_model_dict
model = random.choices(list(model_dict.keys()), list(model_dict.values()), k=1)[0]
if model in [
'xgboost',
'Adaboost',
'DecisionTree',
'LightGBM',
'MLP',
'KNN',
'KerasRNN',
'Transformer',
'HistGradientBoost',
'RandomForest',
'ExtraTrees',
]:
if model == 'Adaboost':
param_dict = {
"model": 'Adaboost',
"model_params": {
"n_estimators": random.choices([50, 100, 500], [0.7, 0.2, 0.1])[0],
"loss": random.choices(
['linear', 'square', 'exponential'], [0.8, 0.01, 0.1]
)[0],
"base_estimator": random.choices(
[None, 'LinReg', 'SVR'], [0.8, 0.1, 0.1]
)[0],
"learning_rate": random.choices([1, 0.5], [0.9, 0.1])[0],
},
}
elif model == 'xgboost':
param_dict = {
"model": 'xgboost',
"model_params": {
"objective": np.random.choice(
['count:poisson', 'reg:squarederror', 'reg:gamma'],
p=[0.4, 0.5, 0.1],
size=1,
).item(),
"eta": np.random.choice([0.3], p=[1.0], size=1).item(),
"min_child_weight": np.random.choice(
[1, 2, 5], p=[0.8, 0.1, 0.1], size=1
).item(),
"max_depth": np.random.choice(
[3, 6, 9], p=[0.1, 0.8, 0.1], size=1
).item(),
"subsample": np.random.choice(
[1, 0.7, 0.5], p=[0.9, 0.05, 0.05], size=1
).item(),
},
}
elif model == 'MLP':
solver = np.random.choice(
['lbfgs', 'sgd', 'adam'], p=[0.5, 0.1, 0.4], size=1
).item()
if solver in ['sgd', 'adam']:
early_stopping = np.random.choice([True, False], size=1).item()
learning_rate_init = np.random.choice(
[0.01, 0.001, 0.0001, 0.00001], p=[0.1, 0.7, 0.1, 0.1], size=1
).item()
else:
early_stopping = False
learning_rate_init = 0.001
param_dict = {
"model": 'MLP',
"model_params": {
"hidden_layer_sizes": random.choices(
[
(100,),
(25, 15, 25),
(72, 36, 72),
(25, 50, 25),
(32, 64, 32),
(32, 32, 32),
],
[0.1, 0.3, 0.3, 0.1, 0.1, 0.1],
)[0],
"max_iter": np.random.choice(
[250, 500, 1000], p=[0.8, 0.1, 0.1], size=1
).item(),
"activation": np.random.choice(
['identity', 'logistic', 'tanh', 'relu'],
p=[0.05, 0.05, 0.6, 0.3],
size=1,
).item(),
"solver": solver,
"early_stopping": early_stopping,
"learning_rate_init": learning_rate_init,
},
}
elif model == 'KNN':
param_dict = {
"model": 'KNN',
"model_params": {
"n_neighbors": np.random.choice(
[3, 5, 10], p=[0.2, 0.7, 0.1], size=1
).item(),
"weights": np.random.choice(
['uniform', 'distance'], p=[0.7, 0.3], size=1
).item(),
},
}
elif model == 'RandomForest':
param_dict = {
"model": 'RandomForest',
"model_params": {
"n_estimators": random.choices(
[300, 100, 1000, 5000], [0.4, 0.4, 0.2, 0.01]
)[0],
"min_samples_leaf": random.choices([2, 4, 1], [0.2, 0.2, 0.8])[0],
"bootstrap": random.choices([True, False], [0.9, 0.1])[0],
# absolute_error is noticeably slower
# "criterion": random.choices(
# ["squared_error", "poisson"], [0.99, 0.001]
# )[0],
},
}
elif model == 'ExtraTrees':
max_depth_choice = random.choices([None, 5, 10, 20], [0.2, 0.1, 0.5, 0.3])[
0
]
estimators_choice = random.choices([50, 100, 500], [0.05, 0.9, 0.05])[0]
param_dict = {
"model": 'ExtraTrees',
"model_params": {
"n_estimators": estimators_choice,
"min_samples_leaf": random.choices([2, 4, 1], [0.1, 0.1, 0.8])[0],
"max_depth": max_depth_choice,
# "criterion": "squared_error",
},
}
elif model == 'KerasRNN':
init_list = [
'glorot_uniform',
'lecun_uniform',
'glorot_normal',
'RandomUniform',
'he_normal',
'zeros',
]
param_dict = {
"model": 'KerasRNN',
"model_params": {
"kernel_initializer": random.choices(init_list)[0],
"epochs": random.choices(
[50, 100, 200, 500, 750], [0.75, 0.2, 0.05, 0.01, 0.001]
)[0],
"batch_size": random.choices([8, 16, 32, 72], [0.2, 0.2, 0.5, 0.1])[
0
],
"optimizer": random.choices(
['adam', 'rmsprop', 'adagrad'], [0.4, 0.5, 0.1]
)[0],
"loss": random.choices(
['mae', 'Huber', 'poisson', 'mse', 'mape'],
[0.2, 0.3, 0.1, 0.2, 0.2],
)[0],
"hidden_layer_sizes": random.choices(
[
(100,),
(32,),
(72, 36, 72),
(25, 50, 25),
(32, 64, 32),
(32, 32, 32),
],
[0.1, 0.3, 0.3, 0.1, 0.1, 0.1],
)[0],
"rnn_type": random.choices(
['LSTM', 'GRU', "E2D2", "CNN"], [0.5, 0.3, 0.15, 0.01]
)[0],
"shape": random.choice([1, 2]),
},
}
elif model == 'Transformer':
param_dict = {
"model": 'Transformer',
"model_params": {
"epochs": random.choices(
[50, 100, 200, 500, 750], [0.75, 0.2, 0.05, 0.01, 0.001]
)[0],
"batch_size": random.choices(
[8, 16, 32, 64, 72], [0.01, 0.2, 0.5, 0.1, 0.1]
)[0],
"optimizer": random.choices(
['adam', 'rmsprop', 'adagrad'], [0.4, 0.5, 0.1]
)[0],
"loss": random.choices(
['mae', 'Huber', 'poisson', 'mse', 'mape'],
[0.2, 0.3, 0.1, 0.2, 0.2],
)[0],
"head_size": random.choices(
[32, 64, 128, 256, 384], [0.1, 0.1, 0.3, 0.5, 0.05]
)[0],
"num_heads": random.choices([2, 4], [0.2, 0.2])[0],
"ff_dim": random.choices(
[2, 3, 4, 32, 64], [0.1, 0.1, 0.8, 0.05, 0.05]
)[0],
"num_transformer_blocks": random.choices(
[1, 2, 4, 6],
[0.2, 0.2, 0.6, 0.05],
)[0],
"mlp_units": random.choices(
[32, 64, 128, 256],
[0.2, 0.3, 0.8, 0.2],
),
"mlp_dropout": random.choices(
[0.05, 0.2, 0.4],
[0.2, 0.8, 0.2],
)[0],
"dropout": random.choices(
[0.05, 0.2, 0.4],
[0.2, 0.8, 0.2],
)[0],
},
}
elif model == 'HistGradientBoost':
param_dict = {
"model": 'HistGradientBoost',
"model_params": {
"loss": random.choices(
['squared_error', 'poisson', 'absolute_error'], [0.8, 0.1, 0.1]
)[0],
"learning_rate": random.choices([1, 0.1, 0.01], [0.3, 0.4, 0.3])[0],
"max_depth": random.choices(
[None, 5, 10, 20], [0.7, 0.1, 0.1, 0.1]
)[0],
"min_samples_leaf": random.choices(
[20, 5, 10, 30], [0.9, 0.1, 0.1, 0.1]
)[0],
"max_iter": random.choices(
[100, 250, 50, 500], [0.9, 0.1, 0.1, 0.001]
)[0],
"l2_regularization": random.choices(
[0, 0.01, 0.02, 0.4], [0.9, 0.1, 0.1, 0.1]
)[0],
},
}
elif model == 'LightGBM':
param_dict = {
"model": 'LightGBM',
"model_params": {
"objective": random.choices(
[
'regression',
'gamma',
'huber',
'regression_l1',
'tweedie',
'poisson',
'quantile',
],
[0.4, 0.2, 0.2, 0.2, 0.2, 0.05, 0.01],
)[0],
"learning_rate": random.choices(
[0.001, 0.1, 0.01],
[0.1, 0.6, 0.3],
)[0],
"num_leaves": random.choices(
[31, 127, 70],
[0.6, 0.1, 0.3],
)[0],
"max_depth": random.choices(
[-1, 5, 10],
[0.6, 0.1, 0.3],
)[0],
"boosting_type": random.choices(
['gbdt', 'rf', 'dart', 'goss'],
[0.6, 0, 0.2, 0.2],
)[0],
"n_estimators": random.choices(
[100, 250, 50, 500],
[0.6, 0.1, 0.3, 0.0010],
)[0],
},
}
else:
min_samples = np.random.choice(
[1, 2, 0.05], p=[0.5, 0.3, 0.2], size=1
).item()
min_samples = int(min_samples) if min_samples in [2] else min_samples
param_dict = {
"model": 'DecisionTree',
"model_params": {
"max_depth": np.random.choice(
[None, 3, 9], p=[0.5, 0.3, 0.2], size=1
).item(),
"min_samples_split": min_samples,
},
}
else:
param_dict = {"model": model, "model_params": {}}
return param_dict
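# A minimal usage sketch (illustrative only): sample a random model
# configuration and turn it into a ready-to-fit estimator. multioutput=False
# is an assumption for a single-series target; multi-series targets would
# pass multioutput=True instead.
def _example_sample_and_build_regressor():
    params = generate_regressor_params(model_dict=univariate_model_dict)
    regr = retrieve_regressor(
        regression_model=params,
        verbose=0,
        verbose_bool=False,
        random_seed=2020,
        n_jobs=1,
        multioutput=False,
    )
    return params, regr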
class RollingRegression(ModelObject):
"""General regression-framed approach to forecasting using sklearn.
Who are you who are so wise in the ways of science?
I am Arthur, King of the Britons. -Python
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holiday flags
regression_type (str): type of regression (None, 'User')
"""
def __init__(
self,
name: str = "RollingRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
holiday_country: str = 'US',
verbose: int = 0,
random_seed: int = 2020,
regression_model: dict = {
"model": 'ExtraTrees',
"model_params": {},
},
holiday: bool = False,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = 7,
min_rolling_periods: int = 7,
ewm_var_alpha: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
polynomial_degree: int = None,
x_transform: str = None,
window: int = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.regression_model = regression_model
self.holiday = holiday
self.mean_rolling_periods = mean_rolling_periods
if mean_rolling_periods is None:
self.macd_periods = None
else:
self.macd_periods = macd_periods
self.std_rolling_periods = std_rolling_periods
self.max_rolling_periods = max_rolling_periods
self.min_rolling_periods = min_rolling_periods
self.ewm_var_alpha = ewm_var_alpha
self.quantile90_rolling_periods = quantile90_rolling_periods
self.quantile10_rolling_periods = quantile10_rolling_periods
self.ewm_alpha = ewm_alpha
self.additional_lag_periods = additional_lag_periods
self.abs_energy = abs_energy
self.rolling_autocorr_periods = rolling_autocorr_periods
self.add_date_part = add_date_part
self.polynomial_degree = polynomial_degree
self.x_transform = x_transform
self.window = window
def _x_transformer(self):
if self.x_transform == 'FastICA':
from sklearn.decomposition import FastICA
x_transformer = FastICA(n_components=None, random_state=2020, whiten=True)
elif self.x_transform == 'Nystroem':
from sklearn.kernel_approximation import Nystroem
half_size = int(self.sktraindata.shape[0] / 2) + 1
max_comp = 200
n_comp = max_comp if half_size > max_comp else half_size
x_transformer = Nystroem(
kernel='rbf', gamma=0.2, random_state=2020, n_components=n_comp
)
else:
# self.x_transform = 'RmZeroVariance'
from sklearn.feature_selection import VarianceThreshold
x_transformer = VarianceThreshold(threshold=0.0)
return x_transformer
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
future_regressor (pandas.DataFrame or Series): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"future_regressor not supplied, necessary for regression_type"
)
self.regressor_train = future_regressor
# define X and Y
self.sktraindata = self.df_train.dropna(how='all', axis=0)
self.sktraindata = self.sktraindata.fillna(method='ffill').fillna(
method='bfill'
)
Y = self.sktraindata.drop(self.sktraindata.head(2).index)
Y.columns = [x for x in range(len(Y.columns))]
X = rolling_x_regressor(
self.sktraindata,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
quantile90_rolling_periods=self.quantile90_rolling_periods,
quantile10_rolling_periods=self.quantile10_rolling_periods,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
window=self.window,
)
if self.regression_type == 'User':
X = pd.concat([X, self.regressor_train], axis=1)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
self.x_transformer = self._x_transformer()
self.x_transformer = self.x_transformer.fit(X)
X = pd.DataFrame(self.x_transformer.transform(X))
X = X.replace([np.inf, -np.inf], 0).fillna(0)
"""
Tail(1) is dropped to shift data to become forecast 1 ahead
and the first one is dropped because it will least accurately represent
rolling values
"""
X = X.drop(X.tail(1).index).drop(X.head(1).index)
if isinstance(X, pd.DataFrame):
X.columns = [str(xc) for xc in X.columns]
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
# retrieve model object to train
self.regr = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.regr = self.regr.fit(X, Y)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
if self.regression_type in ['User', 'user']:
complete_regressor = pd.concat(
[self.regressor_train, future_regressor], axis=0
)
combined_index = self.df_train.index.append(index)
forecast = pd.DataFrame()
self.sktraindata.columns = [x for x in range(len(self.sktraindata.columns))]
# forecast, 1 step ahead, then another, and so on
for x in range(forecast_length):
x_dat = rolling_x_regressor(
self.sktraindata,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
quantile90_rolling_periods=self.quantile90_rolling_periods,
quantile10_rolling_periods=self.quantile10_rolling_periods,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
                polynomial_degree=self.polynomial_degree,
                window=self.window,
)
if self.regression_type == 'User':
x_dat = pd.concat(
[x_dat, complete_regressor.head(x_dat.shape[0])], axis=1
).fillna(0)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
x_dat = pd.DataFrame(self.x_transformer.transform(x_dat))
x_dat = x_dat.replace([np.inf, -np.inf], 0).fillna(0)
if isinstance(x_dat, pd.DataFrame):
x_dat.columns = [str(xc) for xc in x_dat.columns]
rfPred = pd.DataFrame(self.regr.predict(x_dat.tail(1).to_numpy()))
forecast = pd.concat([forecast, rfPred], axis=0, ignore_index=True)
self.sktraindata = pd.concat(
[self.sktraindata, rfPred], axis=0, ignore_index=True
)
self.sktraindata.index = combined_index[: len(self.sktraindata.index)]
forecast.columns = self.column_names
forecast.index = index
if just_point_forecast:
return forecast
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
forecast,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
rolling_model_dict = sklearn_model_dict.copy()
del rolling_model_dict['KNN']
model_choice = generate_regressor_params(model_dict=rolling_model_dict)
mean_rolling_periods_choice = random.choices(
[None, 5, 7, 12, 30], [0.2, 0.2, 0.2, 0.2, 0.2]
)[0]
if mean_rolling_periods_choice is not None:
macd_periods_choice = seasonal_int()
if macd_periods_choice == mean_rolling_periods_choice:
macd_periods_choice = mean_rolling_periods_choice + 10
else:
macd_periods_choice = None
std_rolling_periods_choice = random.choices(
[None, 5, 7, 10, 30], [0.6, 0.1, 0.1, 0.1, 0.1]
)[0]
max_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
min_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
lag_periods_choice = seasonal_int() - 1
lag_periods_choice = 2 if lag_periods_choice < 2 else lag_periods_choice
ewm_choice = random.choices(
[None, 0.05, 0.1, 0.2, 0.5, 0.8], [0.4, 0.01, 0.05, 0.1, 0.1, 0.05]
)[0]
abs_energy_choice = random.choices([True, False], [0.3, 0.7])[0]
rolling_autocorr_periods_choice = random.choices(
[None, 2, 7, 12, 30], [0.8, 0.05, 0.05, 0.05, 0.05]
)[0]
add_date_part_choice = random.choices(
[None, 'simple', 'expanded', 'recurring'], [0.7, 0.1, 0.1, 0.1]
)[0]
holiday_choice = random.choices([True, False], [0.2, 0.8])[0]
polynomial_degree_choice = random.choices([None, 2], [0.99, 0.01])[0]
x_transform_choice = random.choices(
[None, 'FastICA', 'Nystroem', 'RmZeroVariance'],
[0.85, 0.05, 0.05, 0.05],
)[0]
if "regressor" in method:
regression_choice = "User"
else:
regression_choice = random.choices([None, 'User'], [0.7, 0.3])[0]
parameter_dict = {
'regression_model': model_choice,
'holiday': holiday_choice,
'mean_rolling_periods': mean_rolling_periods_choice,
'macd_periods': macd_periods_choice,
'std_rolling_periods': std_rolling_periods_choice,
'max_rolling_periods': max_rolling_periods_choice,
'min_rolling_periods': min_rolling_periods_choice,
'ewm_alpha': ewm_choice,
'additional_lag_periods': lag_periods_choice,
'abs_energy': abs_energy_choice,
'rolling_autocorr_periods': rolling_autocorr_periods_choice,
'add_date_part': add_date_part_choice,
'polynomial_degree': polynomial_degree_choice,
'x_transform': x_transform_choice,
'regression_type': regression_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'regression_model': self.regression_model,
'holiday': self.holiday,
'mean_rolling_periods': self.mean_rolling_periods,
'macd_periods': self.macd_periods,
'std_rolling_periods': self.std_rolling_periods,
'max_rolling_periods': self.max_rolling_periods,
'min_rolling_periods': self.min_rolling_periods,
"ewm_var_alpha": self.ewm_var_alpha,
"quantile90_rolling_periods": self.quantile90_rolling_periods,
"quantile10_rolling_periods": self.quantile10_rolling_periods,
'ewm_alpha': self.ewm_alpha,
'additional_lag_periods': self.additional_lag_periods,
'abs_energy': self.abs_energy,
'rolling_autocorr_periods': self.rolling_autocorr_periods,
'add_date_part': self.add_date_part,
'polynomial_degree': self.polynomial_degree,
'x_transform': self.x_transform,
'regression_type': self.regression_type,
}
return parameter_dict
class WindowRegression(ModelObject):
"""Regression use the last n values as the basis of training data.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
        regression_type (str): type of regression (None, 'User')
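
    Example:
        A minimal usage sketch (not a doctest); ``df`` is assumed to be a
        datetime-indexed, all-numeric pandas.DataFrame of series::

            model = WindowRegression(window_size=10, forecast_length=7)
            prediction = model.fit(df).predict(forecast_length=7)
            point_forecast = prediction.forecast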
"""
def __init__(
self,
name: str = "WindowRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2022,
verbose: int = 0,
window_size: int = 10,
regression_model: dict = {
"model": 'RandomForest',
"model_params": {},
},
input_dim: str = 'univariate',
output_dim: str = 'forecast_length',
normalize_window: bool = False,
shuffle: bool = False,
forecast_length: int = 1,
max_windows: int = 5000,
regression_type: str = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
regression_type=regression_type,
verbose=verbose,
n_jobs=n_jobs,
)
self.window_size = abs(int(window_size))
self.regression_model = regression_model
self.input_dim = input_dim
self.output_dim = output_dim
self.normalize_window = normalize_window
self.shuffle = shuffle
self.forecast_length = forecast_length
self.max_windows = abs(int(max_windows))
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
if (
df.shape[1] * self.forecast_length
) > 200 and self.input_dim == "multivariate":
raise ValueError(
"Scale exceeds recommendation for input_dim == `multivariate`"
)
df = self.basic_profile(df)
if self.regression_type in ["User", "user"]:
if future_regressor is None:
raise ValueError(
"regression_type='User' but no future_regressor passed"
)
self.df_train = df
X, Y = window_maker(
df,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
shuffle=self.shuffle,
output_dim=self.output_dim,
forecast_length=self.forecast_length,
max_windows=self.max_windows,
regression_type=self.regression_type,
future_regressor=future_regressor,
random_seed=self.random_seed,
)
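        # flag whether Y has multiple target columns so retrieve_regressor can
        # wrap the estimator for multioutput regression when needed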
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
self.regr = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.regr = self.regr.fit(X, Y)
self.last_window = df.tail(self.window_size)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
            future_regressor (pandas.DataFrame): additional regressor, used only when regression_type is 'User'
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
if int(forecast_length) > int(self.forecast_length):
print("Regression must be refit to change forecast length!")
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
if self.output_dim == '1step':
# combined_index = (self.df_train.index.append(index))
forecast = pd.DataFrame()
# forecast, 1 step ahead, then another, and so on
for x in range(forecast_length):
pred = last_window(
self.last_window,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
)
if self.regression_type in ["User", "user"]:
blasted_thing = future_regressor.iloc[x].to_frame().transpose()
tmerg = pd.concat([blasted_thing] * pred.shape[0], axis=0)
tmerg.index = pred.index
pred = pd.concat([pred, tmerg], axis=1, ignore_index=True)
if isinstance(pred, pd.DataFrame):
pred = pred.to_numpy()
rfPred = pd.DataFrame(self.regr.predict(pred))
if self.input_dim == 'univariate':
rfPred = rfPred.transpose()
rfPred.columns = self.last_window.columns
forecast = pd.concat([forecast, rfPred], axis=0, ignore_index=True)
self.last_window = pd.concat(
[self.last_window, rfPred], axis=0, ignore_index=True
)
df = forecast
else:
pred = last_window(
self.last_window,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
)
if self.regression_type in ["User", "user"]:
tmerg = future_regressor.tail(1).loc[
future_regressor.tail(1).index.repeat(pred.shape[0])
]
tmerg.index = pred.index
pred = pd.concat([pred, tmerg], axis=1)
if isinstance(pred, pd.DataFrame):
pred = pred.to_numpy()
cY = pd.DataFrame(self.regr.predict(pred))
if self.input_dim == 'multivariate':
cY.index = ['values']
cY.columns = np.tile(self.column_names, reps=self.forecast_length)
cY = cY.transpose().reset_index()
cY['timestep'] = np.repeat(
range(forecast_length), repeats=len(self.column_names)
)
cY = pd.pivot_table(cY, index='timestep', columns='index')
else:
cY = cY.transpose()
df = cY
df.columns = self.column_names
df.index = index
if just_point_forecast:
return df
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
df,
prediction_interval=self.prediction_interval,
method='historic_quantile',
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
window_size_choice = random.choice([5, 10, 20, seasonal_int()])
model_choice = generate_regressor_params()
if "regressor" in method:
regression_type_choice = "User"
input_dim_choice = 'univariate'
output_dim_choice = random.choice(
['forecast_length', '1step'],
)
else:
input_dim_choice = random.choices(
['multivariate', 'univariate'], [0.01, 0.99]
)[0]
if input_dim_choice == "multivariate":
output_dim_choice = "1step"
regression_type_choice = None
else:
output_dim_choice = random.choice(
['forecast_length', '1step'],
)
regression_type_choice = random.choices(
[None, "User"], weights=[0.8, 0.2]
)[0]
normalize_window_choice = random.choices([True, False], [0.05, 0.95])[0]
max_windows_choice = random.choices([5000, 1000, 50000], [0.85, 0.05, 0.1])[0]
return {
'window_size': window_size_choice,
'input_dim': input_dim_choice,
'output_dim': output_dim_choice,
'normalize_window': normalize_window_choice,
'max_windows': max_windows_choice,
'regression_type': regression_type_choice,
'regression_model': model_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'window_size': self.window_size,
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'normalize_window': self.normalize_window,
'max_windows': self.max_windows,
'regression_type': self.regression_type,
'regression_model': self.regression_model,
}
class ComponentAnalysis(ModelObject):
"""Forecasting on principle components.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
model (str): An AutoTS model str
model_parameters (dict): parameters to pass to AutoTS model
n_components (int): int or 'NthN' number of components to use
decomposition (str): decomposition method to use from scikit-learn
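
    Example:
        A minimal usage sketch (not a doctest); ``df`` is assumed to be a
        datetime-indexed wide pandas.DataFrame::

            model = ComponentAnalysis(n_components=10, model='GLS', forecast_length=14)
            prediction = model.fit(df).predict(forecast_length=14)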
"""
def __init__(
self,
name: str = "ComponentAnalysis",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_components: int = 10,
forecast_length: int = 14,
model: str = 'GLS',
model_parameters: dict = {},
decomposition: str = 'PCA',
n_jobs: int = -1,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.model = model
self.model_parameters = model_parameters
self.decomposition = decomposition
self.n_components = n_components
self.forecast_length = forecast_length
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
if 'thN' in str(self.n_components):
n_int = int(''.join([x for x in str(self.n_components) if x.isdigit()]))
n_int = int(np.floor(df.shape[1] / n_int))
n_int = n_int if n_int >= 2 else 2
else:
n_int = int(''.join([x for x in str(self.n_components) if x.isdigit()]))
self.n_int = n_int
if self.decomposition == 'TruncatedSVD':
from sklearn.decomposition import TruncatedSVD
transformer = TruncatedSVD(
n_components=self.n_int, random_state=self.random_seed
)
elif self.decomposition == 'WhitenedPCA':
from sklearn.decomposition import PCA
transformer = PCA(
n_components=self.n_int, whiten=True, random_state=self.random_seed
)
elif self.decomposition == 'PCA':
from sklearn.decomposition import PCA
transformer = PCA(
n_components=self.n_int, whiten=False, random_state=self.random_seed
)
elif self.decomposition == 'KernelPCA':
from sklearn.decomposition import KernelPCA
transformer = KernelPCA(
n_components=self.n_int,
kernel='rbf',
random_state=self.random_seed,
fit_inverse_transform=True,
)
elif self.decomposition == 'FastICA':
from sklearn.decomposition import FastICA
transformer = FastICA(
n_components=self.n_int,
whiten=True,
random_state=self.random_seed,
max_iter=500,
)
try:
self.transformer = transformer.fit(df)
except ValueError:
raise ValueError(
"n_components and decomposition not suitable for this dataset."
)
X = self.transformer.transform(df)
X = pd.DataFrame(X)
X.index = df.index
from autots.evaluator.auto_model import ModelMonster
try:
self.modelobj = ModelMonster(
self.model,
parameters=self.model_parameters,
frequency=self.frequency,
prediction_interval=self.prediction_interval,
holiday_country=self.holiday_country,
random_seed=self.random_seed,
verbose=self.verbose,
n_jobs=self.n_jobs,
forecast_length=self.forecast_length,
).fit(X, future_regressor=future_regressor)
except Exception as e:
raise ValueError(f"Model {str(self.model)} with error: {repr(e)}")
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
            future_regressor (numpy.Array): additional regressor, passed through to the underlying model
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
XA = self.modelobj.predict(
forecast_length=forecast_length, future_regressor=future_regressor
)
Xf = self.transformer.inverse_transform(np.array(XA.forecast))
if not isinstance(Xf, pd.DataFrame):
Xf = pd.DataFrame(Xf)
Xf.columns = self.column_names
Xf.index = self.create_forecast_index(forecast_length=forecast_length)
Xf = Xf.astype(float)
if just_point_forecast:
return Xf
else:
"""
upper_forecast = self.transformer.inverse_transform(np.array(XA.upper_forecast))
if not isinstance(upper_forecast, pd.DataFrame):
upper_forecast = pd.DataFrame(upper_forecast)
upper_forecast.columns = self.column_names
upper_forecast.index = self.create_forecast_index(forecast_length=forecast_length)
lower_forecast = self.transformer.inverse_transform(np.array(XA.lower_forecast))
if not isinstance(lower_forecast, pd.DataFrame):
lower_forecast = pd.DataFrame(lower_forecast)
lower_forecast.columns = self.column_names
lower_forecast.index = self.create_forecast_index(forecast_length=forecast_length)
"""
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
Xf,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=Xf.index,
forecast_columns=Xf.columns,
lower_forecast=lower_forecast,
forecast=Xf,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
n_components_choice = np.random.choice(
a=[10, '10thN'], size=1, p=[0.6, 0.4]
).item()
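        # '10thN' is parsed in fit() as roughly one component per 10 series (minimum of 2)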
decomposition_choice = np.random.choice(
a=['TruncatedSVD', 'WhitenedPCA', 'PCA', 'KernelPCA', 'FastICA'],
size=1,
p=[0.05, 0.05, 0.5, 0.2, 0.2],
).item()
model_list = [
'LastValueNaive',
'GLS',
'TensorflowSTS',
'GLM',
'ETS',
'FBProphet',
'MotifSimulation',
'RollingRegression',
'WindowRegression',
'UnobservedComponents',
'VECM',
]
model_str = np.random.choice(
model_list,
size=1,
p=[0.01, 0.01, 0.01, 0.01, 0.01, 0.7, 0.01, 0.02, 0.1, 0.1, 0.02],
).item()
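        # note: the unweighted draw below supersedes the weighted draw above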
model_str = np.random.choice(model_list)
from autots.evaluator.auto_model import ModelMonster
param_dict = ModelMonster(model_str).get_new_params()
return {
'model': model_str,
'model_parameters': param_dict,
'decomposition': decomposition_choice,
'n_components': n_components_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'model': self.model,
'model_parameters': self.model_parameters,
'decomposition': self.decomposition,
'n_components': self.n_components,
}
class DatepartRegression(ModelObject):
"""Regression not on series but datetime
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
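
    Example:
        A minimal usage sketch (not a doctest); ``df`` is assumed to be a
        datetime-indexed pandas.DataFrame::

            model = DatepartRegression(datepart_method='expanded')
            prediction = model.fit(df).predict(forecast_length=14)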
"""
def __init__(
self,
name: str = "DatepartRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
forecast_length: int = 1,
n_jobs: int = None,
regression_model: dict = {
"model": 'DecisionTree',
"model_params": {"max_depth": 5, "min_samples_split": 2},
},
datepart_method: str = 'expanded',
regression_type: str = None,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
regression_type=regression_type,
verbose=verbose,
n_jobs=n_jobs,
)
self.regression_model = regression_model
self.datepart_method = datepart_method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"regression_type='User' but no future_regressor passed"
)
y = df.to_numpy()
X = date_part(df.index, method=self.datepart_method)
if self.regression_type in ['User', 'user']:
# regr = future_regressor.copy()
# regr.index = X.index
X = pd.concat([X, future_regressor], axis=1)
X.columns = [str(xc) for xc in X.columns]
multioutput = True
if y.ndim < 2:
multioutput = False
elif y.shape[1] < 2:
multioutput = False
y = y.ravel()
self.model = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.df_train = df
self.model = self.model.fit(X, y)
        self.shape = df.shape
        self.fit_runtime = datetime.datetime.now() - self.startTime
        return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
future_regressor (pandas.DataFrame or Series): Datetime Indexed
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
X = date_part(index, method=self.datepart_method)
if self.regression_type in ['User', 'user']:
X = pd.concat([X, future_regressor], axis=1)
X.columns = [str(xc) for xc in X.columns]
forecast = pd.DataFrame(self.model.predict(X))
forecast.columns = self.column_names
forecast.index = index
if just_point_forecast:
return forecast
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
forecast,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
model_choice = generate_regressor_params(model_dict=datepart_model_dict)
datepart_choice = random.choices(
["recurring", "simple", "expanded"], [0.4, 0.3, 0.3]
)[0]
if "regressor" in method:
regression_choice = "User"
else:
regression_choice = random.choices([None, 'User'], [0.7, 0.3])[0]
parameter_dict = {
'regression_model': model_choice,
'datepart_method': datepart_choice,
'regression_type': regression_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'regression_model': self.regression_model,
'datepart_method': self.datepart_method,
'regression_type': self.regression_type,
}
return parameter_dict
class UnivariateRegression(ModelObject):
"""Regression-framed approach to forecasting using sklearn.
    A univariate version of rolling regression: i.e., each series is modeled independently.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holiday flags
regression_type (str): type of regression (None, 'User')
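
    Example:
        A minimal usage sketch (not a doctest). Note that ``forecast_length``
        is fixed at init for this model; the value passed to ``predict`` is
        ignored. ``df`` is assumed to be a datetime-indexed pandas.DataFrame::

            model = UnivariateRegression(forecast_length=7)
            prediction = model.fit(df).predict(forecast_length=7)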
"""
def __init__(
self,
name: str = "UnivariateRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
holiday_country: str = 'US',
verbose: int = 0,
random_seed: int = 2020,
forecast_length: int = 7,
regression_model: dict = {
"model": 'ExtraTrees',
"model_params": {},
},
holiday: bool = False,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = 7,
min_rolling_periods: int = 7,
ewm_var_alpha: float = None,
ewm_alpha: float = 0.5,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
polynomial_degree: int = None,
x_transform: str = None,
window: int = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.forecast_length = forecast_length
self.regression_model = regression_model
self.holiday = holiday
self.mean_rolling_periods = mean_rolling_periods
if mean_rolling_periods is None:
self.macd_periods = None
else:
self.macd_periods = macd_periods
self.std_rolling_periods = std_rolling_periods
self.max_rolling_periods = max_rolling_periods
self.min_rolling_periods = min_rolling_periods
self.ewm_var_alpha = ewm_var_alpha
self.ewm_alpha = ewm_alpha
self.additional_lag_periods = additional_lag_periods
self.abs_energy = abs_energy
self.rolling_autocorr_periods = rolling_autocorr_periods
self.add_date_part = add_date_part
self.polynomial_degree = polynomial_degree
self.x_transform = x_transform
self.window = window
def _x_transformer(self):
if self.x_transform == 'FastICA':
from sklearn.decomposition import FastICA
x_transformer = FastICA(n_components=None, random_state=2020, whiten=True)
elif self.x_transform == 'Nystroem':
from sklearn.kernel_approximation import Nystroem
half_size = int(self.sktraindata.shape[0] / 2) + 1
max_comp = 200
n_comp = max_comp if half_size > max_comp else half_size
x_transformer = Nystroem(
kernel='rbf', gamma=0.2, random_state=2020, n_components=n_comp
)
else:
# self.x_transform = 'RmZeroVariance'
from sklearn.feature_selection import VarianceThreshold
x_transformer = VarianceThreshold(threshold=0.0)
return x_transformer
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
future_regressor (pandas.DataFrame or Series): Datetime Indexed
"""
df = self.basic_profile(df)
self.sktraindata = df
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"regression_type='User' but not future_regressor supplied."
)
elif future_regressor.shape[0] != df.shape[0]:
raise ValueError(
"future_regressor shape does not match training data shape."
)
else:
self.regressor_train = future_regressor
cols = self.sktraindata.columns
def forecast_by_column(self, args, parallel, n_jobs, col):
"""Run one series and return prediction."""
base = pd.DataFrame(self.sktraindata[col])
Y = base.copy()
for curr_shift in range(1, self.forecast_length):
Y = pd.concat([Y, base.shift(-curr_shift)], axis=1)
# drop incomplete data
Y = Y.drop(index=Y.tail(self.forecast_length - 1).index)
            # drop the first target row so that, after Y is re-indexed to X below,
            # each X row is paired with strictly-future values
            Y = Y.drop(index=Y.index[0])
Y.columns = [x for x in range(len(Y.columns))]
X = rolling_x_regressor(
base,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
window=self.window,
)
if self.regression_type == 'User':
X = pd.concat([X, self.regressor_train], axis=1)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
self.x_transformer = self._x_transformer()
self.x_transformer = self.x_transformer.fit(X)
X = pd.DataFrame(self.x_transformer.transform(X))
X = X.replace([np.inf, -np.inf], 0).fillna(0)
X = X.drop(index=X.tail(self.forecast_length).index)
            Y.index = X.index  # align the shifted targets with the trimmed feature matrix
# retrieve model object to train
if not parallel and n_jobs > 1:
n_jobs_passed = n_jobs
else:
n_jobs_passed = 1
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
# because the training messages get annoying
inner_verbose = self.verbose - 1 if self.verbose > 0 else self.verbose
dah_model = retrieve_regressor(
regression_model=self.regression_model,
verbose=inner_verbose,
verbose_bool=False,
random_seed=self.random_seed,
n_jobs=n_jobs_passed,
multioutput=multioutput,
)
dah_model.fit(X.to_numpy(), Y)
return {col: dah_model}
self.parallel = True
self.not_parallel_models = [
'LightGBM',
'RandomForest',
"BayesianRidge",
'Transformer',
"KerasRNN",
"HistGradientBoost",
]
out_n_jobs = int(self.n_jobs - 1)
out_n_jobs = 1 if out_n_jobs < 1 else out_n_jobs
if out_n_jobs in [0, 1] or len(cols) < 3:
self.parallel = False
elif (
self.regression_model.get("model", "ElasticNet") in self.not_parallel_models
):
self.parallel = False
else:
try:
from joblib import Parallel, delayed
except Exception:
self.parallel = False
args = {}
# joblib multiprocessing to loop through series
if self.parallel:
df_list = Parallel(n_jobs=out_n_jobs)(
delayed(forecast_by_column)(self, args, self.parallel, self.n_jobs, col)
for (col) in cols
)
self.models = {k: v for d in df_list for k, v in d.items()}
else:
df_list = []
for col in cols:
df_list.append(
forecast_by_column(self, args, self.parallel, self.n_jobs, col)
)
self.models = {k: v for d in df_list for k, v in d.items()}
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int = None,
just_point_forecast: bool = False,
future_regressor=None,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
ignored here for this model, must be set in __init__ before .fit()
future_regressor (pd.DataFrame): additional regressor
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=self.forecast_length)
forecast = pd.DataFrame()
for x_col in self.sktraindata.columns:
base = pd.DataFrame(self.sktraindata[x_col])
x_dat = rolling_x_regressor(
base,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
window=self.window,
)
if self.regression_type == 'User':
x_dat = pd.concat([x_dat, self.regressor_train], axis=1).fillna(0)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
x_dat = pd.DataFrame(self.x_transformer.transform(x_dat))
x_dat = x_dat.replace([np.inf, -np.inf], 0).fillna(0)
rfPred = self.models[x_col].predict(x_dat.tail(1).to_numpy())
# rfPred = pd.DataFrame(rfPred).transpose()
# rfPred.columns = [x_col]
rfPred = pd.Series(rfPred.flatten())
rfPred.name = x_col
forecast = pd.concat([forecast, rfPred], axis=1)
forecast = forecast[self.column_names]
forecast.index = index
if just_point_forecast:
return forecast
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.sktraindata,
forecast,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=self.forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
model_choice = generate_regressor_params(model_dict=univariate_model_dict)
mean_rolling_periods_choice = random.choices(
[None, 5, 7, 12, 30], [0.6, 0.1, 0.1, 0.1, 0.1]
)[0]
if mean_rolling_periods_choice is not None:
macd_periods_choice = seasonal_int()
if macd_periods_choice == mean_rolling_periods_choice:
macd_periods_choice = mean_rolling_periods_choice + 10
else:
macd_periods_choice = None
std_rolling_periods_choice = random.choices(
[None, 5, 7, 10, 30], [0.6, 0.1, 0.1, 0.1, 0.1]
)[0]
max_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
min_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
lag_periods_choice = seasonal_int() - 1
lag_periods_choice = 2 if lag_periods_choice < 2 else lag_periods_choice
ewm_choice = random.choices(
[None, 0.1, 0.2, 0.5, 0.8], [0.75, 0.05, 0.1, 0.1, 0.05]
)[0]
ewm_var_alpha = random.choices(
[None, 0.05, 0.1, 0.2, 0.5, 0.8], [0.7, 0.01, 0.05, 0.1, 0.1, 0.05]
)[0]
abs_energy_choice = random.choices([True, False], [0.1, 0.9])[0]
rolling_autocorr_periods_choice = random.choices(
[None, 2, 7, 12, 30], [0.86, 0.01, 0.01, 0.01, 0.01]
)[0]
add_date_part_choice = random.choices(
[None, 'simple', 'expanded', 'recurring'], [0.7, 0.1, 0.1, 0.1]
)[0]
holiday_choice = random.choices([True, False], [0.2, 0.8])[0]
polynomial_degree_choice = None
x_transform_choice = random.choices(
[None, 'FastICA', 'Nystroem', 'RmZeroVariance'],
[1.0, 0.0, 0.0, 0.0],
)[0]
if "regressor" in method:
regression_choice = "User"
else:
regression_choice = random.choices([None, 'User'], [0.7, 0.3])[0]
window_choice = random.choices([None, 3, 7, 10], [0.7, 0.2, 0.05, 0.05])[0]
parameter_dict = {
'regression_model': model_choice,
'holiday': holiday_choice,
'mean_rolling_periods': mean_rolling_periods_choice,
'macd_periods': macd_periods_choice,
'std_rolling_periods': std_rolling_periods_choice,
'max_rolling_periods': max_rolling_periods_choice,
'min_rolling_periods': min_rolling_periods_choice,
"ewm_var_alpha": ewm_var_alpha,
'ewm_alpha': ewm_choice,
'additional_lag_periods': lag_periods_choice,
'abs_energy': abs_energy_choice,
'rolling_autocorr_periods': rolling_autocorr_periods_choice,
'add_date_part': add_date_part_choice,
'polynomial_degree': polynomial_degree_choice,
'x_transform': x_transform_choice,
'regression_type': regression_choice,
'window': window_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'regression_model': self.regression_model,
'holiday': self.holiday,
'mean_rolling_periods': self.mean_rolling_periods,
'macd_periods': self.macd_periods,
'std_rolling_periods': self.std_rolling_periods,
'max_rolling_periods': self.max_rolling_periods,
'min_rolling_periods': self.min_rolling_periods,
"ewm_var_alpha": self.ewm_var_alpha,
'ewm_alpha': self.ewm_alpha,
'additional_lag_periods': self.additional_lag_periods,
'abs_energy': self.abs_energy,
'rolling_autocorr_periods': self.rolling_autocorr_periods,
'add_date_part': self.add_date_part,
'polynomial_degree': self.polynomial_degree,
'x_transform': self.x_transform,
'regression_type': self.regression_type,
'window': self.window,
}
return parameter_dict
class MultivariateRegression(ModelObject):
"""Regression-framed approach to forecasting using sklearn.
    A multivariate version of rolling regression: i.e., features are built for
    each series independently, but all series are modeled together.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holiday flags
regression_type (str): type of regression (None, 'User')
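
    Example:
        A minimal usage sketch (not a doctest). As with UnivariateRegression,
        ``forecast_length`` is set at init. ``df`` is assumed to be a
        datetime-indexed pandas.DataFrame::

            model = MultivariateRegression(forecast_length=7)
            prediction = model.fit(df).predict(forecast_length=7)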
"""
def __init__(
self,
name: str = "MultivariateRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
holiday_country: str = 'US',
verbose: int = 0,
random_seed: int = 2020,
forecast_length: int = 7,
regression_model: dict = {
"model": 'RandomForest',
"model_params": {},
},
holiday: bool = False,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = 7,
min_rolling_periods: int = 7,
ewm_var_alpha: float = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
datepart_method: str = None,
polynomial_degree: int = None,
window: int = None,
quantile_params: dict = {
'learning_rate': 0.1,
'max_depth': 20,
'min_samples_leaf': 4,
'min_samples_split': 5,
'n_estimators': 250,
},
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.forecast_length = forecast_length
self.regression_model = regression_model
self.holiday = holiday
self.mean_rolling_periods = mean_rolling_periods
if mean_rolling_periods is None:
self.macd_periods = None
else:
self.macd_periods = macd_periods
self.std_rolling_periods = std_rolling_periods
self.max_rolling_periods = max_rolling_periods
self.min_rolling_periods = min_rolling_periods
self.ewm_var_alpha = ewm_var_alpha
self.quantile90_rolling_periods = quantile90_rolling_periods
self.quantile10_rolling_periods = quantile10_rolling_periods
self.ewm_alpha = ewm_alpha
self.additional_lag_periods = additional_lag_periods
self.abs_energy = abs_energy
self.rolling_autocorr_periods = rolling_autocorr_periods
self.datepart_method = datepart_method
self.polynomial_degree = polynomial_degree
self.window = window
self.quantile_params = quantile_params
self.regressor_train = None
        # detect just the max history length needed for the cutoff (makes predict faster)
        starting_min = 90  # based on what affects ewm alphas, too
list_o_vals = [
mean_rolling_periods,
macd_periods,
std_rolling_periods,
max_rolling_periods,
min_rolling_periods,
quantile90_rolling_periods,
quantile10_rolling_periods,
additional_lag_periods,
rolling_autocorr_periods,
window,
starting_min,
]
self.min_threshold = max([x for x in list_o_vals if str(x).isdigit()])
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
future_regressor (pandas.DataFrame or Series): Datetime Indexed
"""
df = self.basic_profile(df)
from sklearn.ensemble import GradientBoostingRegressor
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"regression_type='User' but not future_regressor supplied."
)
elif future_regressor.shape[0] != df.shape[0]:
raise ValueError(
"future_regressor shape does not match training data shape."
)
else:
self.regressor_train = future_regressor
# define X and Y
Y = df[1:].to_numpy().ravel(order="F")
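        # order="F" stacks the one-step-ahead targets series by series, which
        # matches the per-series feature blocks concatenated into X below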
# drop look ahead data
base = df[:-1]
if self.regression_type is not None:
cut_regr = self.regressor_train[1:]
cut_regr.index = base.index
else:
cut_regr = None
# open to suggestions on making this faster
X = pd.concat(
[
rolling_x_regressor_regressor(
base[x_col].to_frame(),
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
quantile90_rolling_periods=self.quantile90_rolling_periods,
quantile10_rolling_periods=self.quantile10_rolling_periods,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.datepart_method,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
window=self.window,
future_regressor=cut_regr,
)
for x_col in base.columns
]
)
del base
alpha_base = (1 - self.prediction_interval) / 2
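        # e.g. a 0.9 prediction_interval yields quantile models at alpha 0.95 and 0.05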
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=(1 - alpha_base),
random_state=self.random_seed,
**self.quantile_params,
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=alpha_base,
random_state=self.random_seed,
**self.quantile_params,
)
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
self.model = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.model.fit(X.to_numpy(), Y)
self.model_upper.fit(X.to_numpy(), Y)
self.model_lower.fit(X.to_numpy(), Y)
# we only need the N most recent points for predict
self.sktraindata = df.tail(self.min_threshold)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int = None,
just_point_forecast: bool = False,
future_regressor=None,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
ignored here for this model, must be set in __init__ before .fit()
future_regressor (pd.DataFrame): additional regressor
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
forecast = | pd.DataFrame() | pandas.DataFrame |
"""Aggregate plant parts to make an EIA master plant-part table.
Practically speaking, a plant is a collection of generator(s). There are many
attributes of generators (i.e. prime mover, primary fuel source, technology
type). We can use these generator attributes to group generator records into
larger aggregate records which we call "plant-parts". A plant part is a record
which corresponds to a particular collection of generators that all share an
identical attribute. E.g. all of the generators with unit_id=2, or all of the
generators with coal as their primary fuel source.
The EIA data about power plants (from EIA 923 and 860) is reported in tables
with records that correspond to mostly generators and plants. Other datasets
(cough cough FERC1) are less well organized and include plants, generators and
other plant-parts all in the same table without any clear labels. The master
plant-part table is an attempt to create records corresponding to many
different plant-parts in order to connect specific slices of EIA plants to
other datasets.
Because generators are often owned by multiple utilities, another dimension of
the master unit list involves generating two records for each owner: one of the
portion of the plant part they own and one for the plant part as a whole. The
portion records are labeled in the ``ownership`` column as "owned" and the total
records are labeled as "total".
This module refers to "true granularities". Many plant parts we cobble together
here in the master plant-part list refer to the same collection of
infrastructure as other plant-part list records. For example, if we have a
"plant_prime_mover" plant part record and a "plant_unit" plant part record
which were both cobbled together from the same two generators. We want to be
able to reduce the plant-part list to only unique collections of generators,
so we label the first unique granularity as a true granularity and label the
subsequent records as false granularities with the ``true_gran`` column. In
order to choose which plant-part to keep in these instances, we use the ordering
in :py:const:`PLANT_PARTS_ORDERED` and label whichever plant-part comes first as
the true granularity.
**Recipe Book for the plant-part list**
:py:const:`PLANT_PARTS` is the main recipe book for how each of the plant-parts
need to be compiled. These plant-parts represent ways to group generators based
on widely reported values in EIA. All of these are logical ways to group
collections of generators - in most cases - but some groupings of generators
are more prevalent or relevant than others for certain types of plants.
The canonical example here is the ``plant_unit``. A unit is a collection of
generators that operate together - most notably the combined-cycle natural gas
plants. Combined-cycle units generally consist of a number of gas turbines
which feed excess steam to a number of steam turbines.
>>> df_gens = pd.DataFrame({
... 'plant_id_eia': [1, 1, 1],
... 'generator_id': ['a', 'b', 'c'],
... 'unit_id_pudl': [1, 1, 1],
... 'prime_mover_code': ['CT', 'CT', 'CA'],
... 'capacity_mw': [50, 50, 100],
... })
>>> df_gens
plant_id_eia generator_id unit_id_pudl prime_mover_code capacity_mw
0 1 a 1 CT 50
1 1 b 1 CT 50
2 1 c 1 CA 100
A good example of a plant-part that isn't really logical also comes from a
combined-cycle unit. Grouping this example plant by the ``prime_mover_code``
would generate two records that would basically never show up in FERC1.
This stems from the inseparability of the generators.
>>> df_plant_prime_mover = pd.DataFrame({
... 'plant_id_eia': [1, 1],
... 'plant_part': ['plant_prime_mover', 'plant_prime_mover'],
... 'prime_mover_code': ['CT', 'CA'],
... 'capacity_mw': [100, 100],
... })
>>> df_plant_prime_mover
plant_id_eia plant_part prime_mover_code capacity_mw
0 1 plant_prime_mover CT 100
1 1 plant_prime_mover CA 100
In this case the unit is more relevant:
>>> df_plant_unit = pd.DataFrame({
... 'plant_id_eia': [1],
... 'plant_part': ['plant_unit'],
... 'unit_id_pudl': [1],
... 'capacity_mw': [200],
... })
>>> df_plant_unit
plant_id_eia plant_part unit_id_pudl capacity_mw
0 1 plant_unit 1 200
But if this same plant had both this combined-cycle unit and two more
generators that were self-contained "GT" or gas combustion turbines, the
natural way to group the generators differs between the combined-cycle unit
and the gas turbines.
>>> df_gens = pd.DataFrame({
... 'plant_id_eia': [1, 1, 1, 1, 1],
... 'generator_id': ['a', 'b', 'c', 'd', 'e'],
... 'unit_id_pudl': [1, 1, 1, 2, 3],
... 'prime_mover_code': ['CT', 'CT', 'CA', 'GT', 'GT'],
... 'capacity_mw': [50, 50, 100, 75, 75],
... })
>>> df_gens
plant_id_eia generator_id unit_id_pudl prime_mover_code capacity_mw
0 1 a 1 CT 50
1 1 b 1 CT 50
2 1 c 1 CA 100
3 1 d 2 GT 75
4 1 e 3 GT 75
>>> df_plant_part = pd.DataFrame({
... 'plant_id_eia': [1, 1],
... 'plant_part': ['plant_unit', 'plant_prime_mover'],
... 'unit_id_pudl': [1, pd.NA],
... 'prime_mover_code': [pd.NA, 'GT',],
... 'capacity_mw': [200, 150],
... })
>>> df_plant_part
plant_id_eia plant_part unit_id_pudl prime_mover_code capacity_mw
0 1 plant_unit 1 <NA> 200
1 1 plant_prime_mover <NA> GT 150
In this last case, the ``plant_unit`` record would have a null
``plant_prime_mover`` because the unit contains more than one
``prime_mover_code``. Same goes for the ``unit_id_pudl`` of the
``plant_prime_mover``. This is handled in :class:`AddConsistentAttributes`.
**Overview of flow for generating the master unit list:**
The two main classes which enable the generation of the plant-part table are:
* :class:`MakeMegaGenTbl`: All of the plant parts are compiled from generators.
So this class generates a big dataframe of generators with any ID and data
columns we'll need. This is also where we add records regarding utility
ownership slices. The table includes two records for every generator-owner:
one for the "total" generator (assuming the owner owns 100% of the generator)
and one for the reported ownership fraction of that generator with all of the
data columns scaled to the ownership fraction.
* :class:`MakePlantParts`: This class uses the generator dataframe as well as
the information stored in :py:const:`PLANT_PARTS` to know how to aggregate each
of the plant parts. Then we have plant part dataframes with the columns which
identify the plant part and all of the data columns aggregated to the level of
the plant part. With that compiled plant part dataframe we also add in qualifier
columns with :class:`AddConsistentAttributes`. A qualifier column is a column which
contains data that is not endemic to the plant part record (it is not one of
the identifying columns or aggregated data columns) but the data is still
useful data that is attributable to each of the plant part records. For more
detail on what a qualifier column is, see :meth:`AddConsistentAttributes.execute`.
**Generating the plant-parts list**
There are two ways to generate the plant-parts table: one directly using the
:class:`pudl.output.pudltabl.PudlTabl` object and the other using the classes
from this module. Either option needs a :class:`pudl.output.pudltabl.PudlTabl`
object.
Create the :class:`pudl.output.pudltabl.PudlTabl` object:
.. code-block:: python
import pudl
pudl_engine = sa.create_engine(pudl.workspace.setup.get_defaults()['pudl_db'])
pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine,freq='AS')
Then make the table via pudl_out:
.. code-block:: python
plant_parts_eia = pudl_out.plant_parts_eia()
OR make the table via objects in this module:
.. code-block:: python
gens_mega = MakeMegaGenTbl().execute(mcoe, own_eia860)
parts_compiler = MakePlantParts(pudl_out)
plant_parts_eia = parts_compiler.execute(gens_mega=gens_mega)
"""
import logging
import warnings
from copy import deepcopy
from typing import Dict, List, Literal, Optional
import numpy as np
import pandas as pd
import pudl
logger = logging.getLogger(__name__)
# HALP: I need both of these settings set in order for the dfs in the docstrings
# to pass the doctests. Without them the formatting gets all jumbled.
# but obviously this is the wrong place to do this.
# I tried adding these into conftest.py in pandas_terminal_width().
# I tried adding this into __init__.py.
# I tried adding this into the module docstring.
pd.options.display.width = 1000
pd.options.display.max_columns = 1000
PLANT_PARTS: Dict[str, Dict[str, List]] = {
"plant": {
"id_cols": ["plant_id_eia"],
},
"plant_gen": {
"id_cols": ["plant_id_eia", "generator_id"],
},
"plant_unit": {
"id_cols": ["plant_id_eia", "unit_id_pudl"],
},
"plant_technology": {
"id_cols": ["plant_id_eia", "technology_description"],
},
"plant_prime_fuel": { # 'plant_primary_fuel': {
"id_cols": ["plant_id_eia", "energy_source_code_1"],
},
"plant_prime_mover": {
"id_cols": ["plant_id_eia", "prime_mover_code"],
},
"plant_ferc_acct": {
"id_cols": ["plant_id_eia", "ferc_acct_name"],
},
}
"""
dict: this dictionary contains a key for each of the 'plant parts' that should
end up in the master plant-part list. The top-level value for each key is another
dictionary, which contains keys:
* id_cols (the primary key type id columns for this plant part). The
plant_id_eia column must come first.
"""
PLANT_PARTS_ORDERED: List[str] = [
"plant",
"plant_unit",
"plant_prime_mover",
"plant_technology",
"plant_prime_fuel",
"plant_ferc_acct",
"plant_gen",
]
IDX_TO_ADD: List[str] = ["report_date", "operational_status_pudl"]
"""
list: list of additional columns to add to the id_cols in :py:const:`PLANT_PARTS`.
The id_cols are the base columns that we need to aggregate on, but we also need
to add the report date to keep the records time sensitive and the
operational_status_pudl to separate the operating plant-parts from the
non-operating plant-parts.
"""
IDX_OWN_TO_ADD: List[str] = ["utility_id_eia", "ownership"]
"""
list: list of additional columns beyond the :py:const:`IDX_TO_ADD` to add to the
id_cols in :py:const:`PLANT_PARTS` when we are dealing with plant-part records
that have been broken out into "owned" and "total" records for each of their
owners.
"""
SUM_COLS: List[str] = [
"total_fuel_cost",
"net_generation_mwh",
"capacity_mw",
"capacity_eoy_mw",
"total_mmbtu",
]
"""list: list of columns to sum when aggregating a table."""
WTAVG_DICT = {
"fuel_cost_per_mwh": "capacity_mw",
"heat_rate_mmbtu_mwh": "capacity_mw",
"fuel_cost_per_mmbtu": "capacity_mw",
}
"""
dict: a dictionary of columns (keys) to perform weighted averages on and
the weight column (values)
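For example (illustrative only, not the actual aggregation code), a
capacity-weighted average of ``fuel_cost_per_mwh`` within a group ``g`` is::

    np.average(g["fuel_cost_per_mwh"], weights=g["capacity_mw"])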
"""
CONSISTENT_ATTRIBUTE_COLS = [
"fuel_type_code_pudl",
"planned_retirement_date",
"retirement_date",
"generator_id",
"unit_id_pudl",
"technology_description",
"energy_source_code_1",
"prime_mover_code",
"ferc_acct_name",
]
"""
list: a list of column names to add as attributes to the aggregated plant-part
records when their values are consistent across the records being aggregated.
"""
PRIORITY_ATTRIBUTES_DICT = {
"operational_status": ["existing", "proposed", "retired"],
}
MAX_MIN_ATTRIBUTES_DICT = {
"installation_year": {
"assign_col": {"installation_year": lambda x: x.operating_date.dt.year},
"dtype": "Int64",
"keep": "first",
},
"construction_year": {
"assign_col": {"construction_year": lambda x: x.operating_date.dt.year},
"dtype": "Int64",
"keep": "last",
},
}
FIRST_COLS = [
"plant_id_eia",
"report_date",
"plant_part",
"generator_id",
"unit_id_pudl",
"prime_mover_code",
"energy_source_code_1",
"technology_description",
"ferc_acct_name",
"utility_id_eia",
"true_gran",
"appro_part_label",
]
class MakeMegaGenTbl(object):
"""Compiler for a MEGA generator table with ownership integrated.
Examples
--------
**Input Tables**
Here is an example of one plant with three generators. We will use
``capacity_mw`` as the data column.
>>> mcoe = pd.DataFrame({
... 'plant_id_eia': [1, 1, 1],
... 'report_date': ['2020-01-01', '2020-01-01','2020-01-01'],
... 'generator_id': ['a', 'b', 'c'],
... 'utility_id_eia': [111, 111, 111],
... 'unit_id_pudl': [1, 1, 1],
... 'prime_mover_code': ['CT', 'CT', 'CA'],
... 'technology_description': [
... 'Natural Gas Fired Combined Cycle', 'Natural Gas Fired Combined Cycle', 'Natural Gas Fired Combined Cycle'
... ],
... 'operational_status': ['existing', 'existing','existing'],
... 'retirement_date': [pd.NA, pd.NA, pd.NA],
... 'capacity_mw': [50, 50, 100],
... }).astype({
... 'retirement_date': "datetime64[ns]",
... 'report_date': "datetime64[ns]",
... })
>>> mcoe
plant_id_eia report_date generator_id utility_id_eia unit_id_pudl prime_mover_code technology_description operational_status retirement_date capacity_mw
0 1 2020-01-01 a 111 1 CT Natural Gas Fired Combined Cycle existing NaT 50
1 1 2020-01-01 b 111 1 CT Natural Gas Fired Combined Cycle existing NaT 50
2 1 2020-01-01 c 111 1 CA Natural Gas Fired Combined Cycle existing NaT 100
The ownership table from EIA 860 includes one record for every owner of
each generator. In this example generator ``c`` has two owners.
>>> df_own_eia860 = pd.DataFrame({
... 'plant_id_eia': [1, 1, 1, 1],
... 'report_date': ['2020-01-01', '2020-01-01','2020-01-01', '2020-01-01'],
... 'generator_id': ['a', 'b', 'c', 'c'],
... 'utility_id_eia': [111, 111, 111, 111],
... 'owner_utility_id_eia': [111, 111, 111, 888],
... 'fraction_owned': [1, 1, .75, .25]
... }).astype({'report_date': "datetime64[ns]"})
>>> df_own_eia860
plant_id_eia report_date generator_id utility_id_eia owner_utility_id_eia fraction_owned
0 1 2020-01-01 a 111 111 1.00
1 1 2020-01-01 b 111 111 1.00
2 1 2020-01-01 c 111 111 0.75
3 1 2020-01-01 c 111 888 0.25
**Output Mega Generators Table**
``MakeMegaGenTbl().execute(mcoe, df_own_eia860, slice_cols=['capacity_mw'])``
produces the output table ``gens_mega`` which includes two main sections:
the generators with a "total" ownership stake for each of their owners and
the generators with an "owned" ownership stake for each of their
owners. For the generators that are owned 100% by one utility, the
records are identical except the ``ownership`` column. For the
generators that have more than one owner, there are two "total" records
with 100% of the capacity of that generator - one for each owner - and
two "owned" records with the capacity scaled to the ownership stake
    of each of the owner utilities - represented by ``fraction_owned``.
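
    The scaling itself is plain multiplication by ``fraction_owned``. A rough
    sketch of the arithmetic for generator ``c`` above (illustrative Python,
    not the actual implementation)::

        owned_111 = 100 * 0.75  # 75 MW "owned" record for utility 111
        owned_888 = 100 * 0.25  # 25 MW "owned" record for utility 888
        total = 100             # each owner also gets a 100 MW "total" record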
"""
def __init__(self):
"""Initialize object which creates a MEGA generator table.
The coordinating function here is :meth:`execute`.
"""
self.id_cols_list = make_id_cols_list()
def execute(
self,
mcoe: pd.DataFrame,
own_eia860: pd.DataFrame,
slice_cols: List[str] = SUM_COLS,
validate_own_merge: str = "1:m",
) -> pd.DataFrame:
"""Make the mega generators table with ownership integrated.
Args:
mcoe: generator-based mcoe table from :meth:`pudl.output.PudlTabl.mcoe()`
own_eia860: ownership table from :meth:`pudl.output.PudlTabl.own_eia860()`
            slice_cols: list of columns to slice by ownership fraction in
:meth:`MakeMegaGenTbl.scale_by_ownership`. Default is :py:const:`SUM_COLS`
validate_own_merge: how the merge between ``mcoe`` and ``own_eia860``
is to be validated via ``pd.merge``. If there should be one
record for each plant/generator/date in ``mcoe`` then the default
`1:m` should be used.
Returns:
a table of all of the generators with identifying columns and data
columns, sliced by ownership which makes "total" and "owned"
records for each generator owner. The "owned" records have the
generator's data scaled to the ownership percentage (e.g. if a 200
MW generator has a 75% stake owner and a 25% stake owner, this will
result in two "owned" records with 150 MW and 50 MW). The "total"
records correspond to the full plant for every owner (e.g. using
            the same 2-owner 200 MW generator as above, each owner will have a
            record with 200 MW).
"""
logger.info("Generating the mega generator table with ownership.")
gens_mega = (
self.get_gens_mega_table(mcoe)
.pipe(self.label_operating_gens)
.pipe(self.scale_by_ownership, own_eia860, slice_cols, validate_own_merge)
)
return gens_mega
def get_gens_mega_table(self, mcoe):
"""Compile the main generators table that will be used as base of PPL.
Get a table of all of the generators there ever were and all of the
data PUDL has to offer about those generators. This generator table
will be used to compile all of the "plant-parts", so we need to ensure
that any of the id columns from the other plant-parts are in this
generator table as well as all of the data columns that we are going to
aggregate to the various plant-parts.
Returns:
pandas.DataFrame
"""
all_gens = pd.merge( # Add EIA FERC acct fields
mcoe,
pudl.helpers.get_eia_ferc_acct_map(),
on=["technology_description", "prime_mover_code"],
validate="m:1",
how="left",
)
return all_gens
def label_operating_gens(self, gen_df: pd.DataFrame) -> pd.DataFrame:
"""Label the operating generators.
We want to distinguish between "operating" generators (those that
report as "existing" and those that retire mid-year) and everything
else so that we can group the operating generators into their own
plant-parts separate from retired or proposed generators. We do this by
creating a new label column called "operational_status_pudl".
This method also adds a column called "capacity_eoy_mw", which is the
end of year capacity of the generators. We assume that if a generator
isn't "existing", its EOY capacity should be zero.
Args:
gen_df (pandas.DataFrame): annual table of all generators from EIA.
Returns:
pandas.DataFrame: annual table of all generators from EIA that
operated within each reporting year.
TODO:
This function results in warning: `PerformanceWarning: DataFrame
is highly fragmented...` I expect this is because of the number of
columns that are being assigned here via `.loc[:, col_to_assign]`.
"""
mid_year_retiree_mask = (
gen_df.retirement_date.dt.year == gen_df.report_date.dt.year
)
existing_mask = gen_df.operational_status == "existing"
operating_mask = existing_mask | mid_year_retiree_mask
# we're going to make a new column which combines both the mid-year
# retirees and the fully existing gens into one code so we can group
# them together later on
gen_df.loc[:, "operational_status_pudl"] = gen_df.loc[
:, "operational_status"
].mask(operating_mask, "operating")
gen_df.loc[:, "capacity_eoy_mw"] = gen_df.loc[:, "capacity_mw"].mask(
~existing_mask, 0
)
logger.info(
f"Labeled {len(gen_df.loc[~existing_mask])/len(gen_df):.02%} of "
"generators as non-operative."
)
return gen_df
def scale_by_ownership(
self, gens_mega, own_eia860, scale_cols=SUM_COLS, validate="1:m"
):
"""Generate proportional data by ownership %s.
Why do we have to do this at all? Sometimes generators are owned by
many different utility owners that own slices of that generator. EIA
reports which portion of each generator is owned by which utility
relatively clearly in their ownership table. On the other hand, in
FERC1, sometimes a partial owner reports the full plant-part, sometimes
they report only their ownership portion of the plant-part. And of
course it is not labeled in FERC1. Because of this, we need to compile
all of the possible ownership slices of the EIA generators.
In order to accumulate every possible version of how a generator could
be reported, this method generates two records for each generator's
reported owners: one of the portion of the plant part they own and one
for the plant-part as a whole. The portion records are labeled in the
``ownership`` column as "owned" and the total records are labeled as
"total".
In this function we merge in the ownership table so that generators
with multiple owners then have one record per owner with the
ownership fraction (in column ``fraction_owned``). Because the ownership
table only contains records for generators that have multiple owners,
we assume that all other generators are owned 100% by their operator.
Then we generate the "total" records by duplicating the "owned" records
but assigning the ``fraction_owned`` to be 1 (i.e. 100%).
"""
# grab the ownership table, and reduce it to only the columns we need
own860 = own_eia860[
[
"plant_id_eia",
"generator_id",
"report_date",
"fraction_owned",
"owner_utility_id_eia",
]
].pipe(pudl.helpers.convert_cols_dtypes, "eia")
# we're left merging BC we've removed the retired gens, which are
# reported in the ownership table
gens_mega = (
gens_mega.merge(
own860,
how="left",
on=["plant_id_eia", "generator_id", "report_date"],
validate=validate,
)
.assign( # assume gens that don't show up in the own table have one 100% owner
fraction_owned=lambda x: x.fraction_owned.fillna(value=1),
# assign the operator id as the owner if null bc if a gen isn't
# reported in the own_eia860 table we can assume the operator
# is the owner
owner_utility_id_eia=lambda x: x.owner_utility_id_eia.fillna(
x.utility_id_eia
),
ownership="owned",
) # swap in the owner as the utility
.drop(columns=["utility_id_eia"])
.rename(columns={"owner_utility_id_eia": "utility_id_eia"})
)
# duplicate all of these "owned" records, asign 1 to all of the
# fraction_owned column to indicate 100% ownership, and add these new
# "total" records to the "owned"
gens_mega = pd.concat(
[gens_mega, gens_mega.copy().assign(fraction_owned=1, ownership="total")]
)
gens_mega.loc[:, scale_cols] = gens_mega.loc[:, scale_cols].multiply(
gens_mega["fraction_owned"], axis="index"
)
return gens_mega
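# Illustrative sketch (not part of the original module): the heart of
# ``scale_by_ownership`` above is (1) duplicating every "owned" record as a
# "total" record with ``fraction_owned`` forced to 1 and (2) multiplying the
# data columns by ``fraction_owned``. A hypothetical, self-contained
# demonstration of that idea with plain pandas:
def _demo_scale_by_ownership() -> pd.DataFrame:
    """Toy example: a 200 MW generator with a 75%/25% ownership split."""
    owned = pd.DataFrame(
        {
            "generator_id": ["c", "c"],
            "utility_id_eia": [111, 888],
            "fraction_owned": [0.75, 0.25],
            "capacity_mw": [200.0, 200.0],
            "ownership": ["owned", "owned"],
        }
    )
    # The "total" records are copies of the "owned" records at 100% ownership.
    total = owned.copy().assign(fraction_owned=1.0, ownership="total")
    out = pd.concat([owned, total])
    # Scale the data column: the "owned" rows become 150 MW and 50 MW,
    # while the "total" rows stay at 200 MW.
    out["capacity_mw"] = out["capacity_mw"] * out["fraction_owned"]
    return out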
class MakePlantParts(object):
"""Compile the plant parts for the master unit list.
This object generates a master list of different "plant-parts", which
are various collections of generators - i.e. units, fuel-types, whole
plants, etc. - as well as various ownership arrangements. Each
plant-part is included in the master plant-part table associated with
each of the plant-part's owners twice - once with the data scaled to the
fraction of each owner's ownership and once as a total plant-part
for each owner.
This master plant parts table is generated by first creating a complete
generators table - with all of the data columns we will be aggregating
to different plant-parts - sliced and scaled by ownership. Then we use the
complete generator table to aggregate by each of the plant-part
categories. Next we add a label for each plant-part record which indicates
whether or not the record is a unique grouping of generator records.
The coordinating function here is :meth:`execute`.
"""
def __init__(self, pudl_out):
"""Initialize instance of :class:`MakePlantParts`.
Args:
pudl_out (pudl.output.pudltabl.PudlTabl): An object used to create
the tables for EIA and FERC Form 1 analysis.
"""
self.pudl_out = pudl_out
self.freq = pudl_out.freq
self.parts_to_ids = make_parts_to_ids_dict()
# get a list of all of the id columns that constitute the primary keys
# for all of the plant parts
self.id_cols_list = make_id_cols_list()
def execute(self, gens_mega):
"""Aggregate and slice data points by each plant part.
Returns:
pandas.DataFrame: The complete plant parts list
"""
# aggregate everything by each plant part
part_dfs = []
for part_name in PLANT_PARTS_ORDERED:
part_df = PlantPart(part_name).execute(gens_mega)
# add in the attributes!
for attribute_col in CONSISTENT_ATTRIBUTE_COLS:
part_df = AddConsistentAttributes(attribute_col, part_name).execute(
part_df, gens_mega
)
for attribute_col in PRIORITY_ATTRIBUTES_DICT.keys():
part_df = AddPriorityAttribute(attribute_col, part_name).execute(
part_df, gens_mega
)
for attribute_col in MAX_MIN_ATTRIBUTES_DICT.keys():
part_df = AddMaxMinAttribute(
attribute_col,
part_name,
assign_col_dict=MAX_MIN_ATTRIBUTES_DICT[attribute_col][
"assign_col"
],
).execute(
part_df,
gens_mega,
att_dtype=MAX_MIN_ATTRIBUTES_DICT[attribute_col]["dtype"],
keep=MAX_MIN_ATTRIBUTES_DICT[attribute_col]["keep"],
)
part_dfs.append(part_df)
plant_parts_eia = pd.concat(part_dfs)
plant_parts_eia = TrueGranLabeler().execute(plant_parts_eia)
# clean up, add additional columns
self.plant_parts_eia = (
self.add_additonal_cols(plant_parts_eia)
.pipe(pudl.helpers.organize_cols, FIRST_COLS)
.pipe(self._clean_plant_parts)
)
self.validate_ownership_for_owned_records(self.plant_parts_eia)
validate_run_aggregations(self.plant_parts_eia, gens_mega)
return self.plant_parts_eia
#######################################
# Add Entity Columns and Final Cleaning
#######################################
def add_additonal_cols(self, plant_parts_eia):
"""Add additonal data and id columns.
This method adds a set of either calculated columns or PUDL ID columns.
Returns:
pandas.DataFrame: master unit list table with these additional
columns:
* utility_id_pudl +
* plant_id_pudl +
* capacity_factor +
* ownership_dupe (boolean): indicator of whether the "owned"
record has a corresponding "total" duplicate.
"""
plant_parts_eia = (
pudl.helpers.calc_capacity_factor(
df=plant_parts_eia, min_cap_fact=-0.5, max_cap_fact=1.5, freq=self.freq
)
.merge(
self.pudl_out.plants_eia860()[
["plant_id_eia", "plant_id_pudl"]
].drop_duplicates(),
how="left",
on=[
"plant_id_eia",
],
)
.merge(
self.pudl_out.utils_eia860()[
["utility_id_eia", "utility_id_pudl"]
].drop_duplicates(),
how="left",
on=["utility_id_eia"],
)
.assign(
ownership_dupe=lambda x: np.where(
(x.ownership == "owned") & (x.fraction_owned == 1), True, False
)
)
)
return plant_parts_eia
def _clean_plant_parts(self, plant_parts_eia):
return (
plant_parts_eia.assign(
report_year=lambda x: x.report_date.dt.year,
plant_id_report_year=lambda x: x.plant_id_pudl.astype(str)
+ "_"
+ x.report_year.astype(str),
)
.pipe(
pudl.helpers.cleanstrings_snake,
["record_id_eia", "appro_record_id_eia"],
)
.set_index("record_id_eia")
)
#################
# Testing Methods
#################
def validate_ownership_for_owned_records(self, plant_parts_eia):
"""Test ownership - fraction owned for owned records.
This test can be run at the end of or with the result of
:meth:`MakePlantParts.execute`. It tests a few aspects of the
fraction_owned column and raises assertions if the tests fail.
"""
test_own_df = (
plant_parts_eia.groupby(
by=self.id_cols_list + ["plant_part", "ownership"],
dropna=False,
observed=True,
)[["fraction_owned", "capacity_mw"]]
.sum(min_count=1)
.reset_index()
)
owned_one_frac = test_own_df[
(~np.isclose(test_own_df.fraction_owned, 1))
& (test_own_df.capacity_mw != 0)
& (test_own_df.capacity_mw.notnull())
& (test_own_df.ownership == "owned")
]
if not owned_one_frac.empty:
self.test_own_df = test_own_df
self.owned_one_frac = owned_one_frac
raise AssertionError(
"Hello friend, you did a bad. It happens... There are "
f"{len(owned_one_frac)} rows where fraction_owned does not sum "
"to 100% for the owned records. "
"Check cached `owned_one_frac` & `test_own_df` and `scale_by_ownership()`"
)
no_frac_n_cap = test_own_df[
(test_own_df.capacity_mw == 0) & (test_own_df.fraction_owned == 0)
]
self.no_frac_n_cap = no_frac_n_cap
if len(no_frac_n_cap) > 60:
self.no_frac_n_cap = no_frac_n_cap
warnings.warn(
f"""Too many nothings, you nothing. There shouldn't been much
more than 60 instances of records with zero capacity_mw (and
therefor zero fraction_owned) and you got {len(no_frac_n_cap)}.
"""
)
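# Hypothetical sketch (not from the original module) of the invariant that
# ``validate_ownership_for_owned_records`` enforces above: within a single
# plant-part record, the "owned" slices' ``fraction_owned`` should sum to 1.
def _demo_owned_fractions_sum_to_one() -> bool:
    """Toy check that a 75%/25% ownership split sums to 100% per record."""
    owned = pd.DataFrame(
        {
            "record_id_eia": ["plant_1_2020", "plant_1_2020"],
            "fraction_owned": [0.75, 0.25],
        }
    )
    sums = owned.groupby("record_id_eia")["fraction_owned"].sum()
    return bool(np.isclose(sums, 1.0).all())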
class PlantPart(object):
"""Plant-part table maker.
The coordinating method here is :meth:`execute`.
**Examples**
Below are some examples of how the main processing step in this class
operates: :meth:`PlantPart.ag_part_by_own_slice`. If we have a plant with
four generators that looks like this:
>>> gens_mega = pd.DataFrame({
... 'plant_id_eia': [1, 1, 1, 1],
... 'report_date': ['2020-01-01', '2020-01-01', '2020-01-01', '2020-01-01',],
... 'utility_id_eia': [111, 111, 111, 111],
... 'generator_id': ['a', 'b', 'c', 'd'],
... 'prime_mover_code': ['ST', 'GT', 'CT', 'CA'],
... 'energy_source_code_1': ['BIT', 'NG', 'NG', 'NG'],
... 'ownership': ['total', 'total', 'total', 'total',],
... 'operational_status_pudl': ['operating', 'operating', 'operating', 'operating'],
... 'capacity_mw': [400, 50, 125, 75],
... }).astype({
... 'report_date': 'datetime64[ns]',
... })
>>> gens_mega
plant_id_eia report_date utility_id_eia generator_id prime_mover_code energy_source_code_1 ownership operational_status_pudl capacity_mw
0 1 2020-01-01 111 a ST BIT total operating 400
1 1 2020-01-01 111 b GT NG total operating 50
2 1 2020-01-01 111 c CT NG total operating 125
3 1 2020-01-01 111 d CA NG total operating 75
This ``gens_mega`` table can then be aggregated by ``plant``, ``plant_prime_fuel``,
``plant_prime_mover``, or ``plant_gen``.
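For instance, a plain-pandas sketch (illustrative only, not the actual
:meth:`execute` output) of a fuel-level aggregation of the table above,
``gens_mega.groupby('energy_source_code_1')['capacity_mw'].sum()``, yields
400 MW for BIT and 250 MW for NG, while grouping on ``plant_id_eia`` alone
yields the plant total of 650 MW.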
"""
def __init__(self, part_name):
"""Initialize an object which makes a tbl for a specific plant-part.
Args:
part_name (str): the name of the part to aggregate to. Names can be
only those in :py:const:`PLANT_PARTS`
"""
self.part_name = part_name
self.id_cols = PLANT_PARTS[part_name]["id_cols"]
def execute(
self,
gens_mega: pd.DataFrame,
sum_cols: List[str] = SUM_COLS,
wtavg_dict: Dict = WTAVG_DICT,
) -> pd.DataFrame:
"""Get a table of data aggregated by a specific plant-part.
This method will take ``gens_mega`` and aggregate the generator records
to the level of the plant-part. This is mostly done via
:meth:`ag_part_by_own_slice`. Then several additional columns are added
and the records are labeled as true or false granularities.
Returns:
a table with records that have been aggregated to a plant-part.
"""
part_df = (
self.ag_part_by_own_slice(
gens_mega, sum_cols=sum_cols, wtavg_dict=wtavg_dict
)
.pipe(self.ag_fraction_owned)
.assign(plant_part=self.part_name)
.pipe( # add standard record id w/ year
add_record_id,
id_cols=self.id_cols,
plant_part_col="plant_part",
year=True,
)
.pipe( # add additional record id that DOESN'T CARE ABOUT TIME
add_record_id,
id_cols=self.id_cols,
plant_part_col="plant_part",
year=False,
)
.pipe(self.add_new_plant_name, gens_mega)
.pipe(self.add_record_count_per_plant)
)
return part_df
def ag_part_by_own_slice(
self,
gens_mega,
sum_cols=SUM_COLS,
wtavg_dict=WTAVG_DICT,
) -> pd.DataFrame:
"""Aggregate the plant part by seperating ownership types.
There are total records and owned records in this master unit list.
Those records need to be aggregated differently to scale. The "total"
ownership slice is now grouped and aggregated as a single version of the
full plant and then the utilities are merged back. The "owned"
ownership slice is grouped and aggregated with the utility_id_eia, so
the portions of generators created by scale_by_ownership will be
appropriately aggregated to each plant part level.
Returns:
pandas.DataFrame: dataframe aggregated to the level of the
part_name
"""
logger.info(f"begin aggregation for: {self.part_name}")
# id_cols = PLANT_PARTS[self.part_name]['id_cols']
# split up the 'owned' slices from the 'total' slices.
# this is because the aggregations are different
part_own = gens_mega.loc[gens_mega.ownership == "owned"].copy()
part_tot = gens_mega.loc[gens_mega.ownership == "total"].copy()
if len(gens_mega) != len(part_own) + len(part_tot):
raise AssertionError(
"Error occured in breaking apart ownership types."
"The total and owned slices should equal the total records."
"Check for nulls in the ownership column."
)
part_own = pudl.helpers.sum_and_weighted_average_agg(
df_in=part_own,
by=self.id_cols + IDX_TO_ADD + IDX_OWN_TO_ADD,
sum_cols=sum_cols,
wtavg_dict=wtavg_dict,
)
# we want a "total" record for each of the utilities that own any slice
# of a particular plant-part. To achieve this, we are going to remove
# the utility info (and drop duplicates bc a plant-part with many
# generators will have multiple duplicate records for each owner)
# we are going to generate the aggregated output for a utility-less
# "total" record and then merge back in the many utilites so each of
# the utilities is associated with an aggergated "total" plant-part
# record
part_tot_no_utils = part_tot.drop(columns=["utility_id_eia"]).drop_duplicates()
# still need to re-calc the fraction owned for the part
part_tot_out = (
pudl.helpers.sum_and_weighted_average_agg(
df_in=part_tot_no_utils,
by=self.id_cols + IDX_TO_ADD,
sum_cols=sum_cols,
wtavg_dict=wtavg_dict,
)
.pipe(pudl.helpers.convert_cols_dtypes, "eia")
.merge(
part_tot[self.id_cols + IDX_TO_ADD + IDX_OWN_TO_ADD].drop_duplicates(),
on=self.id_cols + IDX_TO_ADD,
how="left",
validate="1:m",
)
)
part_ag = pd.concat([part_own, part_tot_out])
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query,
partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_without_partition_range(postgres_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="Int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(
["a", "c"], dtype="object"
),
"test_float": | pd.Series([3.1, 7.8], dtype="float64") | pandas.Series |
from kamodo import Kamodo, kamodofy
import pandas as pd
import numpy as np
import scipy
import time
import datetime
from datetime import timezone
import urllib.request, json
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from pandas import DatetimeIndex
from collections.abc import Iterable
def ror_get_info(runID):
'''Query run information for given runID'''
server = "https://ccmc.gsfc.nasa.gov/RoR_WWW/VMR/"
query = '{}/files.php?id={}'.format(server, runID)
response = urllib.request.urlopen(query)
data = json.loads(response.read())
return data
def ror_show_info(runID):
'''Display run information for a given runID.'''
result=ror_get_info(runID)
print("Run information for runID =",runID,"from the CCMC RoR system.")
for item in result['info']:
for k in item:
print(k.rjust(25),':',item[k])
sats = []
for sat in result['satellites']:
satname = sat['name']
sats.append(satname)
k='Satellite extractions'
print(k.rjust(25),':',sats)
def ror_return_satellites(runID):
'''Display list of satellites as python array for given runID.'''
result=ror_get_info(runID)
sats = []
for sat in result['satellites']:
satname = sat['name']
sats.append(satname)
return sats
def ror_get_extraction(runID, coord, satellite):
'''Query for file contents from server'''
server = "https://ccmc.gsfc.nasa.gov/RoR_WWW/VMR/"
query = '{}/{}/{}/{}_{}.txt'.format(server, runID, satellite, coord, satellite)
print(query)
response = urllib.request.urlopen(query)
file = response.read()
return file
class SATEXTRACT(Kamodo):
def __init__(self,
runID, # ccmc runs-on-request run id
coord, # coordinate system
satellite, # satellite
debug=1,
server="https://ccmc.gsfc.nasa.gov/RoR_WWW/VMR/",
**kwargs):
super(SATEXTRACT, self).__init__(**kwargs)
self.verbose=False # overrides kwarg
# self.symbol_registry=dict() # wipes any user-defined symbols
# self.signatures=dict() # wipes any user-defined signatures
self.RE=6.3781E3
self.server = server # should be set by keyword
self.runID = runID
self.coordinates = coord
self.satellite = satellite
self.debug = debug
if self.debug > 0:
print(' -server: CCMC RoR')
print(' -runID: ',runID)
print(' -coordinate system: ',coord)
print(' -satellite: ',satellite)
self.variables=dict()
self.file = ror_get_extraction(runID, coord, satellite).decode('ascii')
self.parse_file()
ts=self.tsarray[0]
self.start = datetime.datetime.fromtimestamp(ts,tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
ts=self.tsarray[-1]
self.stop = datetime.datetime.fromtimestamp(ts,tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
if self.debug > 0:
print(" ")
print(" -date start: ",self.start)
print(" end: ",self.stop)
for varname in self.variables:
if varname == "N":
continue
units = self.variables[varname]['units']
if self.debug > 0:
print('... registering ',varname,units)
self.register_variable(varname, units)
# classification of position into coordinates to assist visualizion
self.possible_coords=('TOD','J2K','GEO','GM','GSM','GSE','SM')
self.possible_directions=('x','y','z')
self.coords=dict()
for varname in self.variables:
size = self.variables[varname]['size']
if size == 1:
# Look for position values
direction = varname.lower()
key = self.coordinates
if key in self.possible_coords and direction in self.possible_directions:
if key not in self.coords:
self.coords[key] = dict(coord=key)
self.coords[key]['size'] = size
self.coords[key][direction] = varname
# Change 'fill' values in data to NaNs
self.fill2nan()
def parse_file(self):
import re
vars=[]
units=[]
times=[]
arrays = []
if self.debug > 0:
print("===> Printing File Header ...")
for line in self.file.splitlines(False):
A = re.match('^# ', line)
B = re.match('# Run', line)
C = re.match('# Coordinate', line)
D = re.match('# Satellite', line)
E = re.match('# Year', line)
F = re.match('# \[year\]', line)
G = re.match('# Data type', line)
if A or B or C or D or E or F or G:
if A:
if self.debug > 0:
print("-> ",line)
if B:
# Find runname and fill value
parts=re.sub(' +', ' ', line).split(' ')
self.runname = parts[3]
self.fillvalue = parts[6]
if C:
# Check that coordinate system matches
parts=re.sub(' +', ' ', line).split(' ')
if self.coordinates != parts[3]:
print("ERROR: Coordinate system does not match.",self.coordinates,parts[3])
if D:
# Check that satellite name matches
parts=re.sub(' +', ' ', line).split(' ')
if self.satellite != parts[3]:
print("ERROR: Satellite does not match.",self.satellite,parts[3])
if E:
# Variable names, remove . and change N and B_1
parts=re.sub(' +', ' ', line).strip().split(' ')
for p in parts[7:]:
p=re.sub("\.","",p)
p=re.sub("B_1","B1",p)
p=re.sub("^N$","rho",p)
vars.append(p)
if self.debug > 1:
print(len(vars), vars)
if F:
# Variable units, remove [] and fix exponents
parts=re.sub(' +', ' ', line).strip().split(' ')
if self.modelname == "GUMICS":
# missing x,y,z --put before other units
units.append('R_E')
units.append('R_E')
units.append('R_E')
for p in parts[7:]:
if self.modelname == "BATSRUS":
# need a unit applied for status variable, currently missing
if vars[len(units)] == "status":
units.append('')
p=re.sub("cm\^-3","1/cm^3",p)
p=re.sub("m2","m^2",p)
p=re.sub("m3","m^3",p)
p=re.sub("\[","",p)
p=re.sub("\]","",p)
p=re.sub("Vm/A","V/A",p) # This is wrong but use it for now
p=re.sub("nJ/m","J/m",p) # This is wrong but use it for now
units.append(p)
if self.modelname == "LFM" and p == "nPa":
# missing X,Y,Z,Vol --unknown Vol units
units.append('R_E')
units.append('R_E')
units.append('R_E')
units.append('')
if self.modelname == "OpenGGCM" and p == "V/A":
# missing eflx,efly,eflz --units are unknown
units.append('')
units.append('')
units.append('')
if self.debug > 1:
print(len(units), units)
if G:
# Pull out the model name
parts=re.sub(' +', ' ', line).split(' ')
self.modelname = parts[3]
else:
parts=re.sub(' +', ' ', line).strip().split(' ')
year=parts[0]
month=parts[1]
day=parts[2]
hour=parts[3]
minute=parts[4]
second=parts[5]
ms=0
if '.' in second:
(second,ms)=second.split('.')
dd=datetime.datetime(int(year),int(month),int(day),
hour=int(hour),minute=int(minute),second=int(second),
microsecond=int(ms)*1000,tzinfo=datetime.timezone.utc)
times.append(dd)
for s in parts[6:]:
arrays.append(float(s))
# self.dtarray=np.array([dd for dd in times])
self.dtarray = pd.to_datetime(times)
self.tsarray = np.array([d.timestamp() for d in self.dtarray])
self.dtarrayclean = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S") for d in self.tsarray])
nvar=len(vars)
nval=len(arrays)
npos=int(nval/nvar)
arrays=np.array(arrays)
arrays=arrays.reshape((npos,nvar))
i=0
for var in vars:
self.variables[var] = dict(units=units[i],
data=arrays[:,i],
size=1,
fill=self.fillvalue)
i+=1
return
def register_variable(self, varname, units):
"""register variables into Kamodo for this service, CCMC ROR satellite extractions"""
data = self.variables[varname]['data']
times = self.dtarray
ser = pd.Series(data, index=pd.DatetimeIndex(times))
@kamodofy(units = units,
citation = "De Zeeuw 2020",
data = None)
def interpolate(t=times):
ts = t
isiterable = isinstance(t, Iterable)
if isinstance(ts, DatetimeIndex):
pass
elif isinstance(ts, float):
ts = pd.to_datetime([ts], utc=True, unit='s')
elif isiterable:
if isinstance(ts[0], float):
ts = pd.to_datetime(ts, utc=True, unit='s')
ts = DatetimeIndex(ts)
else:
raise NotImplementedError(ts)
ser_ = ser.reindex(ser.index.union(ts))
ser_interpolated = ser_.interpolate(method='time')
result = ser_interpolated.reindex(ts)
if isiterable:
return result.values
else:
return result.values[0]
# store the interpolator
self.variables[varname]['interpolator'] = interpolate
# update docstring for this variable
interpolate.__doc__ = "A function that returns {} in [{}].".format(varname,units)
var_reg = '{}__{}'.format(varname, self.coordinates)
self[var_reg] = interpolate
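# Illustrative sketch (not part of the original class): the interpolator
# registered above relies on a standard pandas time-interpolation pattern --
# union the requested timestamps into the series index, interpolate by time,
# then reindex back onto just the requested timestamps. A minimal standalone
# version with made-up sample values:
def _demo_time_interpolation():
    """Interpolate a toy two-point series onto an intermediate timestamp."""
    idx = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 01:00"], utc=True)
    ser = pd.Series([0.0, 10.0], index=pd.DatetimeIndex(idx))
    ts = pd.to_datetime(["2020-01-01 00:30"], utc=True)
    ser_ = ser.reindex(ser.index.union(ts))
    result = ser_.interpolate(method='time').reindex(ts)
    return result.values  # -> array([5.])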
def fill2nan(self):
'''
Replaces fill value in data with NaN.
Not Yet called by default. Call as needed.
'''
for varname in self.variables:
data = self.variables[varname]['data']
fill = self.variables[varname]['fill']
if fill is not None:
mask = data==float(fill)
nbad = np.count_nonzero(mask)
if nbad > 0:
if self.debug > 0:
print("Found",nbad,"fill values, replacing with NaN for variable",
varname,"of size",data.size)
data[mask]=np.nan
self.variables[varname]['data'] = data
def get_plot(self, type="1Dpos", scale="R_E", var="", groupby="all",
quiver=False, quiverscale="5.", quiverskip="0"):
'''
Return a plotly figure object.
type = 1Dvar => 1D plot of variable value vs Time (also all variables option)
1Dpos (default) => 1D location x,y,z vs Time
3Dpos => 3D location colored by altitude
3Dvar => View variable on 3D position (also quiver and groupby options)
scale = km, R_E (default)
var = variable name for variable value plots
groupby = day, hour, all (default) => groupings for 3Dvar plots
quiver = True, False (default) => if var is a vector value and 3Dvar plot, then
turn on quivers and color by vector magnitude
quiverscale = 5. (default) => length of quivers in units of RE
quiverskip = 0 (default) => how many quivers to skip when displaying
'''
quiverscale=float(quiverscale)
if scale == "km":
quiverscale=quiverscale*self.RE
quiverskip=int(quiverskip)
coord=self.coordinates
# Set plot title for plots
txttop=self.satellite + " position extracted from run " + self.runname + "<br>"\
+ self.start + " to " + self.stop + "<br>" + coord
if type == '1Dvar':
if var == "":
print("No plot variable passed in.")
return
fig=go.Figure()
if var == "all":
# Create menu pulldown for each variable
steps = []
i=0
for varname in self.variables:
ytitle=varname+" ["+self.variables[varname]['units']+"]"
step = dict(
label=ytitle,
method="update",
args=[{"visible": [False] * len(self.variables)}],
)
step["args"][0]["visible"][i] = True # Toggle i'th trace to "visible"
steps.append(step)
if self.variables[varname]['size'] == 1:
x=self.variables[varname]['data']
fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers',
name=varname, visible=False))
elif self.variables[varname]['size'] == 3:
x=self.variables[varname]['data'][:,0]
y=self.variables[varname]['data'][:,1]
z=self.variables[varname]['data'][:,2]
fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers',
name=varname, visible=False))
fig.add_trace(go.Scatter(x=self.dtarray, y=y, mode='lines+markers',
name=varname, visible=False))
fig.add_trace(go.Scatter(x=self.dtarray, y=z, mode='lines+markers',
name=varname, visible=False))
i+=1
fig.data[0].visible=True
fig.update_layout(updatemenus=list([dict(buttons=steps)]))
fig.update_xaxes(title_text="Time")
fig.update_layout(hovermode="x")
fig.update_layout(title_text=txttop)
else:
# Standard single/vector variable plot
if self.variables[var]['size'] == 1:
x=self.variables[var]['data']
fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers', name=var))
elif self.variables[var]['size'] == 3:
x=self.variables[var]['data'][:,0]
y=self.variables[var]['data'][:,1]
z=self.variables[var]['data'][:,2]
fig.add_trace(go.Scatter(x=self.dtarray, y=x, mode='lines+markers', name=var))
fig.add_trace(go.Scatter(x=self.dtarray, y=y, mode='lines+markers', name=var))
fig.add_trace(go.Scatter(x=self.dtarray, y=z, mode='lines+markers', name=var))
ytitle=var+" ["+self.variables[var]['units']+"]"
fig.update_xaxes(title_text="Time")
fig.update_yaxes(title_text=ytitle)
fig.update_layout(hovermode="x")
fig.update_layout(title_text=txttop)
return fig
if type == '1Dpos':
fig=go.Figure()
xvarname = self.coords[coord]['x']
if self.coords[coord]['size'] == 1:
x=self.variables[self.coords[coord]['x']]['data']
y=self.variables[self.coords[coord]['y']]['data']
z=self.variables[self.coords[coord]['z']]['data']
elif self.coords[coord]['size'] == 3:
x=self.variables[self.coords[coord]['x']]['data'][:,0]
y=self.variables[self.coords[coord]['y']]['data'][:,1]
z=self.variables[self.coords[coord]['z']]['data'][:,2]
if scale == "km":
if self.variables[xvarname]['units'] == "R_E":
x=x*self.RE
y=y*self.RE
z=z*self.RE
ytitle="Position [km]"
else:
if self.variables[xvarname]['units'] == "km":
x=x/self.RE
y=y/self.RE
z=z/self.RE
ytitle="Position [R_E]"
fig.add_trace(go.Scatter(x=self.dtarray, y=x,
mode='lines+markers', name=self.coords[coord]['x']))
fig.add_trace(go.Scatter(x=self.dtarray, y=y,
mode='lines+markers', name=self.coords[coord]['y']))
fig.add_trace(go.Scatter(x=self.dtarray, y=z,
mode='lines+markers', name=self.coords[coord]['z']))
fig.update_xaxes(title_text="Time")
fig.update_yaxes(title_text=ytitle)
fig.update_layout(hovermode="x")
fig.update_layout(title_text=txttop)
return fig
if type == "3Dpos":
xvarname = self.coords[coord]['x']
if self.coords[coord]['size'] == 1:
x=self.variables[self.coords[coord]['x']]['data']
y=self.variables[self.coords[coord]['y']]['data']
z=self.variables[self.coords[coord]['z']]['data']
elif self.coords[coord]['size'] == 3:
x=self.variables[self.coords[coord]['x']]['data'][:,0]
y=self.variables[self.coords[coord]['y']]['data'][:,1]
z=self.variables[self.coords[coord]['z']]['data'][:,2]
if scale == "km":
if self.variables[xvarname]['units'] == "R_E":
x=x*self.RE
y=y*self.RE
z=z*self.RE
r=(np.sqrt(x**2 + y**2 + z**2))-self.RE
else:
if self.variables[xvarname]['units'] == "km":
x=x/self.RE
y=y/self.RE
z=z/self.RE
r=(np.sqrt(x**2 + y**2 + z**2))-1.
fig=px.scatter_3d(
x=x,
y=y,
z=z,
color=r)
bartitle = "Altitude [" + scale + "]"
fig.update_layout(coloraxis=dict(colorbar=dict(title=bartitle)))
fig.update_layout(scene=dict(xaxis=dict(title=dict(text="X ["+scale+"]")),
yaxis=dict(title=dict(text="Y ["+scale+"]")),
zaxis=dict(title=dict(text="Z ["+scale+"]"))))
fig.update_layout(title_text=txttop)
return fig
if type == "3Dvar":
if var == "":
print("No plot variable passed in.")
return
xvarname = self.coords[coord]['x']
vard=self.variables[var]['data']
varu=self.variables[var]['units']
if quiver:
if "_x" in var or "_y" in var or"_z" in var:
var=var.split('_')[0]
qxvar=var+"_x"
qyvar=var+"_y"
qzvar=var+"_z"
qxvard=self.variables[qxvar]['data']
qyvard=self.variables[qyvar]['data']
qzvard=self.variables[qzvar]['data']
vard = np.sqrt(np.square(qxvard) +\
np.square(qyvard) +\
np.square(qzvard))
else:
print("A vector variable was not passed, turning quiver off.")
quiver=False
cmin=np.amin(vard)
cmax=np.amax(vard)
if self.coords[coord]['size'] == 1:
x=self.variables[self.coords[coord]['x']]['data']
y=self.variables[self.coords[coord]['y']]['data']
z=self.variables[self.coords[coord]['z']]['data']
elif self.coords[coord]['size'] == 3:
x=self.variables[self.coords[coord]['x']]['data'][:,0]
y=self.variables[self.coords[coord]['y']]['data'][:,1]
z=self.variables[self.coords[coord]['z']]['data'][:,2]
bodyscale=1.
if scale == "km":
if self.variables[xvarname]['units'] == "R_E":
x=x*self.RE
y=y*self.RE
z=z*self.RE
bodyscale=self.RE
r=(np.sqrt(x**2 + y**2 + z**2))-self.RE
else:
if self.variables[xvarname]['units'] == "km":
x=x/self.RE
y=y/self.RE
z=z/self.RE
r=(np.sqrt(x**2 + y**2 + z**2))-1.
xmin=np.amin(x)
xmax=np.amax(x)
ymin=np.amin(y)
ymax=np.amax(y)
zmin=np.amin(z)
zmax=np.amax(z)
if quiver:
tmpx=x+(qxvard*quiverscale/cmax)
tmpy=y+(qyvard*quiverscale/cmax)
tmpz=z+(qzvard*quiverscale/cmax)
xmin=min(xmin,np.amin(tmpx))
xmax=max(xmax,np.amax(tmpx))
ymin=min(ymin,np.amin(tmpy))
ymax=max(ymax,np.amax(tmpy))
zmin=min(zmin,np.amin(tmpz))
zmax=max(zmax,np.amax(tmpz))
# Create empty figure to build pieces into
fig=go.Figure()
Nplot = 0
# Start 1 RE sphere, padded to cover all data positions
dataXYZ = pd.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereXYZ.csv')
dataIJK = pd.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereIJK.csv')
if scale == "km":
dataXYZ=dataXYZ*self.RE
fig.add_mesh3d()
fig.data[Nplot].x = np.append(dataXYZ['x'],(xmin,xmax))
fig.data[Nplot].y = np.append(dataXYZ['y'],(ymin,ymax))
fig.data[Nplot].z = np.append(dataXYZ['z'],(zmin,zmax))
fig.data[Nplot].i = dataIJK['i']
fig.data[Nplot].j = dataIJK['j']
fig.data[Nplot].k = dataIJK['k']
fig.data[Nplot].facecolor = dataIJK['c']
fig.data[Nplot].flatshading = True
fig.data[Nplot].name = '1 R_E sphere'
fig.data[Nplot].hovertemplate="Earth<extra></extra>"
fig.data[Nplot].visible=True
Nplot += 1
# Array of 'groupby' values and unique values
if groupby == "day":
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("%Y-%m-%d") for d in self.tsarray])
elif groupby == "hour":
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in self.tsarray])
else:
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("all") for d in self.tsarray])
udates = np.unique(datearray)
nsteps = len(udates)
for date in udates:
# Compute mask to restrict all data in trace
mask = date == datearray
# Create figure with data
if quiver:
fig.add_trace(go.Scatter3d(
name=date, x=x[mask], y=y[mask], z=z[mask],
mode='markers',
marker=dict(
size=4,
cmin=cmin, cmax=cmax,
color=vard[mask],
showscale=True,
colorscale='Viridis',
colorbar=dict(title=var+" ["+varu+"]"),
),
customdata=np.vstack((self.dtarrayclean[mask],\
qxvard[mask],\
qyvard[mask],\
qzvard[mask])).T,
hovertemplate="<b>"+self.satellite+" Position</b>"+
"<br>X: %{x:.4f}<br>Y: %{y:.4f}<br>Z: %{z:.4f}<br>"+
qxvar+": %{customdata[1]:.2f}<br>"+
qyvar+": %{customdata[2]:.2f}<br>"+
qzvar+": %{customdata[3]:.2f}<br>"+
" "+var+": %{marker.color:.2f}"+varu+"<br>"+
"%{customdata[0]}<br>"+"<extra></extra>",
visible=False,
))
Nplot += 1
else:
fig.add_trace(go.Scatter3d(
name=date, x=x[mask], y=y[mask], z=z[mask],
mode='markers',
marker=dict(
size=4,
cmin=cmin, cmax=cmax,
color=vard[mask],
showscale=True,
colorscale='Viridis',
colorbar=dict(title=var+" ["+varu+"]"),
),
customdata=self.dtarrayclean[mask],
hovertemplate="<b>"+self.satellite+" Position</b>"+
"<br>X: %{x:.4f}<br>Y: %{y:.4f}<br>Z: %{z:.4f}<br>"+
" "+var+": %{marker.color:.2f}"+varu+"<br>"+
"%{customdata}<br>"+"<extra></extra>",
visible=False,
))
Nplot += 1
fig.data[1].visible=True
if quiver:
for date in udates:
# Compute mask to restrict all data in trace
mask = date == datearray
# Precompute needed values
xm=x[mask]
ym=y[mask]
zm=z[mask]
vm=vard[mask]
qxm=qxvard[mask]
qym=qyvard[mask]
qzm=qzvard[mask]
xm=xm.reshape(len(xm),1)
ym=ym.reshape(len(ym),1)
zm=zm.reshape(len(zm),1)
vm=vm.reshape(len(vm),1)
qxm=qxm.reshape(len(qxm),1)
qym=qym.reshape(len(qym),1)
qzm=qzm.reshape(len(qzm),1)
if quiverskip > 0:
for i in range(len(qxm)):
if i%(quiverskip+1) > 0:
qxm[i]=0.
qym[i]=0.
qzm[i]=0.
xx=np.concatenate((xm,xm+qxm*quiverscale/cmax,xm),axis=1).reshape(3*len(xm))
yy=np.concatenate((ym,ym+qym*quiverscale/cmax,ym),axis=1).reshape(3*len(ym))
zz=np.concatenate((zm,zm+qzm*quiverscale/cmax,zm),axis=1).reshape(3*len(zm))
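# Each quiver is encoded as three consecutive points (anchor, tip, anchor):
# the single 'lines' trace draws out to the arrow tip and back to the
# satellite track before moving on to the next anchor point.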
# Create figure with data
fig.add_trace(go.Scatter3d(
name=date,
x = xx,
y = yy,
z = zz,
mode='lines',
line=dict(
width=2,
color='rgba(22,22,22,0.2)',
),
hoverinfo='skip',
visible=False,
))
Nplot += 1
fig.data[1+nsteps].visible=True
# Start selection slider build
steps = []
for i in range(nsteps):
step = dict(
method="update",
args=[{"visible": [False] * len(fig.data)}],
label=udates[i]
)
step["args"][0]["visible"][0] = True # Set first trace to "visible"
step["args"][0]["visible"][i+1] = True # Toggle i'th trace to "visible"
if quiver:
step["args"][0]["visible"][i+1+nsteps] = True # Toggle i'th trace to "visible"
steps.append(step)
sliders = [dict(
active=0,
currentvalue={"prefix": "Currently showing: "},
pad={"t": 50},
steps=steps
)]
fig.update_layout(sliders=sliders)
# Update axis labels and plot title
fig.update_layout(scene=dict(xaxis=dict(title=dict(text="X ["+scale+"]")),
yaxis=dict(title=dict(text="Y ["+scale+"]")),
zaxis=dict(title=dict(text="Z ["+scale+"]"))))
fig.update_layout(title_text=txttop)
fig.update_layout(showlegend=False)
return fig
print('ERROR, reached end of get_plot without any action taken.')
return
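# Hypothetical usage sketch: the runID "12345", satellite "goes15", and the
# "rho" variable below are placeholders, not values from a real run.
# Registered interpolators follow the "<variable>__<coordinate>" naming used
# in register_variable above.
def _example_satextract_usage():
    """Extract a satellite, interpolate a variable in time, and plot it."""
    sat = SATEXTRACT("12345", "GSE", "goes15")
    rho = sat["rho__GSE"](sat.dtarray)  # density interpolated onto its own times
    fig = sat.get_plot(type="3Dvar", var="rho", groupby="day")
    return rho, fig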
#======
# New class to collect all satellites
#
class SATEXTRACTALL(Kamodo):
def __init__(self, runID, coord, debug=1, sats=[], **kwargs):
super(SATEXTRACTALL, self).__init__(**kwargs)
# Start timer
tic = time.perf_counter()
self.verbose=False
self.symbol_registry=dict()
self.signatures=dict()
self.RE=6.3781E3
self.runID = runID
self.coordinates = coord
self.debug = debug
print(' -server: CCMC RoR')
print(' -runID: ',runID)
print(' -coordinate system: ',coord)
self.satellites = dict()
result=ror_get_info(runID)
for sat in result['satellites']:
satname = sat['name']
if len(sats) == 0 or satname in sats:
ror = SATEXTRACT(runID, coord, satname, debug=0)
self.satellites[satname] = ror
self.start=ror.start
self.stop=ror.stop
self.runname=ror.runname
self.modelname=ror.modelname
print(' -data extracted from model: ',self.modelname)
# end timer
toc = time.perf_counter()
print(f"Time loading files and registering satellites: {toc - tic:0.4f} seconds")
def get_plot(self, type="3Dvar", scale="R_E", var="", groupby="all",
quiver=False, quiverscale="5.", quiverskip="0"):
'''
Return a plotly figure object.
type = 3Dvar => View variable on 3D position (also quiver and groupby options)
scale = km, R_E (default)
var = variable name for variable value plots
groupby = day, hour, all (default) => groupings for 3Dvar plots
quiver = True, False (default) => if var is a vector value and 3Dvar plot, then
turn on quivers and color by vector magnitude
quiverscale = 5. (default) => length of quivers in units of RE
quiverskip = 0 (default) => how many quivers to skip when displaying
'''
quiverscale=float(quiverscale)
if scale == "km":
quiverscale=quiverscale*self.RE
quiverskip=int(quiverskip)
coord=self.coordinates
# Set plot title for plots
txttop="Satellite positions extracted from run " + self.runname + "<br>"\
+ self.start + " to " + self.stop + "<br>" + coord
# set initial values used later
xmin=0.
xmax=0.
ymin=0.
ymax=0.
zmin=0.
zmax=0.
cmin= 1.e99
cmax=-1.e99
if type == "3Dvar":
if var == "":
print("No plot variable passed in.")
return
# Create empty figure to build pieces into
fig=go.Figure()
Nplot = 0
# Pre-loop to find cmin,cmax to use in all plot pieces
for sat in self.satellites:
ror = self.satellites[sat]
vard=ror.variables[var]['data']
if quiver:
if "_x" in var or "_y" in var or"_z" in var:
var2=var.split('_')[0]
qxvar=var2+"_x"
qyvar=var2+"_y"
qzvar=var2+"_z"
qxvard=ror.variables[qxvar]['data']
qyvard=ror.variables[qyvar]['data']
qzvard=ror.variables[qzvar]['data']
vard = np.sqrt(np.square(qxvard) +\
np.square(qyvard) +\
np.square(qzvard))
else:
print("A vector variable was not passed, turning quiver off.")
quiver=False
cmin=min(cmin,np.amin(vard))
cmax=max(cmax,np.amax(vard))
# Actual plot creation loop
for sat in self.satellites:
ror = self.satellites[sat]
vard=ror.variables[var]['data']
varu=ror.variables[var]['units']
if quiver:
if "_x" in var or "_y" in var or"_z" in var:
var2=var.split('_')[0]
qxvar=var2+"_x"
qyvar=var2+"_y"
qzvar=var2+"_z"
qxvard=ror.variables[qxvar]['data']
qyvard=ror.variables[qyvar]['data']
qzvard=ror.variables[qzvar]['data']
vard = np.sqrt(np.square(qxvard) +\
np.square(qyvard) +\
np.square(qzvard))
if ror.coords[coord]['size'] == 1:
x=ror.variables[ror.coords[coord]['x']]['data']
y=ror.variables[ror.coords[coord]['y']]['data']
z=ror.variables[ror.coords[coord]['z']]['data']
elif ror.coords[coord]['size'] == 3:
x=ror.variables[ror.coords[coord]['x']]['data'][:,0]
y=ror.variables[ror.coords[coord]['y']]['data'][:,1]
z=ror.variables[ror.coords[coord]['z']]['data'][:,2]
bodyscale=1.
if scale == "km":
if ror.variables[ror.coords[coord]['x']]['units'] == "R_E":
x=x*ror.RE
y=y*ror.RE
z=z*ror.RE
bodyscale=ror.RE
r=(np.sqrt(x**2 + y**2 + z**2))-ror.RE
else:
if ror.variables[ror.coords[coord]['x']]['units'] == "km":
x=x/ror.RE
y=y/ror.RE
z=z/ror.RE
r=(np.sqrt(x**2 + y**2 + z**2))-1.
# Array of 'groupby' values and unique values
if groupby == "day":
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("%Y-%m-%d") for d in ror.tsarray])
elif groupby == "hour":
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in ror.tsarray])
else:
datearray = np.array([datetime.datetime.fromtimestamp(d,tz=timezone.utc).strftime\
("all") for d in ror.tsarray])
udates = np.unique(datearray)
nsteps = len(udates)
for date in udates:
# Compute mask to restrict all data in trace
mask = date == datearray
# Create figure with data
if quiver:
fig.add_trace(go.Scatter3d(
name=date, x=x[mask], y=y[mask], z=z[mask],
mode='markers',
marker=dict(
size=4,
cmin=cmin, cmax=cmax,
color=vard[mask],
showscale=True,
colorscale='Viridis',
colorbar=dict(title=var2+" ["+varu+"]"),
),
customdata=np.vstack((ror.dtarrayclean[mask],\
qxvard[mask],\
qyvard[mask],\
qzvard[mask])).T,
hovertemplate="<b>"+ror.satellite+" Position</b>"+
"<br>X: %{x:.4f}<br>Y: %{y:.4f}<br>Z: %{z:.4f}<br>"+
qxvar+": %{customdata[1]:.2f}<br>"+
qyvar+": %{customdata[2]:.2f}<br>"+
qzvar+": %{customdata[3]:.2f}<br>"+
" "+var2+": %{marker.color:.2f}"+varu+"<br>"+
"%{customdata[0]}<br>"+"<extra></extra>",
visible=False,
))
Nplot += 1
else:
fig.add_trace(go.Scatter3d(
name=date, x=x[mask], y=y[mask], z=z[mask],
mode='markers',
marker=dict(
size=4,
cmin=cmin, cmax=cmax,
color=vard[mask],
showscale=True,
colorscale='Viridis',
colorbar=dict(title=var+" ["+varu+"]"),
),
customdata=ror.dtarrayclean[mask],
hovertemplate="<b>"+ror.satellite+" Position</b>"+
"<br>X: %{x:.4f}<br>Y: %{y:.4f}<br>Z: %{z:.4f}<br>"+
" "+var+": %{marker.color:.2f}"+varu+"<br>"+
"%{customdata}<br>"+"<extra></extra>",
visible=False,
))
Nplot += 1
if quiver:
for date in udates:
# Compute mask to restrict all data in trace
mask = date == datearray
# Precompute needed values
xm=x[mask]
ym=y[mask]
zm=z[mask]
vm=vard[mask]
qxm=qxvard[mask]
qym=qyvard[mask]
qzm=qzvard[mask]
xm=xm.reshape(len(xm),1)
ym=ym.reshape(len(ym),1)
zm=zm.reshape(len(zm),1)
vm=vm.reshape(len(vm),1)
qxm=qxm.reshape(len(qxm),1)
qym=qym.reshape(len(qym),1)
qzm=qzm.reshape(len(qzm),1)
if quiverskip > 0:
for i in range(len(qxm)):
if i%(quiverskip+1) > 0:
qxm[i]=0.
qym[i]=0.
qzm[i]=0.
xx=np.concatenate((xm,xm+qxm*quiverscale/cmax,xm),axis=1).reshape(3*len(xm))
yy=np.concatenate((ym,ym+qym*quiverscale/cmax,ym),axis=1).reshape(3*len(ym))
zz=np.concatenate((zm,zm+qzm*quiverscale/cmax,zm),axis=1).reshape(3*len(zm))
# Create figure with data
fig.add_trace(go.Scatter3d(
name=date,
x = xx,
y = yy,
z = zz,
mode='lines',
line=dict(
width=2,
color='rgba(22,22,22,0.2)',
),
hoverinfo='skip',
visible=False,
))
Nplot += 1
# find cmin,cmax for traces and set to global min,max
for i in range(len(fig.data)):
if fig.data[i].marker.cmin is not None:
cmin=min(cmin,float(fig.data[i].marker.cmin))
cmax=max(cmax,float(fig.data[i].marker.cmax))
for i in range(len(fig.data)):
if fig.data[i].marker.cmin is not None:
fig.data[i].marker.cmin=cmin
fig.data[i].marker.cmax=cmax
# find min,max locations to pad sphere points
for i in range(len(fig.data)):
xmin=min(xmin,np.amin(fig.data[i].x))
xmax=max(xmax,np.amax(fig.data[i].x))
ymin=min(ymin,np.amin(fig.data[i].y))
ymax=max(ymax,np.amax(fig.data[i].y))
zmin=min(zmin,np.amin(fig.data[i].z))
zmax=max(zmax,np.amax(fig.data[i].z))
# Add 1 RE sphere, padded to cover all data positions
dataXYZ = pd.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereXYZ.csv')
from textblob import TextBlob, Word
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem import PorterStemmer
import nltk
# nltk.download()
import urllib.request
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import re
import pandas as pd
# reading from the web
response = urllib.request.urlopen('http://localhost/github/btt/carReview.php')
html = response.read()
# print(html)
# cleaning HTML tags
soup = BeautifulSoup(html, 'html5lib')
text = soup.get_text()
text2 = text.replace(" ", "")
# text2 = text2.split('\n')
# print(text2)
# remove everything that is not a letter
letters_only = re.sub("[^a-zA-Z]", " ", text2)
letters_only = letters_only.replace(" ", "\n")
lines = letters_only.splitlines()
# print(lines)
# remove extra whitespace
lines2 = []
for sentence in lines:
lines2.append(sentence.strip())
# print(lines2)
# remove empty strings
linesFinal = list(filter(None, lines2))
# print(linesFinal)
# Lower case
lower_case = [x.lower() for x in linesFinal]
# print(lower_case)
"""OVO NIJE POTREBNO
# convert to tuple
tuple_all = tuple(lower_case)
# print(tuple_all)
# two elements per tuple
tuple_two_elements = tuple(tuple_all[x:x + 2]
for x in range(0, len(tuple_all), 2))
# print(tuple_two_elements)
# feedback without pos and neg
feedbacks = [x[0] for x in tuple_two_elements]
# print(feedbacks)
# pos or neg
pos_neg = [x[1] for x in tuple_two_elements]
# print(pos_neg)
"""
# word tokenization
""" words = lower_case[0].split(' ')
print(words) """
# from nltk.tokenize import word_tokenize
wordToken = []
sentenceToken = []
for a in lower_case:
sentenceToken.append(a)
for i, word in enumerate(a.split()):
wordToken.append(word)
# print(sentenceToken)
# print(wordToken)
# spelling correction
count = 0
for x in wordToken:
# print(x)
w = Word(x)
# print(w.spellcheck())
if (w.spellcheck()[0][1] != 1):
print("\n Incorrect word '" + w +
"' --- Corrent word: '" + w.correct() + "' \n")
""" for i in sentenceToken:
if (w in sentenceToken[count]):
sentenceToken[count] = TextBlob(sentenceToken[count]).correct()
count = count + 1
count = 0 """
correntWord = []
for x in wordToken:
correntWord.append(TextBlob(x).correct())
count = count + 1
# print(correntWord)
correntSentence = []
for i in sentenceToken:
correntSentence.append(TextBlob(i).correct())
count = count + 1
# print(correntSentence)
# word tokenizer 2
""" tokens = [t for t in wordToken]
print(tokens) """
sr = stopwords.words('english')
clean_tokens = correntWord[:]
for token in correntWord:
if token in stopwords.words('english'):
clean_tokens.remove(token)
# print(clean_tokens)
# frequency
freq = nltk.FreqDist(clean_tokens)
""" for key, val in freq.items():
print(str(key) + ':' + str(val))
freq.plot(20, cumulative=False) """
ps = PorterStemmer()
""" for word in clean_tokens:
print("{0:20}{1:20}".format(word, ps.stem(word))) """
print("\n")
lem = WordNetLemmatizer()
# for word in clean_tokens:
# print ("{0:20}{1:20}".format(word,lem.lemmatize(word)))
sentiment = []
for a in correntSentence:
sentiment.append((a.sentiment.polarity) * 100)
print(sentiment)
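# TextBlob polarity lies in [-1, 1]; multiplying by 100 above rescales it to
# [-100, 100], so negative values indicate negative sentiment.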
def merge(correntSentence, sentiment):
merged_list = [(correntSentence[i], sentiment[i])
for i in range(0, len(correntSentence))]
return merged_list
combine = merge(correntSentence, sentiment)
# print(combine)
output = pd.DataFrame(data={"text": correntSentence, "sentiment": sentiment})
# coding: utf-8
from .abs_loader import AbsLoader
import os
import pandas as pd
import wfile
import wdfproc
##################################################
# Data load class
##################################################
class Loader(AbsLoader):
"""データロードクラス
Attributes:
属性の名前 (属性の型): 属性の説明
属性の名前 (:obj:`属性の型`): 属性の説明.
"""
##################################################
# Constructor
##################################################
def __init__(self, base_dir, temp_dirname, input_dirname):
# Call the abstract base class constructor
super(Loader, self).__init__(base_dir, temp_dirname, input_dirname)
##################################################
# Load the data
##################################################
def load(self, reload=False):
# Create the working directory
self.temp_dir = os.path.join(self.base_dir, self.temp_dirname)
os.makedirs(self.temp_dir, exist_ok=True)
# Set the input data directory
self.input_dir = os.path.join(self.base_dir, self.input_dirname)
# Load the ground weather data
gdf = self._load_ground_weather(reload)
# Preprocess the ground weather data
gdf = self._preprocess_ground_weather(gdf)
# Load the upper-air (highrise) weather data
hdf = self._load_highrise_weather(reload)
# Preprocess the upper-air weather data
hdf = self._preprocess_highrise_weather(hdf)
# Merge the ground and upper-air weather data
df = pd.merge(gdf, hdf, on=('日付','時'))
return df
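# Hypothetical usage sketch (the directory names are placeholders): the
# loader merges the preprocessed ground and upper-air weather tables on the
# 日付 (date) and 時 (hour) columns.
#
#     loader = Loader(r"C:\data\weather", "temp", "input")
#     df = loader.load(reload=False)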
##################################################
# Read the ground weather data
##################################################
def _load_ground_weather(self, reload):
# Check whether a saved file exists
ground_weather_csv = os.path.join(self.temp_dir, 'ground_weather.csv')
exist_csv = os.path.isfile(ground_weather_csv)
if (reload == False) and (exist_csv == True):
# If the data has already been loaded and no reload is requested,
# read the saved file
ground_df = pd.read_csv(ground_weather_csv, index_col=0, parse_dates=[1])
'''
__author__ = "<NAME>"
MIT License
Copyright (c) 2020 crewml
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import pandas as pd
import datetime
import crewml.common as st
import traceback
'''
The cost ranges from 0-1000. 0 is no cost for the flight to operate and
1000 is the worst cost to operate the flight.
1. Flight has to be in a pairing if not it is not a valid flight - cost 1000
2. All pairing must start and end in a base if not - cost 1000
3. Duty time must be 8-14 hours if not cost - 10 per hour more or less
4. If a flight is not in a pairing - cost 1000
5. If a flight has negative total pairing hours - cost 1000
6. If a flight has a layover - cost 100
All the above rules are checked for each flight. If a flight has 1000
already then a rule is not checked. So the maximum cost for a flight
will be 1000
'''
class CostCalculator:
def __init__(self, input_file, output_file, fa_bases):
self.logger = logging.getLogger(__name__)
self.input_file = input_file
self.output_file = output_file
self.fa_bases = fa_bases
self.logger.info('info message in CostCalculator')
def process(self):
try:
self.logger.info('info message in CostCalculator process')
flights_df = pd.read_csv(st.DATA_DIR+self.input_file)
self.logger.info("flight data read from:", self.input_file)
flights_df['cost'] = 0
df = self.check_negative_pairing(flights_df)
df = self.check_flight_in_pairing(df)
df = self.check_org_dest_airports(df)
df = self.check_duty_duration(df)
df = self.check_layover(df)
df.drop(flights_df.filter(regex="Unname"), axis=1, inplace=True)
df.to_csv(st.DATA_DIR+self.output_file)
self.logger.info("flight data write to:", self.output_file)
except Exception as e:
self.logger.error(traceback.format_exc())
raise
'''
Check for empty pairingId and store 1000 for cost if it is empty
'''
def check_flight_in_pairing(self, df):
temp = df[df['pairingId'].isnull()]
temp['cost'] = 1000
df = df[~df['fltID'].isin(temp.fltID)]
df = df.append(temp)
return df
'''
5. If a flight has negative total pairing hours - cost 1000
Duty should never be negative, this should be fixed
'''
def check_negative_pairing(self, df):
df['totPairingUTC'] = pd.to_timedelta(df['totPairingUTC'])
temp = df[df['totPairingUTC'] < datetime.timedelta(0)]
temp['cost'] = 1000
df = df[~df['fltID'].isin(temp.fltID)]
df = df.append(temp)
return df
'''
All pairing must start and end in a base if not - cost 1000
'''
def check_org_dest_airports(self, flights_df):
df = pd.DataFrame()
pairing_id = 1
total = len(flights_df)
while pairing_id < total:
temp = flights_df[flights_df['pairingId'].isin([pairing_id])]
if len(temp) == 0:
pairing_id += 1
continue
h = temp.head(1)
t = temp.tail(1)
orgin_airport = h['Origin'].iloc[0]
dest_airport = t['Dest'].iloc[0]
if orgin_airport == dest_airport and orgin_airport in \
self.fa_bases and dest_airport in self.fa_bases:
pairing_id += 1
continue
else:
# temp['cost'] = temp.apply(add_cost,axis=1)
temp['cost'] = 1000
df = df.append(temp)
pairing_id += 1
if len(df) != 0:
temp = flights_df[~flights_df['fltID'].isin(df.fltID)]
df = df.append(temp)
else:
df = flights_df
return df
'''
3. Duty time must be 8-14 hours if not cost - 10 per hour more or less
'''
def check_duty_duration(self, flights_df):
df = pd.DataFrame()
newDuty_id = 1
total = len(flights_df)
hour_14 = datetime.timedelta(hours=+14)
flights_df['totDutyTm'] = pd.to_timedelta(flights_df['totDutyTm'])
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import warnings
def test(x):
print('Type:\n{}\n'.format(type(x)))
if isinstance(x, pd.Series):
print('Index:\n{}\n'.format(x.index))
else:
print('竖标:\n{}\n'.format(x.index))
print('横标:\n{}\n'.format(x.columns))
# print('内容:\n{}\n'.format(x.values))
print('------------------------------------\n')
def func_pdf(x, a, u, o):
return a * (1 / (2 * np.pi * o ** 2) ** 0.5) * np.exp(-(x - u) ** 2 / (2 * o ** 2))
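# For reference, func_pdf above is just a scaled Gaussian density:
#   f(x) = a * (1 / sqrt(2*pi*o**2)) * exp(-(x - u)**2 / (2*o**2))
# i.e. the normal pdf with mean u and standard deviation o multiplied by an
# amplitude a (a == 1, as used further below, gives the plain pdf).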
def choose_right(x, u):
if x < u:
x = u
return x
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# ----------- join -----------
# The Excel file must contain only the table itself; delete anything outside it (e.g. a "data source: wind" note)
addr = r'C:\Users\Administrator\Desktop\ANA'
addr_in = addr + '\原始表.xlsx'
addr_out = addr + '\函数拟合表.xlsx'
addr_final = addr + '\概率计算表.xlsx'
df_origin = pd.read_excel(addr_in, sheet_name='对数')
stata_list = ['均值', '标准差']  # sheet names: mean, standard deviation
result_list = []
for i in stata_list:
df_stata = pd.read_excel(addr_out, sheet_name=i)
df_stata.rename(columns={'Unnamed: 0': '三级行业'}, inplace=True)
result = pd.merge(df_origin, df_stata, on='三级行业', how='left')
num = len(df_stata.columns) + 3
b = result[result.columns[num:]]
    b.columns = [item[:-2] if item.endswith('_y') else item for item in b.columns]  # drop the '_y' merge suffix
result_list.append(b)
a = df_origin[df_origin.columns[4:]]
b = result_list[0]
a[a < b] = -100000000000
# compute the indicator
c = func_pdf(a, 1, result_list[0], result_list[1])
c = c[df_stata.columns[2:]]
c.to_csv(addr_final, encoding='utf-8-sig')
c.replace(0, inplace=True)  # no replacement value given: older pandas pad-fills zeros from the previous row
product = []
for i, v in c.iterrows():
trans = v.sort_values().tolist()
product_trans = trans[0] * trans[1] * trans[2]
product.append(product_trans)
product_trans = pd.DataFrame(product)
c = pd.concat([c, product_trans], axis=1)
c.rename(columns={0: '指标'}, inplace=True)
print(c)
# output
write = | pd.ExcelWriter(addr_final) | pandas.ExcelWriter |
import gc
import os
import sys
sys.path.append(os.getenv("Analysis"))
import math
import pandas as pd
from Analysis import Plotting
import plotly.express as px
import plotly.figure_factory as ff
import hdbscan
from scipy.spatial import distance
#HDBScan with Andrews Curve Plot
def HDBScan(df, min_cluster_size = None, min_samples = None, epsilon = None, alpha = 1.0,
single_cluster = False, outliers = None, new_column = "Cluster", offline = None,
transform = None):
"""
Min_cluster_size = Litteral
min_samples = how conservative the model is.
epsilon = how agressive similiar clusters merge, higher equals fewer clusters. Value based on data distance units, thus value could be 50 for 50 unit distance.
alpha = another conservative value that you probably shouldn't need to messs with, but works on a tighter scale.'
Note Outlier should be a cutoff value.
Allow single cluster for anomoly detection / outlier detection.
"""
assert all([pd.api.types.is_numeric_dtype(df[column]) for column in df.columns])
if not min_cluster_size:
min_cluster_size = int(len(df) * 0.1)
if not min_samples:
min_samples = int(min_cluster_size * .333)
if not epsilon:
distances = distance.cdist(df.values, df.values)
mean = distances.mean()
std = distances.std()
epsilon = float(min(mean, std))
clusterer = hdbscan.HDBSCAN(
min_cluster_size = min_cluster_size,
min_samples = min_samples,
cluster_selection_epsilon = epsilon,
alpha = alpha,
allow_single_cluster = single_cluster,
core_dist_n_jobs = -1,
gen_min_span_tree = True
)
clusterer.fit(df)
tmp = df.copy()
tmp[new_column] = clusterer.labels_
tmp["{}_Probabilities".format(new_column)] = clusterer.probabilities_
tmp['{}_outlier_prob'.format(new_column)] = clusterer.outlier_scores_
if not offline:
Plotting.dfTable(tmp.head(50), title = "Results of HDBScan")
Plotting.dfTable(pd.DataFrame(clusterer.cluster_persistence_), title = "Cluster Persistence")
Plotting.Title("Density Based Cluster Validity Score", description = clusterer.relative_validity_)
else:
print("Cluster Persistence")
print( | pd.DataFrame(clusterer.cluster_persistence_) | pandas.DataFrame |
#%%
from jmespath import search
import pandas as pd
import geopandas as gpd
import requests
import json
from shapely import Point
# %%
print("Reading the HUC-12 names and shapes")
huc_shapes = gpd.read_file(
"R:\\WilliamPenn_Delaware River\\PollutionAssessment\\Stage2\\DRB_GWLFE\\HUC12s in 020401, 020402, 020403 v2.json",
)
huc_shapes = huc_shapes.set_crs("EPSG:3857").to_crs("EPSG:4326")
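# Note: set_crs only declares that the coordinates already are Web Mercator
# (EPSG:3857) without moving any geometry; to_crs then actually reprojects the
# shapes to WGS84 lat/lon (EPSG:4326) for the API calls below.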
#%%
# Searching for boundary ID's
for idx, huc in huc_shapes.iloc[0:30].iterrows():
search_results = []
for search_text in [huc["huc12"], huc["name"]]:
search_results.extend(
requests.get(
"{}/{}/{}".format(
"https://modelmywatershed.org",
"mmw",
"modeling/boundary-layers-search",
),
params={"text": search_text},
headers={
"Content-Type": "application/json",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://staging.modelmywatershed.org/draw",
},
).json()["suggestions"]
)
    dedupped = [dict(t) for t in {tuple(d.items()) for d in search_results}]
huc12_results = [result for result in dedupped if result["code"] == "huc12"]
best_result = {}
for result in huc12_results:
if (
(result["text"] == huc["huc12"])
or (result["text"] == huc["huc12"] + "-" + huc["name"])
or (len(huc12_results) == 1 and result["text"] == huc["name"])
):
best_result = result
break
if best_result != {}:
if pd.isna(huc["wkaoi_id"]):
print()
print("Got match for {}".format(huc["name"]))
print(json.dumps(best_result, indent=2))
if | pd.notna(huc["wkaoi_id"]) | pandas.notna |
import pandas as pd
from simple_network_sim import network_of_populations, sampleUseOfModel, hdf5_to_csv
from tests.utils import create_baseline
def test_cli_run(base_data_dir):
try:
sampleUseOfModel.main(["-c", str(base_data_dir / "config.yaml")])
h5_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.h5"
csv_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.csv"
hdf5_to_csv.main([str(h5_file), str(csv_file)])
baseline = create_baseline(csv_file)
test_df = pd.read_csv(csv_file)
baseline_df = pd.read_csv(baseline)
pd.testing.assert_frame_equal(
test_df.set_index(["date", "node", "age", "state"]),
baseline_df.set_index(["date", "node", "age", "state"]),
check_like=True,
)
finally:
        # TODO: remove this once https://github.com/ScottishCovidResponse/data_pipeline_api/issues/12 is done
(base_data_dir / "access.log").unlink()
h5_file.unlink()
csv_file.unlink()
def test_stochastic_cli_run(base_data_dir):
try:
sampleUseOfModel.main(["-c", str(base_data_dir / "config_stochastic.yaml")])
h5_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.h5"
csv_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.csv"
hdf5_to_csv.main([str(h5_file), str(csv_file)])
baseline = create_baseline(csv_file)
test_df = pd.read_csv(csv_file)
baseline_df = pd.read_csv(baseline)
pd.testing.assert_frame_equal(
test_df.set_index(["date", "node", "age", "state"]),
baseline_df.set_index(["date", "node", "age", "state"]),
check_like=True,
)
finally:
        # TODO: remove this once https://github.com/ScottishCovidResponse/data_pipeline_api/issues/12 is done
(base_data_dir / "access.log").unlink()
h5_file.unlink()
csv_file.unlink()
def test_stochastic_seed_sequence(data_api_stochastic):
network, _ = network_of_populations.createNetworkOfPopulation(
data_api_stochastic.read_table("human/compartment-transition", "compartment-transition"),
data_api_stochastic.read_table("human/population", "population"),
data_api_stochastic.read_table("human/commutes", "commutes"),
data_api_stochastic.read_table("human/mixing-matrix", "mixing-matrix"),
data_api_stochastic.read_table("human/infectious-compartments", "infectious-compartments"),
data_api_stochastic.read_table("human/infection-probability", "infection-probability"),
data_api_stochastic.read_table("human/initial-infections", "initial-infections"),
| pd.DataFrame({"Value": [2]}) | pandas.DataFrame |
"""Relative Negative Sentiment Bias (RNSB) metric implementation."""
import logging
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from wefe.metrics.base_metric import BaseMetric
from wefe.preprocessing import get_embeddings_from_query
from wefe.query import Query
from wefe.word_embedding_model import WordEmbeddingModel
class RNSB(BaseMetric):
"""Relative Relative Negative Sentiment Bias (RNSB).
The metric was originally proposed in [1].
Visit `RNSB in Metrics Section <https://wefe.readthedocs.io/en/latest/about.html#rnsb>`_
for further information.
References
----------
| [1]: <NAME> and <NAME>. A transparent framework for evaluating
| unintended demographic bias in word embeddings.
| In Proceedings of the 57th Annual Meeting of the Association for
| Computational Linguistics, pages 1662–1667, 2019.
| [2]: https://github.com/ChristopherSweeney/AIFairness/blob/master/python_notebooks/Measuring_and_Mitigating_Word_Embedding_Bias.ipynb
"""
metric_template = ("n", 2)
metric_name = "Relative Negative Sentiment Bias"
metric_short_name = "RNSB"
def _train_classifier(
self,
attribute_embeddings_dict: List[Dict[str, np.ndarray]],
estimator: BaseEstimator = LogisticRegression,
estimator_params: Dict[str, Any] = {"solver": "liblinear", "max_iter": 10000},
random_state: Union[int, None] = None,
holdout: bool = True,
print_model_evaluation: bool = False,
) -> Tuple[BaseEstimator, float]:
"""Train the sentiment classifier from the provided attribute embeddings.
Parameters
----------
attribute_embeddings_dict : dict[str, np.ndarray]
A dict with the attributes keys and embeddings
estimator : BaseEstimator, optional
A scikit-learn classifier class that implements predict_proba function,
            function, by default LogisticRegression.
estimator_params : dict, optional
Parameters that will use the classifier, by default { 'solver': 'liblinear',
'max_iter': 10000, }
random_state : Union[int, None], optional
A seed that allows making the execution of the query reproducible, by
default None
print_model_evaluation : bool, optional
Indicates whether the classifier evaluation is printed after the
training process is completed, by default False
Returns
-------
Tuple[BaseEstimator, float]
The trained classifier and the accuracy obtained by the model.
"""
# when random_state is not none, set it on classifier params.
if random_state is not None:
estimator_params["random_state"] = random_state
# the attribute 0 words are treated as positive words.
positive_embeddings = np.array(list(attribute_embeddings_dict[0].values()))
# the attribute 1 words are treated as negative words.
negative_embeddings = np.array(list(attribute_embeddings_dict[1].values()))
# generate the labels (1, -1) for each embedding
positive_labels = np.ones(positive_embeddings.shape[0])
negative_labels = -np.ones(negative_embeddings.shape[0])
attributes_embeddings = np.concatenate(
(positive_embeddings, negative_embeddings)
)
attributes_labels = np.concatenate((positive_labels, negative_labels))
if holdout:
split = train_test_split(
attributes_embeddings,
attributes_labels,
shuffle=True,
random_state=random_state,
test_size=0.2,
stratify=attributes_labels,
)
X_embeddings_train, X_embeddings_test, y_train, y_test = split
num_train_negative_examples = np.count_nonzero((y_train == -1))
num_train_positive_examples = np.count_nonzero((y_train == 1))
# Check the number of train and test examples.
            if num_train_positive_examples == 1:
                raise Exception(
                    "After splitting the dataset using train_test_split "
                    "(with test_size=0.2), the first attribute remained with "
                    "only one training example."
                )
            if num_train_negative_examples < 1:
                raise Exception(
                    "After splitting the dataset using train_test_split "
                    "(with test_size=0.2), the second attribute remained with "
                    "0 training examples."
                )
estimator = estimator(**estimator_params)
estimator.fit(X_embeddings_train, y_train)
# evaluate
y_pred = estimator.predict(X_embeddings_test)
score = estimator.score(X_embeddings_test, y_test)
if print_model_evaluation:
print(
"Classification Report:\n{}".format(
classification_report(y_test, y_pred, labels=estimator.classes_)
)
)
else:
estimator = estimator(**estimator_params)
estimator.fit(attributes_embeddings, attributes_labels)
score = estimator.score(attributes_embeddings, attributes_labels)
if print_model_evaluation:
print("Holdout is disabled. No evaluation was performed.")
return estimator, score
def _calc_rnsb(
self,
target_embeddings_dict: List[Dict[str, np.ndarray]],
classifier: BaseEstimator,
) -> Tuple[np.float_, dict]:
"""Calculate the RNSB metric.
Parameters
----------
target_embeddings_dict : Dict[str, np.ndarray]
dict with the target words and their embeddings.
classifier : BaseEstimator
Trained scikit-learn classifier in the previous step.
Returns
-------
Tuple[np.float_, dict]
return the calculated kl_divergence and
negative_sentiment_probabilities in that order.
"""
# join the embeddings and the word sets in their respective arrays
target_embeddings_sets = [
list(target_dict.values()) for target_dict in target_embeddings_dict
]
target_words_sets = [
list(target_dict.keys()) for target_dict in target_embeddings_dict
]
# get the probabilities associated with each target word vector
probabilities = [
classifier.predict_proba(target_embeddings)
for target_embeddings in target_embeddings_sets
]
# row where the negative probabilities are located
negative_class_clf_row = np.where(classifier.classes_ == -1)[0][0]
# extract only the negative sentiment probability for each word
negative_probabilities = np.concatenate(
[
probability[:, negative_class_clf_row].flatten()
for probability in probabilities
]
)
# normalization of the probabilities
normalized_negative_probabilities = np.array(
negative_probabilities / np.sum(negative_probabilities)
)
# get the uniform dist
uniform_dist = (
np.ones(normalized_negative_probabilities.shape[0])
* 1
/ normalized_negative_probabilities.shape[0]
)
# calc the kl divergence
kl_divergence = entropy(normalized_negative_probabilities, uniform_dist)
flatten_target_words = [
item for sublist in target_words_sets for item in sublist
]
# set the probabilities for each word in a dict.
negative_sentiment_probabilities = {
word: prob
for word, prob in zip(flatten_target_words, negative_probabilities)
}
return kl_divergence, negative_sentiment_probabilities
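    # For reference, the divergence returned above is
    #   KL(P || U) = sum_i P_i * log(P_i / (1/n))
    # where P is the normalized negative-sentiment distribution over the n target
    # words and U is the uniform distribution; scipy.stats.entropy(P, U) computes it.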
def run_query(
self,
query: Query,
model: WordEmbeddingModel,
estimator: BaseEstimator = LogisticRegression,
estimator_params: Dict[str, Any] = {"solver": "liblinear", "max_iter": 10000},
n_iterations: int = 1,
random_state: Union[int, None] = None,
holdout: bool = True,
print_model_evaluation: bool = False,
lost_vocabulary_threshold: float = 0.2,
preprocessors: List[Dict[str, Union[str, bool, Callable]]] = [{}],
strategy: str = "first",
normalize: bool = False,
warn_not_found_words: bool = False,
*args: Any,
**kwargs: Any
) -> Dict[str, Any]:
"""Calculate the RNSB metric over the provided parameters.
        Note: if you want to use this with the Bing Liu dataset, you have to
        pass the positive and negative words in the first and second positions
        of the attribute set array, respectively.
Scores on this metric vary with each run due to different instances
of classifier training. For this reason, the robustness of these scores
can be improved by repeating the test several times and returning the
average of the scores obtained. This can be indicated in the
n_iterations parameter.
Parameters
----------
query : Query
A Query object that contains the target and attribute word sets to
be tested.
model : WordEmbeddingModel
A word embedding model.
estimator : BaseEstimator, optional
A scikit-learn classifier class that implements predict_proba function,
            function, by default LogisticRegression.
estimator_params : dict, optional
Parameters that will use the classifier, by default { 'solver': 'liblinear',
'max_iter': 10000, }
n_iterations : int, optional
When provided, it tells the metric to run the specified number of times
and then average its results. This functionality is indicated to
strengthen the results obtained, by default 1.
random_state : Union[int, None], optional
Seed that allow making the execution of the query reproducible.
Warning: if a random_state other than None is provided along with
n_iterations, each iteration will split the dataset and train a
classifier associated to the same seed, so the results of each iteration
            will always be the same, by default None.
holdout: bool, optional
True indicates that a holdout (split attributes in train/test sets) will
be executed before running the model training.
This option allows to evaluate the performance of the classifier
(can be printed using print_model_evaluation=True) at the cost of training
the classifier with fewer examples. False disables this functionality.
            Note that holdout splits the data into 80% train and 20% test, performs a
            shuffle and tries to maintain the original class ratio via the stratify param.
by default True
print_model_evaluation : bool, optional
Indicates whether the classifier evaluation is printed after the
training process is completed, by default False
preprocessors : List[Dict[str, Union[str, bool, Callable]]]
A list with preprocessor options.
A ``preprocessor`` is a dictionary that specifies what processing(s) are
performed on each word before it is looked up in the model vocabulary.
For example, the ``preprocessor``
            ``{'lowercase': True, 'strip_accents': True}`` allows you to lowercase
and remove the accent from each word before searching for them in the
model vocabulary. Note that an empty dictionary ``{}`` indicates that no
preprocessing is done.
The possible options for a preprocessor are:
* ``lowercase``: ``bool``. Indicates that the words are transformed to
lowercase.
* ``uppercase``: ``bool``. Indicates that the words are transformed to
uppercase.
* ``titlecase``: ``bool``. Indicates that the words are transformed to
titlecase.
* ``strip_accents``: ``bool``, ``{'ascii', 'unicode'}``: Specifies that
the accents of the words are eliminated. The stripping type can be
specified. True uses ‘unicode’ by default.
* ``preprocessor``: ``Callable``. It receives a function that operates
on each word. In the case of specifying a function, it overrides the
default preprocessor (i.e., the previous options stop working).
            A list of preprocessor options allows you to search for several
            variants of the words in the model. For example, with the
            preprocessors ``[{}, {"lowercase": True, "strip_accents": True}]``,
            the first option ``{}`` searches for the original words in the
            vocabulary of the model. In case some of them are not found,
            ``{"lowercase": True, "strip_accents": True}`` is executed on these
            words and then they are searched in the model vocabulary.
strategy : str, optional
The strategy indicates how it will use the preprocessed words: 'first' will
            include only the first transformed word found. 'all' will include all
transformed words found, by default "first".
normalize : bool, optional
True indicates that embeddings will be normalized, by default False
warn_not_found_words : bool, optional
Specifies if the function will warn (in the logger)
the words that were not found in the model's vocabulary, by default False.
Returns
-------
Dict[str, Any]
A dictionary with the query name, the calculated kl-divergence,
the negative probabilities for all tested target words and
the normalized distribution of probabilities.
Examples
--------
The following example shows how to run a query that measures gender
bias using RNSB:
>>> from wefe.query import Query
>>> from wefe.utils import load_test_model
>>> from wefe.metrics import RNSB
>>>
>>> # define the query
>>> query = Query(
... target_sets=[
... ["female", "woman", "girl", "sister", "she", "her", "hers",
... "daughter"],
... ["male", "man", "boy", "brother", "he", "him", "his", "son"],
... ],
... attribute_sets=[
... ["home", "parents", "children", "family", "cousins", "marriage",
... "wedding", "relatives",],
... ["executive", "management", "professional", "corporation", "salary",
... "office", "business", "career", ],
... ],
... target_sets_names=["Female terms", "Male Terms"],
... attribute_sets_names=["Family", "Careers"],
... )
>>>
>>> # load the model (in this case, the test model included in wefe)
>>> model = load_test_model()
>>>
>>> # instance the metric and run the query
>>> RNSB().run_query(query, model) # doctest: +SKIP
{
'query_name': 'Female terms and Male Terms wrt Family and Careers',
'result': 0.02899395368025491,
'rnsb': 0.02899395368025491,
'negative_sentiment_probabilities': {
'female': 0.43272977959940667,
'woman': 0.6951544646603257,
'girl': 0.8141335128074891,
'sister': 0.8472896023561901,
'she': 0.5718048693637721,
'her': 0.5977365245684795,
'hers': 0.6939932357393684,
'daughter': 0.8887895021296551,
'male': 0.5511334216620132,
'man': 0.584603563015763,
'boy': 0.8129431089763982,
'brother': 0.8331301278277582,
'he': 0.4420145415672582,
'him': 0.5139776652415698,
'his': 0.44459083129125154,
'son': 0.8483699001061482
},
'negative_sentiment_distribution': {
'female': 0.04093015763103808,
'woman': 0.06575184597373163,
'girl': 0.07700559236475293,
'sister': 0.08014169261861909,
'she': 0.05408470722518866,
'her': 0.05653747748783378,
'hers': 0.0656420100321782,
'daughter': 0.0840670000956609,
'male': 0.052129478690471215,
'man': 0.055295283832909777,
'boy': 0.07689299688658582,
'brother': 0.07880240525790659,
'he': 0.04180836566946482,
'him': 0.04861506614276754,
'his': 0.04205204648247447,
'son': 0.0802438736084164
}
}
        If you want to perform a holdout to evaluate the model (the default option)
and print the evaluation, use the params `holdout=True` and
`print_model_evaluation=True`
>>> RNSB().run_query(
... query,
... model,
... holdout=True,
... print_model_evaluation=True) # doctest: +SKIP
"Classification Report:"
" precision recall f1-score support"
" "
" -1.0 1.00 1.00 1.00 2"
" 1.0 1.00 1.00 1.00 2"
" "
" accuracy 1.00 4"
" macro avg 1.00 1.00 1.00 4"
"weighted avg 1.00 1.00 1.00 4"
{
'query_name': 'Female terms and Male Terms wrt Family and Careers',
'result': 0.028622532697549753,
'rnsb': 0.028622532697549753,
'negative_sentiment_probabilities': {
'female': 0.4253580834091863,
'woman': 0.7001106999668327,
'girl': 0.8332271657179001,
'sister': 0.8396986674252397,
'she': 0.603565156083575,
'her': 0.6155296658190583,
'hers': 0.7147102319731146,
'daughter': 0.884829695542309,
'male': 0.5368167185683463,
'man': 0.5884385611055519,
'boy': 0.8132056992854114,
'brother': 0.8270792128939456,
'he': 0.4500708786239489,
'him': 0.49965355723589994,
'his': 0.45394634194580535,
'son': 0.8450690196299462
},
'negative_sentiment_distribution': {
'female': 0.04000994319670431,
'woman': 0.0658536664275202,
'girl': 0.07837483962483958,
'sister': 0.07898356066672689,
'she': 0.05677241964432896,
'her': 0.057897822860029945,
'hers': 0.06722692455767754,
'daughter': 0.08322866600691568,
'male': 0.05049394205657851,
'man': 0.055349585027011844,
'boy': 0.07649158463116877,
'brother': 0.07779655217044128,
'he': 0.04233447297841125,
'him': 0.04699830853762932,
'his': 0.04269900599992016,
'son': 0.07948870561409564
}
}
If you want to disable the holdout, use the param `holdout=False`.
>>> # instance the metric and run the query
>>> RNSB().run_query(
... query,
... model,
... holdout=False,
... print_model_evaluation=True) # doctest: +SKIP
"Holdout is disabled. No evaluation was performed."
{
'query_name': 'Female terms and Male Terms wrt Family and Careers',
'result': 0.03171747070323668,
'rnsb': 0.03171747070323668,
'negative_sentiment_probabilities': {
'female': 0.41846552820545985,
'woman': 0.7104860753714863,
'girl': 0.8325507470146775,
'sister': 0.8634309153859019,
'she': 0.593223646607777,
'her': 0.6138756234516175,
'hers': 0.7205687956033292,
'daughter': 0.8964129106245865,
'male': 0.545075356696542,
'man': 0.5856674025396198,
'boy': 0.8184955986780176,
'brother': 0.8392921127806534,
'he': 0.43437306199747594,
'him': 0.4974336520424158,
'his': 0.4342254305877148,
'son': 0.851969666735826
},
'negative_sentiment_distribution': {
'female': 0.03927208494188834,
'woman': 0.0666775818349327,
'girl': 0.07813308731881921,
'sister': 0.0810311243458957,
'she': 0.055672756461026464,
'her': 0.05761089983046311,
'hers': 0.06762382332604978,
'daughter': 0.08412641327954143,
'male': 0.05115414356760721,
'man': 0.05496361929467757,
'boy': 0.07681404203995185,
'brother': 0.07876574991858241,
'he': 0.04076497259018534,
'him': 0.04668307260513937,
'his': 0.04075111770161401,
'son': 0.07995551094362546
}
}
"""
# check the types of the provided arguments (only the defaults).
self._check_input(query, model, locals())
# transform query word sets into embeddings
embeddings = get_embeddings_from_query(
model=model,
query=query,
lost_vocabulary_threshold=lost_vocabulary_threshold,
preprocessors=preprocessors,
strategy=strategy,
normalize=normalize,
warn_not_found_words=warn_not_found_words,
)
        # if any set has fewer words than the allowed limit,
# return the default value (nan)
if embeddings is None:
return {
"query_name": query.query_name,
"result": np.nan,
"rnsb": np.nan,
"score": np.nan,
"negative_sentiment_probabilities": {},
"negative_sentiment_distribution": {},
}
# get the targets and attribute sets transformed into embeddings.
target_sets, attribute_sets = embeddings
# get only the embeddings of the sets.
target_embeddings = list(target_sets.values())
attribute_embeddings = list(attribute_sets.values())
# create the arrays that will contain the scores for each iteration
calculated_divergences = []
calculated_negative_sentiment_probabilities = []
scores = []
# calculate the scores for each iteration
for i in range(n_iterations):
try:
if print_model_evaluation and (i > 0 and i < 2):
print(
"When n_iterations > 1, only the first evaluation is printed."
)
print_model_evaluation = False
# train the logit with the train data.
trained_classifier, score = self._train_classifier(
attribute_embeddings_dict=attribute_embeddings,
random_state=random_state,
estimator=estimator,
estimator_params=estimator_params,
holdout=holdout,
print_model_evaluation=print_model_evaluation,
)
scores.append(score)
# get the scores
divergence, negative_sentiment_probabilities = self._calc_rnsb(
target_embeddings, trained_classifier
)
calculated_divergences.append(divergence)
calculated_negative_sentiment_probabilities.append(
negative_sentiment_probabilities
)
except Exception as e:
logging.exception("RNSB Iteration omitted: " + str(e))
# aggregate results
divergence = np.mean(np.array(calculated_divergences))
negative_sentiment_probabilities = dict(
| pd.DataFrame(calculated_negative_sentiment_probabilities) | pandas.DataFrame |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import mean, var
from scipy import stats
from matplotlib import rc
from lifelines import KaplanMeierFitter
# Python program to plot the OS difference between M2 HOXA9-low and M2 HOXA9-high patients
def find_gene_index(gene_list,gene):
j = [i for i,x in enumerate(gene_list) if x == gene]
return j
def find_patients_index(patients, p):
j = [i for i,x in enumerate(patients) if x == p]
return j[0]
filename = "log_modified_LAML_TPM.csv"
filename2 = "laml_tcga_clinical_data.tsv" # from David download - cbioPortal
data = pd.read_csv(filename)
patient_description = pd.read_csv(filename2,sep='\t')
gene_list = data['Hybridization REF']
# find the index of HOXA9 in the data
i_HOXA9 = find_gene_index(gene_list, "HOXA9")
HOXA9_exp = data.iloc[i_HOXA9,2:]
# select patients that have HOXA9 expression in the peaks
peak1_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 1 and x >= 0.005] # +1 due to the first gene columns we removed +1 due to index shift
peak2_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 5.5 and x >= 4]
# 32 patients for low and 80 for high
peak1_patients = data.iloc[:,peak1_indexes].columns
peak2_patients = data.iloc[:,peak2_indexes] .columns
# only keep the patient number
peak1_patients = [item.split('-')[2] for item in peak1_patients]
peak2_patients = [item.split('-')[2] for item in peak2_patients]
patient2 = patient_description['Patient ID']
patient2 = [item.split('-')[2] for item in patient2]
M2_low_indexes = [i for i,x in enumerate(patient2) if x in peak1_patients and patient_description['FAB'][i] == 'M2']
M2_high_indexes = [i for i,x in enumerate(patient2) if x in peak2_patients and patient_description['FAB'][i] == 'M2']
M4_low_indexes = [i for i,x in enumerate(patient2) if x in peak1_patients and patient_description['FAB'][i] == 'M4']
M4_high_indexes = [i for i,x in enumerate(patient2) if x in peak2_patients and patient_description['FAB'][i] == 'M4']
M2_low_vital = patient_description["Patient's Vital Status"][M2_low_indexes]
M2_high_vital = patient_description["Patient's Vital Status"][M2_high_indexes ]
M4_low_vital = patient_description["Patient's Vital Status"][M4_low_indexes]
M4_high_vital = patient_description["Patient's Vital Status"][M4_high_indexes ]
M2_low_vital2 = [0 if item == "Alive" else 1 for item in M2_low_vital]
M2_high_vital2 = [0 if item == "Alive" else 1 for item in M2_high_vital]
M4_low_vital2 = [0 if item == "Alive" else 1 for item in M4_low_vital]
M4_high_vital2 = [0 if item == "Alive" else 1 for item in M4_high_vital]
M2_low_OS = patient_description["Overall Survival (Months)"][M2_low_indexes]
M2_high_OS = patient_description["Overall Survival (Months)"][M2_high_indexes]
M4_low_OS = patient_description["Overall Survival (Months)"][M4_low_indexes]
M4_high_OS = patient_description["Overall Survival (Months)"][M4_high_indexes]
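# The 0/1 lists built above are the event indicators expected by lifelines'
# KaplanMeierFitter (0 = alive/censored, 1 = death observed). A typical call,
# assuming the variables defined above, would be:
#   kmf = KaplanMeierFitter()
#   kmf.fit(M2_low_OS, event_observed=M2_low_vital2, label="M2 HOXA9 low")
#   kmf.plot_survival_function()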
M2_low_tab = {'OS':M2_low_OS, 'vital':M2_low_vital, 'vital2':M2_low_vital2}
M2_high_tab = {'OS':M2_high_OS, 'vital':M2_high_vital, 'vital2':M2_high_vital2}
M4_low_tab = {'OS':M4_low_OS, 'vital':M4_low_vital, 'vital2':M4_low_vital2}
M4_high_tab = {'OS':M4_high_OS, 'vital':M4_high_vital, 'vital2':M4_high_vital2}
M2_low_tab = pd.DataFrame(data=M2_low_tab)
M2_high_tab = | pd.DataFrame(data=M2_high_tab) | pandas.DataFrame |
# -*- coding: UTF-8 -*-
#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
import os
import pandas
import pytest
from pandas.testing import assert_frame_equal
from pydruid.query import Query, QueryBuilder
from pydruid.utils import aggregators, filters, having, postaggregator
def create_query_with_results():
query = Query({}, "timeseries")
query.result = [
{
"result": {"value1": 1, "value2": "㬓"},
"timestamp": "2015-01-01T00:00:00.000-05:00",
},
{
"result": {"value1": 2, "value2": "㬓"},
"timestamp": "2015-01-02T00:00:00.000-05:00",
},
]
return query
EXPECTED_RESULTS_PANDAS = [
{"timestamp": "2015-01-01T00:00:00.000-05:00", "value1": 1, "value2": "㬓"},
{"timestamp": "2015-01-02T00:00:00.000-05:00", "value1": 2, "value2": "㬓"},
]
def expected_results_csv_reader():
# csv.DictReader does not perform promotion to int64
expected_results = []
for element in EXPECTED_RESULTS_PANDAS:
modified_elem = element.copy()
modified_elem.update({"value1": str(modified_elem["value1"])})
expected_results.append(modified_elem)
return expected_results
class TestQueryBuilder:
def test_build_query(self):
# given
expected_query_dict = {
"queryType": None,
"dataSource": "things",
"aggregations": [{"fieldName": "thing", "name": "count", "type": "count"}],
"postAggregations": [
{
"fields": [
{"fieldName": "sum", "type": "fieldAccess"},
{"fieldName": "count", "type": "fieldAccess"},
],
"fn": "/",
"name": "avg",
"type": "arithmetic",
}
],
"pagingSpec": {"pagingIdentifies": {}, "threshold": 1},
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
"new_key": "value",
}
builder = QueryBuilder()
# when
query = builder.build_query(
None,
{
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"post_aggregations": {
"avg": (postaggregator.Field("sum") / postaggregator.Field("count"))
},
"paging_spec": {"pagingIdentifies": {}, "threshold": 1},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
"new_key": "value",
},
)
# then
assert query.query_dict == expected_query_dict
def test_build_query_none_type(self):
# given
expected_query_dict = {
"queryType": None,
"dataSource": "things",
"aggregations": [{"fieldName": "thing", "name": "count", "type": "count"}],
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
"dimension": "dim1",
}
builder = QueryBuilder()
# when
builder_dict = {
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
"dimension": "dim1",
}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# you should be able to pass `None` to dimension/having/filter
for v in ["dimension", "having", "filter"]:
expected_query_dict[v] = None
builder_dict[v] = None
query = builder.build_query(None, builder_dict)
assert query.query_dict == expected_query_dict
def test_validate_query(self):
# given
builder = QueryBuilder()
# when
builder.validate_query(None, ["validkey"], {"validkey": "value"})
# then
pytest.raises(
ValueError,
builder.validate_query,
*[None, ["validkey"], {"invalidkey": "value"}]
)
def test_union_datasource(self):
# Given
expected_query_dict = {"queryType": None, "dataSource": "things"}
builder = QueryBuilder()
# when
builder_dict = {"datasource": "things"}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# Given
expected_query_dict = {
"queryType": None,
"dataSource": {
"type": "union",
"dataSources": ["things", "others", "more"],
},
}
builder = QueryBuilder()
# when
builder_dict = {"datasource": ["things", "others", "more"]}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# Given check that it rejects non-string items
builder = QueryBuilder()
builder_dict = {"datasource": ["things", 123]}
with pytest.raises(ValueError):
query = builder.build_query(None, builder_dict)
def test_build_subquery(self):
# given
expected_query_dict = {
"query": {
"queryType": "groupBy",
"dataSource": "things",
"aggregations": [
{"fieldName": "thing", "name": "count", "type": "count"}
],
"postAggregations": [
{
"fields": [
{"fieldName": "sum", "type": "fieldAccess"},
{"fieldName": "count", "type": "fieldAccess"},
],
"fn": "/",
"name": "avg",
"type": "arithmetic",
}
],
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
},
"type": "query",
}
builder = QueryBuilder()
# when
subquery_dict = builder.subquery(
{
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"post_aggregations": {
"avg": (postaggregator.Field("sum") / postaggregator.Field("count"))
},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
}
)
# then
assert subquery_dict == expected_query_dict
class TestQuery:
def test_export_tsv(self, tmpdir):
query = create_query_with_results()
file_path = tmpdir.join("out.tsv")
query.export_tsv(str(file_path))
with open(str(file_path)) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter="\t")
actual = [line for line in reader]
assert actual == expected_results_csv_reader()
def test_export_pandas(self):
query = create_query_with_results()
df = query.export_pandas()
expected_df = | pandas.DataFrame(EXPECTED_RESULTS_PANDAS) | pandas.DataFrame |
# coding: utf-8
# # Read Data Sample
# In[1]:
import pandas as pd
import numpy as np
import os
from collections import namedtuple
pd.set_option("display.max_rows",100)
#%matplotlib inline
# In[2]:
class dataset:
kdd_train_2labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
kdd_test_2labels = pd.read_pickle("dataset/kdd_test_2labels.pkl")
kdd_test__2labels = pd.read_pickle("dataset/kdd_test__2labels.pkl")
kdd_train_5labels = pd.read_pickle("dataset/kdd_train_5labels.pkl")
kdd_test_5labels = pd.read_pickle("dataset/kdd_test_5labels.pkl")
# In[3]:
dataset.kdd_train_2labels.shape
# In[4]:
dataset.kdd_test_2labels.shape
# In[5]:
from sklearn import model_selection as ms
from sklearn import preprocessing as pp
class preprocess:
output_columns_2labels = ['is_Normal','is_Attack']
x_input = dataset.kdd_train_2labels.drop(output_columns_2labels, axis = 1)
y_output = dataset.kdd_train_2labels.loc[:,output_columns_2labels]
x_test_input = dataset.kdd_test_2labels.drop(output_columns_2labels, axis = 1)
y_test = dataset.kdd_test_2labels.loc[:,output_columns_2labels]
x_test__input = dataset.kdd_test__2labels.drop(output_columns_2labels, axis = 1)
y_test_ = dataset.kdd_test__2labels.loc[:,output_columns_2labels]
ss = pp.StandardScaler()
x_train = ss.fit_transform(x_input)
x_test = ss.transform(x_test_input)
x_test_ = ss.transform(x_test__input)
y_train = y_output.values
y_test = y_test.values
y_test_ = y_test_.values
preprocess.x_train.shape
# In[6]:
import tensorflow as tf
# In[7]:
class network(object):
input_dim = 122
classes = 2
hidden_encoder_dim = 122
hidden_layers = 1
latent_dim = 18
def __init__(self, classes, hidden_layers, num_of_features):
self.classes = classes
self.hidden_layers = hidden_layers
self.latent_dim = num_of_features
def build_layers(self):
tf.reset_default_graph()
#learning_rate = tf.Variable(initial_value=0.001)
input_dim = self.input_dim
classes = self.classes
hidden_encoder_dim = self.hidden_encoder_dim
hidden_layers = self.hidden_layers
latent_dim = self.latent_dim
with tf.variable_scope("Input"):
self.x = tf.placeholder("float", shape=[None, input_dim])
self.y_ = tf.placeholder("float", shape=[None, classes])
self.keep_prob = tf.placeholder("float")
self.lr = tf.placeholder("float")
with tf.variable_scope("Layer_Encoder"):
hidden_encoder = tf.layers.dense(self.x, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
for h in range(hidden_layers - 1):
hidden_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
#hidden_encoder = tf.layers.dense(self.x, latent_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
#hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
with tf.variable_scope("Layer_Dense_Softmax"):
self.y = tf.layers.dense(hidden_encoder, classes, activation=tf.nn.softmax)
with tf.variable_scope("Loss"):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y_, logits = self.y))
#loss = tf.clip_by_value(loss, -1e-1, 1e-1)
#loss = tf.where(tf.is_nan(loss), 1e-1, loss)
#loss = tf.where(tf.equal(loss, -1e-1), tf.random_normal(loss.shape), loss)
#loss = tf.where(tf.equal(loss, 1e-1), tf.random_normal(loss.shape), loss)
self.regularized_loss = loss
correct_prediction = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
self.tf_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name = "Accuracy")
with tf.variable_scope("Optimizer"):
learning_rate=self.lr
optimizer = tf.train.AdamOptimizer(learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(self.regularized_loss))
gradients = [
None if gradient is None else tf.clip_by_value(gradient, -1, 1)
for gradient in gradients]
self.train_op = optimizer.apply_gradients(zip(gradients, variables))
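            # The clipping above bounds every gradient to [-1, 1] before the Adam
            # update - a common guard against exploding gradients; Train.train
            # below adds a second guard by restoring the last checkpoint whenever
            # the loss still blows up.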
#self.train_op = optimizer.minimize(self.regularized_loss)
# add op for merging summary
#self.summary_op = tf.summary.merge_all()
self.pred = tf.argmax(self.y, axis = 1)
self.actual = tf.argmax(self.y_, axis = 1)
# add Saver ops
self.saver = tf.train.Saver()
# In[8]:
import collections
import time
class Train:
result = namedtuple("score", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score', 'test_score_20', 'time_taken'])
predictions = {}
results = []
best_acc = 0
best_acc_global = 0
def train(epochs, net, h,f, lrs):
batch_iterations = 200
train_loss = None
Train.best_acc = 0
os.makedirs("dataset/tf_dense_only_nsl_kdd/hidden layers_{}_features count_{}".format(epochs,h,f),
exist_ok = True)
with tf.Session() as sess:
#summary_writer_train = tf.summary.FileWriter('./logs/kdd/VAE/training', graph=sess.graph)
#summary_writer_valid = tf.summary.FileWriter('./logs/kdd/VAE/validation')
sess.run(tf.global_variables_initializer())
start_time = time.perf_counter()
for c, lr in enumerate(lrs):
for epoch in range(1, (epochs+1)):
x_train, x_valid, y_train, y_valid, = ms.train_test_split(preprocess.x_train,
preprocess.y_train,
test_size=0.1)
batch_indices = np.array_split(np.arange(x_train.shape[0]),
batch_iterations)
for i in batch_indices:
def train_batch():
nonlocal train_loss
_, train_loss = sess.run([net.train_op,
net.regularized_loss,
], #net.summary_op
feed_dict={net.x: x_train[i,:],
net.y_: y_train[i,:],
net.keep_prob:0.5, net.lr:lr})
train_batch()
#summary_writer_train.add_summary(summary_str, epoch)
while((train_loss > 1e4 or np.isnan(train_loss)) and epoch > 1):
print("Step {} | Training Loss: {:.6f}".format(epoch, train_loss))
                        net.saver.restore(
                            sess,
                            tf.train.latest_checkpoint(
                                'dataset/tf_dense_only_nsl_kdd/hidden_layers_{}_features_count_{}'
                                .format(h, f)))
train_batch()
valid_accuracy = sess.run(net.tf_accuracy, #net.summary_op
feed_dict={net.x: x_valid,
net.y_: y_valid,
net.keep_prob:1, net.lr:lr})
#summary_writer_valid.add_summary(summary_str, epoch)
accuracy, pred_value, actual_value, y_pred = sess.run([net.tf_accuracy,
net.pred,
net.actual, net.y],
feed_dict={net.x: preprocess.x_test,
net.y_: preprocess.y_test,
net.keep_prob:1, net.lr:lr})
accuracy_, pred_value_, actual_value_, y_pred_ = sess.run([net.tf_accuracy,
net.pred,
net.actual, net.y],
feed_dict={net.x: preprocess.x_test_,
net.y_: preprocess.y_test_,
net.keep_prob:1, net.lr:lr})
print("Step {} | Training Loss: {:.6f} | Validation Accuracy: {:.6f}".format(epoch, train_loss, valid_accuracy))
print("Accuracy on Test data: {}, {}".format(accuracy, accuracy_))
if accuracy > Train.best_acc_global:
Train.best_acc_global = accuracy
Train.pred_value = pred_value
Train.actual_value = actual_value
Train.pred_value_ = pred_value_
Train.actual_value_ = actual_value_
Train.best_parameters = "Hidden Layers:{}, Features Count:{}".format(h, f)
if accuracy > Train.best_acc:
Train.best_acc = accuracy
if not (np.isnan(train_loss)):
net.saver.save(sess,
"dataset/tf_dense_only_nsl_kdd/hidden_layers_{}_features_count_{}".format(h,f),
global_step = epochs)
curr_pred = pd.DataFrame({"Attack_prob":y_pred[:,-2], "Normal_prob":y_pred[:, -1], "Prediction":pred_value})
Train.predictions.update({"{}_{}_{}".format((epoch+1)*(c+1),f,h):(curr_pred,
Train.result((epoch+1)*(c+1), f, h, valid_accuracy, accuracy, accuracy_, time.perf_counter() - start_time))})
#Train.results.append(Train.result(epochs, f, h,valid_accuracy, accuracy))
# In[9]:
import itertools
class Hyperparameters:
# features_arr = [2, 4, 8, 16, 32, 64, 128, 256]
# hidden_layers_arr = [2, 4, 6, 10]
features_arr = [1, 4, 16, 32, 64, 122]
hidden_layers_arr = [1, 3, 5]
epochs = [2]
lrs = [1e-5, 1e-5, 1e-6]
for e, h, f in itertools.product(epochs, hidden_layers_arr, features_arr):
print("Current Layer Attributes - epochs:{} hidden layers:{} features count:{}".format(e,h,f))
n = network(2,h,f)
n.build_layers()
Train.train(e, n, h,f, lrs)
# In[10]:
dict1 = {}
dict2 = []
for k, (v1, v2) in Train.predictions.items():
dict1.update({k: v1})
dict2.append(v2)
# In[11]:
Train.predictions = dict1
Train.results = dict2
# In[12]:
df_results = pd.DataFrame(Train.results)
# In[13]:
g = df_results.groupby(by=['no_of_features'])
idx = g['test_score'].transform(max) == df_results['test_score']
df_results[idx].sort_values(by = 'test_score', ascending = False)
# In[14]:
g = df_results.groupby(by=['no_of_features'])
idx = g['test_score_20'].transform(max) == df_results['test_score_20']
df_results[idx].sort_values(by = 'test_score_20', ascending = False)
# In[15]:
df_results.sort_values(by = 'test_score', ascending = False)
# In[16]:
pd.Panel(Train.predictions).to_pickle("dataset/tf_dense_only_nsl_kdd_predictions.pkl")
df_results.to_pickle("dataset/tf_dense_only_nsl_kdd_scores.pkl")
temp = df_results.set_index(['no_of_features', 'hidden_layers'])
if not os.path.isfile('dataset/tf_dense_only_nsl_kdd_scores_all.pkl'):
past_scores = temp
else:
past_scores = | pd.read_pickle("dataset/tf_dense_only_nsl_kdd_scores_all.pkl") | pandas.read_pickle |
import pandas as pd
from typing import List, Tuple
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer
from icolos.utils.enums.step_enums import StepClusteringEnum
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
from sklearn.cluster import KMeans
_SC = StepClusteringEnum()
class StepClustering(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters
if _SC.N_CLUSTERS not in self.settings.arguments.parameters.keys():
self.settings.arguments.parameters[_SC.N_CLUSTERS] = 3
if _SC.MAX_ITER not in self.settings.arguments.parameters.keys():
self.settings.arguments.parameters[_SC.MAX_ITER] = 300
if _SC.TOP_N_PER_SOLVENT not in self.settings.additional.keys():
self.settings.additional[_SC.TOP_N_PER_SOLVENT] = 3
def _get_nclusters_and_top_n(self, len_conformers: int) -> Tuple[int, int]:
n_clusters = self.settings.arguments.parameters[_SC.N_CLUSTERS]
if n_clusters > len_conformers:
n_clusters = len_conformers
self._logger.log(
f"Set number of clusters to {n_clusters} because not enough observations were provided.",
_LE.DEBUG,
)
top_n_per_solvent = self.settings.additional[_SC.TOP_N_PER_SOLVENT]
if top_n_per_solvent > len_conformers:
top_n_per_solvent = len_conformers
self._logger.log(
f'Set number of "top_N_per_solvent" to {top_n_per_solvent} because not enough observations were provided.',
_LE.DEBUG,
)
return n_clusters, top_n_per_solvent
def _generate_feature_dataframe(self, conformers: List[Conformer]) -> pd.DataFrame:
features = self.settings.additional[_SC.FEATURES]
df_features = | pd.DataFrame(columns=features) | pandas.DataFrame |
"""
Project: gresearch
File: data.py
Created by: louise
On: 25/01/18
At: 4:56 PM
"""
import os
import torch
from torch.utils.data.dataset import Dataset
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
class SP500(Dataset):
def __init__(self, folder_dataset, T=10, symbols=['AAPL'], use_columns=['Date', 'Close'], start_date='2012-01-01',
end_date='2015-12-31', step=1):
"""
:param folder_dataset: str
:param T: int
:param symbols: list of str
:param use_columns: bool
:param start_date: str, date format YYY-MM-DD
:param end_date: str, date format YYY-MM-DD
"""
self.scaler = MinMaxScaler()
self.symbols = symbols
if len(symbols)==0:
print("No Symbol was specified")
return
self.start_date = start_date
if len(start_date)==0:
print("No start date was specified")
return
self.end_date = end_date
if len(end_date)==0:
print("No end date was specified")
return
self.use_columns = use_columns
if len(use_columns)==0:
print("No column was specified")
return
self.T = T
# Create output dataframe
self.dates = pd.date_range(self.start_date, self.end_date)
self.df_data = pd.DataFrame(index=self.dates)
# Read csv files corresponding to symbols
for symbol in symbols:
fn = os.path.join(folder_dataset, symbol + "_data.csv")
fn = "/home/louise/src/gresearch/" + folder_dataset + "/" + symbol + "_data.csv"
print(fn)
df_current = pd.read_csv(fn, index_col='Date', usecols=self.use_columns, na_values='nan', parse_dates=True)
df_current = df_current.rename(columns={'Close': symbol})
self.df_data = self.df_data.join(df_current)
# Replace NaN values with forward then backward filling
self.df_data.fillna(method='ffill', inplace=True, axis=0)
self.df_data.fillna(method='bfill', inplace=True, axis=0)
        self.numpy_data = self.df_data[self.symbols].values  # DataFrame.as_matrix() was removed in newer pandas
self.train_data = self.scaler.fit_transform(self.numpy_data)
self.chunks = torch.FloatTensor(self.train_data).unfold(0, self.T, step).permute(0, 2, 1)
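        # unfold(0, T, step) turns the (num_days, num_symbols) array into sliding
        # windows of length T and permute puts them back in time-major order, so
        # chunks has shape (num_windows, T, num_symbols); __getitem__ then uses
        # the first T-1 rows of a window as input and its last row as the target.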
def __getitem__(self, index):
x = self.chunks[index, :-1, :]
y = self.chunks[index, -1, :]
return x, y
def __len__(self):
return self.chunks.size(0)
class SP500Multistep(Dataset):
def __init__(self, folder_dataset, symbols=['AAPL'], use_columns=['Date', 'Close'], start_date='2012-01-01',
end_date='2015-12-31', step=1, n_in=10, n_out=5):
"""
:param folder_dataset: str
:param symbols: list of str
:param use_columns: bool
:param start_date: str, date format YYY-MM-DD
:param end_date: str, date format YYY-MM-DD
"""
self.scaler = MinMaxScaler()
self.symbols = symbols
if len(symbols)==0:
print("No Symbol was specified")
return
self.start_date = start_date
if len(start_date)==0:
print("No start date was specified")
return
self.end_date = end_date
if len(end_date)==0:
print("No end date was specified")
return
self.use_columns = use_columns
if len(use_columns)==0:
print("No column was specified")
return
# Create output dataframe
self.dates = pd.date_range(self.start_date, self.end_date)
self.df_data = pd.DataFrame(index=self.dates)
# Read csv files corresponding to symbols
for symbol in symbols:
fn = os.path.join(folder_dataset, symbol + "_data.csv")
fn = "/home/louise/src/gresearch/" + folder_dataset + "/" + symbol + "_data.csv"
print(fn)
df_current = | pd.read_csv(fn, index_col='Date', usecols=self.use_columns, na_values='nan', parse_dates=True) | pandas.read_csv |
"""
MIT License
Copyright (c) 2018 <NAME> Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import numpy as np
import pandas as pd
import pytest
from .. import cter
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
OUTPUT_TEST_FOLDER = 'OUTPUT_TESTS_DUMP'
INPUT_TEST_FOLDER = '../../../test_files'
class TestGetCterV10HeaderNames:
def test_call_functions_should_return_filled_list(self):
data = [
'defocus',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'astigmatism_amplitude',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
]
assert cter.get_cter_v1_0_header_names() == data
class TestLoadCterV10:
def test_correct_multiline_file_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_multiline.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [[
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]] * 2
data_frame = pd.DataFrame(
data,
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_low_angle_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_low_angle.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_high_angle_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_high_angle.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
class TestDefocusDefocusDiffToDefocusUAndV:
def test_defocus_2_um_zero_astigmatism_should_return_20000_angstrom(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(2, 0)
assert def_u == 20000
def test_zero_astigmatism_should_return_same_values(self):
def_u, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2, 0)
assert def_u == def_v
def test_values_should_return_correct_defocus_u(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, 0.1)
assert def_u == 20000
def test_values_should_return_correct_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, 0.1)
assert def_v == 21000
def test_values_inverse_should_return_correct_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, -0.1)
assert def_v == 20000
def test_multi_input_should_return_multi_output_defocus_u(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(
pd.Series([2, 2.05, 2.05]),
pd.Series([0, -0.1, 0.1])
)
assert def_u.equals(pd.Series([20000, 21000, 20000], dtype=float))
def test_multi_input_should_return_multi_output_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(
pd.Series([2, 2.05, 2.05]),
pd.Series([0, 0.1, -0.1]),
)
assert def_v.equals(pd.Series([20000, 21000, 20000], dtype=float))
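# Reading aid (an assumption inferred from the assertions above and below, not a
# statement about the library's internals): with defocus and astigmatism given in
# micrometers, the conversions behave as
#   DefocusU = (defocus - diff / 2) * 1e4    DefocusV = (defocus + diff / 2) * 1e4   [Angstrom]
# and, inversely, defocus = (DefocusU + DefocusV) / 2e4 and diff = (DefocusV - DefocusU) / 1e4.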
class TestDefocusUAndVToDefocusDefocusDiff:
def test_defocus_u_2_um_defocus_v_2_um_should_return_20000_angstrom(self):
defocus, _ = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 20000)
assert defocus == 2
def test_zero_astigmatism_should_return_same_values(self):
_, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 20000)
assert astigmatism == 0
def test_values_should_return_correct_defocus_u(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(21000, 20000)
assert defocus == 2.05
def test_values_should_return_correct_defocus_v(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(21000, 20000)
assert astigmatism == -0.1
def test_values_invert_should_return_correct_defocus_v(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 21000)
assert astigmatism == 0.1
def test_multi_input_should_return_multi_output_defocus(self):
defocus, _ = cter.defocus_u_and_v_to_defocus_defocus_diff(
pd.Series([20000, 21000]),
pd.Series([20000, 20000])
)
assert defocus.equals(pd.Series([2, 2.05], dtype=float))
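# Illustrative sketch of the conversion exercised by the two test classes above,
# assuming the linear micrometre <-> Angstrom relationship implied by the expected
# values (this is not the actual cter implementation):
def _sketch_defocus_diff_to_u_and_v(defocus_um, diff_um):
    # Mean defocus and astigmatism difference (um) -> DefocusU / DefocusV (Angstrom).
    def_u = 10000 * (defocus_um - diff_um / 2)
    def_v = 10000 * (defocus_um + diff_um / 2)
    return def_u, def_v
# e.g. _sketch_defocus_diff_to_u_and_v(2.05, 0.1) -> (20000.0, 21000.0), matching
# test_values_should_return_correct_defocus_u / _v above.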
"""
Functions to filter WI images based on different conditions.
"""
import numpy as np
import pandas as pd
from . import _domestic, _labels, _utils
from .extraction import get_lowest_taxon
def remove_domestic(images: pd.DataFrame, reset_index: bool = True) -> pd.DataFrame:
"""
Removes images where the identification corresponds to a domestic
species. See wiutils/_domestic for a list of the genera considered
as domestic.
Parameters
----------
images : DataFrame
DataFrame with the project's images.
reset_index : bool
Whether to reset the index of the resulting DataFrame. If True,
the index will be numeric from 0 to the length of the result.
Returns
-------
DataFrame
Copy of images with removed domestic species.
"""
images = images.copy()
images = images[~images[_labels.images.genus].isin(_domestic.genera)]
if reset_index:
images = images.reset_index(drop=True)
return images
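# Minimal usage sketch for remove_domestic (the file name below is a hypothetical
# example; the genus column is whatever wiutils/_labels maps to in the WI images
# table):
def _example_remove_domestic(csv_path: str = "wi_images.csv") -> pd.DataFrame:
    # Load a WI images table and drop rows identified as domestic genera.
    images = pd.read_csv(csv_path)
    return remove_domestic(images)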
def remove_duplicates(
images: pd.DataFrame,
interval: int = 30,
unit: str = "minutes",
reset_index: bool = True,
) -> pd.DataFrame:
"""
Removes duplicate records (images) from the same taxon in the same
deployment given a time interval.
Parameters
----------
images : DataFrame
DataFrame with the project's images.
interval : int
Time interval (for a specific time unit).
unit : str
Time unit. Possible values are:
- 'weeks'
- 'days'
- 'hours'
- 'minutes'
- 'seconds'
reset_index : bool
Whether to reset the index of the resulting DataFrame. If True,
the index will be numeric from 0 to the length of the result.
Returns
-------
DataFrame
Copy of images with removed duplicates.
"""
if unit not in ("weeks", "days", "hours", "minutes", "seconds"):
raise ValueError(
"unit must be one of ['weeks', 'days', 'hours', 'minutes', 'seconds']"
)
images = images.copy()
images["taxon"] = get_lowest_taxon(images, return_rank=False)
df = images.copy()
df[_labels.images.date] = pd.to_datetime(df[_labels.images.date])
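# Minimal usage sketch for remove_duplicates, following its docstring: keep a
# single record per taxon and deployment within a 60-minute window (the
# interval/unit values below are illustrative):
def _example_remove_duplicates(images: pd.DataFrame) -> pd.DataFrame:
    return remove_duplicates(images, interval=60, unit="minutes")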
import unittest
import os
import shutil
import numpy as np
import pandas as pd
from aistac import ConnectorContract
from ds_discovery import Wrangle, SyntheticBuilder
from ds_discovery.intent.wrangle_intent import WrangleIntentModel
from aistac.properties.property_manager import PropertyManager
class WrangleIntentCorrelateTest(unittest.TestCase):
def setUp(self):
os.environ['HADRON_PM_PATH'] = os.path.join('work', 'config')
os.environ['HADRON_DEFAULT_PATH'] = os.path.join('work', 'data')
try:
os.makedirs(os.environ['HADRON_PM_PATH'])
os.makedirs(os.environ['HADRON_DEFAULT_PATH'])
except:
pass
PropertyManager._remove_all()
def tearDown(self):
try:
shutil.rmtree('work')
except:
pass
@property
def tools(self) -> WrangleIntentModel:
return Wrangle.scratch_pad()
def test_runs(self):
"""Basic smoke test"""
im = Wrangle.from_env('tester', default_save=False, default_save_intent=False,
reset_templates=False, has_contract=False).intent_model
self.assertIsInstance(im, WrangleIntentModel)
def test_correlate_custom(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1, 2, 3]
result = tools.correlate_custom(df, code_str="[x + 2 for x in @['A']]")
self.assertEqual([3, 4, 5], result)
result = tools.correlate_custom(df, code_str="[True if x == $v1 else False for x in @['A']]", v1=2)
self.assertEqual([False, True, False], result)
def test_correlate_choice(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [[1,2,4,6], [1], [2,4,8,1], [2,4]]
result = tools.correlate_choice(df, header='A', list_size=2)
control = [[1, 2], [1], [2, 4], [2, 4]]
self.assertEqual(control, result)
result = tools.correlate_choice(df, header='A', list_size=1)
self.assertEqual([1, 1, 2, 2], result)
def test_correlate_coefficient(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
result = tools.correlate_polynomial(df, header='A', coefficient=[2,1])
self.assertEqual([3, 4, 5], result)
result = tools.correlate_polynomial(df, header='A', coefficient=[0, 0, 1])
self.assertEqual([1, 4, 9], result)
def test_correlate_join(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
df['B'] = list('XYZ')
df['C'] = [4.2,7.1,4.1]
result = tools.correlate_join(df, header='B', action="values", sep='_')
self.assertEqual(['X_values', 'Y_values', 'Z_values'], result)
result = tools.correlate_join(df, header='A', action=tools.action2dict(method='correlate_numbers', header='C'))
self.assertEqual(['14.2', '27.1', '34.1'], result)
def test_correlate_columns(self):
tools = self.tools
df = pd.DataFrame({'A': [1,1,1,1,None], 'B': [1,None,2,3,None], 'C': [2,2,2,2,None], 'D': [5,5,5,5,None]})
result = tools.correlate_aggregate(df, headers=list('ABC'), agg='sum')
control = [4.0, 3.0, 5.0, 6.0, 0.0]
self.assertEqual(result, control)
for action in ['sum', 'prod', 'count', 'min', 'max', 'mean']:
print(action)
result = tools.correlate_aggregate(df, headers=list('ABC'), agg=action)
self.assertEqual(5, len(result))
def test_correlate_number(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,3,4.0,5,6,7,8,9,0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', precision=0)
self.assertCountEqual([1,2,3,4,5,6,7,8,9,0], result)
# Offset
df = pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset=1, precision=0)
self.assertEqual([2,3,4,5,6,7,8,9,10,1], result)
# str offset
df = pd.DataFrame(data=[1, 2, 3, 4], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset='1-@', precision=0)
self.assertEqual([0,-1,-2,-3], result)
# complex str offset
result = tools.correlate_numbers(df, 'numbers', offset='x + 2 if x <= 2 else x', precision=0)
self.assertEqual([3, 4, 3, 4], result)
# jitter
df = pd.DataFrame(data=[2] * 1000, columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0)
self.assertLessEqual(max(result), 4)
self.assertGreaterEqual(min(result), 0)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 5)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=1, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 1)
def test_correlate_normalize(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', normalize=(0, 1))
self.assertEqual([0.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 0.0], result)
result = tools.correlate_numbers(df, header='numbers', normalize=(-1, 1))
self.assertEqual([-1.0, 0, 0, 1.0, 1.0, 0, 0, -1.0], result)
def test_correlate_standardise(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', standardize=True, precision=1)
self.assertEqual([-1.4, 0.0, 0.0, 1.4, 1.4, 0.0, 0.0, -1.4], result)
def test_correlate_number_to_numeric(self):
tools = self.tools
df = pd.DataFrame(data=list("123") + ['4-5'], columns=['numbers'])
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, header='numbers')
self.assertTrue("The header column is of type" in str(context.exception))
result = tools.correlate_numbers(df, header='numbers', to_numeric=True)
self.assertEqual([1.0, 2.0, 3.0], result[:3])
result = tools.correlate_numbers(df, header='numbers', to_numeric=True, replace_nulls=0, rtn_type='int')
self.assertEqual([1, 2, 3, 0], result.to_list())
def test_correlate_number_extras(self):
tools = self.tools
# weighting
df = pd.DataFrame(columns=['numbers'], data=[2] * 1000)
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0, jitter_freq=[0, 0, 1, 1])
self.assertCountEqual([2,3,4], list(pd.Series(result)))
#!/usr/bin/env python3
"""
https://mygene.info/
https://mygene.info/v3/api
https://pypi.org/project/mygene/
"""
###
#
import sys,os
import pandas as pd
import mygene as mg
#
FIELDS = 'HGNC,symbol,name,taxid,entrezgene,ensemblgene'
NCHUNK = 100
#
#############################################################################
def GetGenes(ids, fields=FIELDS, fout=None):
"""Get genes by Entrez or Ensembl gene ID."""
df = pd.DataFrame()
mgi = mg.MyGeneInfo()
ichunk = 0
while ichunk*NCHUNK<len(ids):
df_this = mgi.getgenes(ids[ichunk*NCHUNK:((ichunk+1)*NCHUNK)], fields, as_dataframe=True)
df = pd.concat([df, df_this])
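# Usage sketch for GetGenes (the IDs below are illustrative examples; Entrez and
# Ensembl gene IDs are both accepted and are queried in chunks of NCHUNK):
#
#     GetGenes(["1017", "1018", "ENSG00000121410"], fields=FIELDS)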
#!/usr/bin/env python
#
# analysis.py
#
# Copyright (c) 2018 <NAME>. All rights reserved.
import argparse
import time
import sys
import random
from sort import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Utility
def print_err(*args, **kwargs):
print(*args, **kwargs, file=sys.stderr)
def parse_int(n):
try:
return int(n)
except ValueError:
return None
# Analysis
def analyze(sort_func, array, order=Order.LE):
""" Sorting method wrapper.
Execute sorting method on specified array.
Return Stats object filled with statistics.
"""
stats = Stats(len(array))
start = time.time()
sort_func(array, order=order, stats=stats)
end = time.time()
stats.time = end - start
return stats
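# Usage sketch (merge_sort, Order and Stats are assumed to come from the local
# `sort` module pulled in above via `from sort import *`):
#
#     arr = random.sample(range(1000), 1000)
#     stats = analyze(merge_sort, arr, order=Order.LE)
#     print(stats.ncomp, stats.nswap, stats.time)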
def analyze_random(count, output=None, input=None):
""" Perform analysis using random arrays of sizes in 100...10000,
and plot them.
input -- input csv file
output -- output file name
"""
print_err('Random analysis started...')
if input is None:
row_list = []
alg_list = [(merge_sort, Algorithm.MERGE),
(quicksort, Algorithm.QUICK),
(dual_pivot_quicksort, Algorithm.DPQUICK),
(radix_sort, Algorithm.RADIX),
(hybrid_sort, Algorithm.HYBRID)]
for n in range(100, 10100, 100):
for func, alg in alg_list:
for _ in range(count):
arr = random.sample(range(n), n)
d = vars(analyze(func, arr))
d['algorithm'] = alg.name.lower()
row_list.append(d)
del arr
print_err("COMPLETED {} OK".format(n))
df = pd.DataFrame(row_list)
if not output is None:
df.to_csv(output)
print("File saved")
else:
df = pd.read_csv(input)
df['ncomp/n'] = np.where(df['length'] < 1, df['length'], df['ncomp']/df['length'])
df['nswap/n'] = np.where(df['length'] < 1, df['length'], df['nswap']/df['length'])
grouped = df.groupby(['length', 'algorithm']).mean(numeric_only=True)
ncomp_g = grouped.loc[:, ['ncomp']]
ncomp_g = pd.pivot_table(ncomp_g, values='ncomp', index='length', columns='algorithm')
nswap_g = grouped.loc[:, ['nswap']]
nswap_g = pd.pivot_table(nswap_g, values='nswap', index='length', columns='algorithm')
time_g = grouped.loc[:, ['time']]
time_g = pd.pivot_table(time_g, values='time', index='length', columns='algorithm')
ncomp_n_g = grouped.loc[:, ['ncomp/n']]
ncomp_n_g = pd.pivot_table(ncomp_n_g, values='ncomp/n', index='length', columns='algorithm')
nswap_n_g = grouped.loc[:, ['nswap/n']]
nswap_n_g = pd.pivot_table(nswap_n_g, values='nswap/n', index='length', columns='algorithm')
######################################################################################################
# import libraries
######################################################################################################
import streamlit as st
from streamlit import caching
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
import numpy as np
#import json
#import smtplib
from datetime import datetime, time
import pytz
import base64
from io import StringIO, BytesIO
# import pymongo
# from st_aggrid import AgGrid
# from pylogix import PLC
from PIL import Image
import io
import matplotlib.pyplot as plt
import cv2
from pyzbar.pyzbar import decode
import time
import qrcode
from PIL import Image
import json
from openpyxl import load_workbook
from openpyxl.drawing.image import Image as Image_openpyxl
from openpyxl.styles import Font, Color
#from webcam import webcam
import asyncio
import logging
import queue
import threading
import urllib.request
from pathlib import Path
from typing import List, NamedTuple
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
import av
import cv2
import matplotlib.pyplot as plt
import numpy as np
#import pydub
import streamlit as st
from aiortc.contrib.media import MediaPlayer
from streamlit_webrtc import (
AudioProcessorBase,
RTCConfiguration,
VideoProcessorBase,
WebRtcMode,
webrtc_streamer,
)
RTC_CONFIGURATION = RTCConfiguration(
{"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
)
from google.cloud import firestore
from google.oauth2 import service_account
key_dict = json.loads(st.secrets['textkey'])
creds = service_account.Credentials.from_service_account_info(key_dict)
db = firestore.Client(credentials=creds, project='logistica-invent')
######################################################################################################
# Functions
######################################################################################################
st.set_page_config(
page_title="Inventário Logístico",
)
m = st.markdown("""
<style>
div.stButton > button:first-child{
width: 100%;
font-size: 18px;
}
label.css-qrbaxs{
font-size: 18px;
}
p{
font-size: 18px;
}
h1{
text-align: center;
}
div.block-container{
padding-top: 1rem;
}
div.streamlit-expanderHeader{
width: 100%;
font-size: 18px;
background-color: rgb(240,242,246);
color: black;
}
</style>""", unsafe_allow_html=True) # font-weight: bold;
def read_barcodes(frame):
barcodes = decode(frame)
for barcode in barcodes:
x, y , w, h = barcode.rect #1
barcode_info = barcode.data.decode('utf-8')
return barcode_info
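# Usage sketch for read_barcodes (hypothetical image path; pyzbar's decode accepts
# any OpenCV BGR or greyscale frame):
#
#     frame = cv2.imread("qr_sample.png")
#     info = read_barcodes(frame)  # decoded text of the first barcode, or None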
def entrada_bobinas() -> None:
st.subheader('Inserir bobina')
dict_data = {}
with st.form(key='myform', clear_on_submit=True):
texto_qrcode = ''
dict_descricao_bobinas = {
'BOBINA ALUMINIO LATA 16 OZ COIL 00098': 50761710,
'BOBINA ALUMINIO LATA 12 OZ COIL 00098': 50679811,
'BOBINA ALUMINIO LATA 12 OZ COIL 98 SCRAP': 40011008,
'BOBINA ALUMINIO LACRE PRETO': 50552903,
'BOBINA ALUMINIO LACRE AZUL': 50527602,
'BOBINA ALUMINIO LATA 16 OZ': 50490762,
'BOBINA ALUMINIO LATA 12 OZ': 50490761,
'BOBINA ALUMINIO TAMPA PRATA REFRIG.': 50490760,
'BOBINA ALUMINIO TAMPA DOURADO CERVEJA': 50490599,
'BOBINA ALUMINIO LACRE PRATA': 50490598,
'BOBINA ALUMINIO LATA 12 OZ SCRAP': 40010824,
'BOBINA ALUMINIO TAMPA BRANCA': 50527252,
'BOBINA ALUMINIO LACRE DOURADO': 50771048}
tipo_bobinas = ['L3', 'L2', 'L1', 'M', 'H1', 'H2', 'H3']
dict_data['status'] = st.selectbox('Status da bobina', ['Liberado', 'Não conforme']) # data
dict_data['descricao'] = st.selectbox('Descrição:', list(dict_descricao_bobinas.keys()))
dict_data['conferente'] = st.text_input('Conferente:')
dict_data['quantidade'] = st.number_input('Quantidade:', format='%i', step=1, value=9000)
dict_data['lote'] = st.text_input('Lote SAP:')
dict_data['tipo'] = st.selectbox('Tipo', tipo_bobinas)
dict_data['data'] = st.date_input('Data entrada:')
submit_button = st.form_submit_button(label='Salvar bobina')
if submit_button:
if (dict_data['conferente'] == '') or (dict_data['lote'] == ''):
st.error('Preencha todos os campos')
else:
dict_data['descricao'] = dict_data['descricao'].replace(',',' ')
dict_data['conferente'] = dict_data['conferente'].replace(',',' ')
dict_data['lote'] = dict_data['lote'].replace(',',' ')
if dict_data['status'] == 'Não conforme':
dict_data['tipo_de_etiqueta'] = 'BLOQUEADO'
if dict_data['status'] == 'Liberado':
dict_data['tipo_de_etiqueta'] = 'LIBERADO'
dict_data['sap'] = dict_descricao_bobinas[dict_data['descricao']]
doc_ref = db.collection('bobinas').document('bobinas')
doc = doc_ref.get()
if doc.exists:
dicionario = doc.to_dict()
csv = dicionario['dataframe']
csv_string = StringIO(csv)
df_bobinas = pd.read_csv(csv_string, sep=',')
df_bobinas = df_bobinas.append(dict_data, ignore_index=True)
df_bobinas.drop_duplicates(inplace=True)
dados = {}
dados['dataframe'] = df_bobinas.to_csv(index=False)
try:
doc_ref.set(dados)
st.success('Bobina inserida com sucesso')
except:
st.error('Erro ao inserir bobina')
else:
df_bobinas = pd.DataFrame(dict_data, index=[0])
df_bobinas.drop_duplicates(inplace=True)
dados = {}
dados['dataframe'] = df_bobinas.to_csv(index=False)
try:
doc_ref.set(dados)
st.success('Bobina inserida com sucesso')
except:
st.error('Erro ao inserir bobina')
time.sleep(2)
st.experimental_rerun()
@st.cache(allow_output_mutation=True)
def read_cv2():
return cv2.VideoCapture(0)
def visualizar_inventario() -> None:
st.subheader('Inventários realizados')
doc_ref = db.collection('inventario').document('inventario')
doc = doc_ref.get()
if doc.exists:
dicionario = doc.to_dict()
csv = dicionario['dataframe']
csv_string = StringIO(csv)
df_bobinas = pd.read_csv(csv_string, sep=',')
df_bobinas['id'] = df_bobinas['nome_inventario'].astype(str) + '_' + df_bobinas['data_inventario'].astype(str)
df_bobinas.sort_values(by=['data_inventario'], ascending=False, inplace=True)
lista_inventarios = df_bobinas['id'].unique()
for inventario in lista_inventarios:
df_inventario = df_bobinas[df_bobinas['id'] == inventario]
with st.expander(f'{inventario} ({str(df_inventario.shape[0])})'):
st.dataframe(df_inventario)
else:
st.warning('Não foram realizados inventários')
def VideoProcessor(dataframe_string: str) -> None:
class video_processor(VideoProcessorBase):
def __init__(self):
self.result_queue = queue.Queue()
def recv(self, frame):
img = frame.to_ndarray(format='bgr24') #bgr24
# data = read_barcodes(img)
data = ''
barcodes = decode(img)
for barcode in barcodes:
x, y , w, h = barcode.rect #1
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 5)
data = barcode.data.decode('utf-8')
if data != '' and data is not None:
self.result_queue.put(data)
return av.VideoFrame.from_ndarray(img, format='bgr24')
webrtc_ctx = webrtc_streamer(key='opencv-filter',
video_processor_factory=video_processor,
mode=WebRtcMode.SENDRECV,
rtc_configuration=RTC_CONFIGURATION,
media_stream_constraints={"video": True, "audio": False},
async_processing=True)
if webrtc_ctx.state.playing:
#st.write('Bobina atual')
labels_placeholder = st.empty()
# st.write('Bobinas armazenadas')
# result_placeholder = st.empty()
while True:
if webrtc_ctx.video_processor:
try:
result = webrtc_ctx.video_processor.result_queue.get(timeout=2.0)
except queue.Empty:
result = None
labels_placeholder.warning('Nenhum QR code detectado')
else:
break
if result is not None:
if result not in st.session_state.data_inventario and result.count(',') == 7:
st.session_state.data_inventario = ''.join((st.session_state.data_inventario, result, '\n'))
labels_placeholder.success('Bobina adicionada ao inventário')
if result in st.session_state.data_inventario and result.count(',') == 7:
labels_placeholder.info('Bobina já adicionada ao inventário')
if result.count(',') != 7:
labels_placeholder.error('QR code inválido')
# result_placeholder.write(st.session_state.data_inventario)
def inserir_invetario() -> None:
st.subheader('Inventário de bobinas')
nome_inventario = st.text_input('ID do colaborador:')
encerrar_inventario = st.button('Encerrar inventário')
colunas = 'status,descricao,conferente,quantidade,lote,tipo,data,sap\n'
if 'data_inventario' not in st.session_state:
st.session_state['data_inventario'] = colunas
VideoProcessor('colunas')
csv_string = StringIO(st.session_state.data_inventario)
df_inventario_atual = pd.read_csv(csv_string, sep=',')
df_inventario_atual['data_inventario'] = datetime.now().strftime('%d/%m/%Y')
df_inventario_atual['nome_inventario'] = nome_inventario
if df_inventario_atual.shape[0] > 0:
st.write(df_inventario_atual)
else:
st.warning('Nenhuma bobina adicionada ao inventário')
if encerrar_inventario:
if st.session_state.data_inventario != colunas:
doc_ref = db.collection('inventario').document('inventario')
doc = doc_ref.get()
if doc.exists:
dicionario = doc.to_dict()
csv = dicionario['dataframe']
csv_string = StringIO(csv)
df_bobinas = pd.read_csv(csv_string, sep=',')
df_bobinas = df_bobinas.append(df_inventario_atual, ignore_index=True)
df_bobinas.drop_duplicates(inplace=True)
dados = {}
dados['dataframe'] = df_bobinas.to_csv(index=False)
try:
doc_ref.set(dados)
st.session_state['data_inventario'] = colunas
st.success('Inventário realizado com sucesso')
except:
st.error('Erro ao salvar inventário')
else:
df_bobinas = pd.DataFrame(df_inventario_atual, index=[0])
df_bobinas.drop_duplicates(inplace=True)
dados = {}
dados['dataframe'] = df_bobinas.to_csv(index=False)
try:
doc_ref.set(dados)
st.session_state['data_inventario'] = colunas
st.success('Inventário realizado com sucesso')
except:
st.error('Erro ao salvar inventário')
time.sleep(1)
st.experimental_rerun()
else:
st.warning('Não há bobinas para armazenar')
def download_etiqueta(texto_qrcode: str, dados_bobina: pd.DataFrame) -> None:
# imagem_bobina_qr = qrcode.make(texto_qrcode , version=12, box_size=2, border=2, error_correction=qrcode.constants.ERROR_CORRECT_H) #, fit=True)
# image_bytearray = io.BytesIO()
# imagem_bobina_qr.save(image_bytearray, format='PNG', name='qrcode.png')
if dados_bobina.loc['tipo_de_etiqueta'] == 'LIBERADO':
imagem_bobina_qr = qrcode.make(texto_qrcode , version=12, box_size=2, border=2, error_correction=qrcode.constants.ERROR_CORRECT_H) #, fit=True)
image_bytearray = io.BytesIO()
imagem_bobina_qr.save(image_bytearray, format='PNG', name='qrcode.png')
wb = load_workbook('LIBERADO.xlsx')
ws = wb.active
img = Image_openpyxl(image_bytearray)
ws.add_image(img,'F2')
ft = Font(bold=True, size=48)
ws['A2'] = dados_bobina.loc['sap']
ws['A3'] = dados_bobina.loc['descricao']
ws['A5'] = dados_bobina.loc['conferente']
ws['A9'] = dados_bobina.loc['lote']
ws['D9'] = dados_bobina.loc['data']
ws['A18'] = str(dados_bobina.loc['quantidade'])
ws['D18'] = dados_bobina.loc['tipo'].replace('BOBINA ALUMINIO ', '')
ws['A18'].font = ft
if dados_bobina.loc['tipo_de_etiqueta'] == 'BLOQUEADO':
imagem_bobina_qr = qrcode.make(texto_qrcode , version=10, box_size=2, border=2, error_correction=qrcode.constants.ERROR_CORRECT_H) #, fit=True)
image_bytearray = io.BytesIO()
imagem_bobina_qr.save(image_bytearray, format='PNG', name='qrcode.png')
wb = load_workbook('BLOQUEADO.xlsx')
ws = wb.active
#ws = wb['BLOQUEADO']
img = Image_openpyxl(image_bytearray)
ws.add_image(img,'A23')
#st.write(dados_bobina.astype(str))
ws['A2'] = dados_bobina.loc['sap']  # product code
ws['A3'] = dados_bobina.loc['quantidade']  # product quantity
ws['A5'] = dados_bobina.loc['lote']  # product batch (lot)
ws['A13'] = dados_bobina.loc['data']  # product entry date
wb.save('Etiqueta_download.xlsx')
stream = BytesIO()
wb.save(stream)
towrite = stream.getvalue()
b64 = base64.b64encode(towrite).decode() # some strings
# download link and file name
linko = f'<a href="data:application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;base64,{b64}" download="etiqueta.xlsx">Download etiqueta</a>'
st.markdown(linko, unsafe_allow_html=True)
def etiquetas_bobinas() -> None:
st.subheader('Etiquetas de bobinas')
doc_ref = db.collection('bobinas').document('bobinas')
doc = doc_ref.get()
if doc.exists:
dicionario = doc.to_dict()
csv = dicionario['dataframe']
csv_string = StringIO(csv)
df_bobinas = pd.read_csv(csv_string, sep=',')
"""Dataset preprocessing scripts"""
def process_mim_gold_ner():
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
import json
import re
from collections import defaultdict
conversion_dict = {
"O": "O",
"B-Person": "B-PER",
"I-Person": "I-PER",
"B-Location": "B-LOC",
"I-Location": "I-LOC",
"B-Organization": "B-ORG",
"I-Organization": "I-ORG",
"B-Miscellaneous": "B-MISC",
"I-Miscellaneous": "I-MISC",
"B-Date": "O",
"I-Date": "O",
"B-Time": "O",
"I-Time": "O",
"B-Money": "O",
"I-Money": "O",
"B-Percent": "O",
"I-Percent": "O",
}
def get_df(path: Path):
lines = path.read_text().split("\n")
data_dict = defaultdict(list)
tokens = list()
tags = list()
for line in tqdm(lines):
if line != "":
token, tag = line.split("\t")
tag = conversion_dict[tag]
tokens.append(token)
tags.append(tag)
else:
doc = " ".join(tokens)
doc = re.sub(r" ([.,])", r"\1", doc)
data_dict["doc"].append(doc)
data_dict["tokens"].append(tokens)
data_dict["ner_tags"].append(tags)
tokens = list()
tags = list()
return pd.DataFrame(data_dict)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(list(df.iterrows())):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
data_dir = Path("datasets") / "mim_gold_ner"
train_input_path = data_dir / "raw_train"
val_input_path = data_dir / "raw_val"
test_input_path = data_dir / "raw_test"
train_output_path = data_dir / "train.jsonl"
test_output_path = data_dir / "test.jsonl"
train_df = pd.concat((get_df(train_input_path), get_df(val_input_path)))
test_df = get_df(test_input_path)
export_as_jsonl(train_df, train_output_path)
export_as_jsonl(test_df, test_output_path)
def process_fdt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
dataset_dir = Path("datasets/fdt")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_paths = [
Path("datasets/fo_farpahc-ud-train.conllu"),
Path("datasets/fo_farpahc-ud-dev.conllu"),
Path("datasets/fo_farpahc-ud-test.conllu"),
]
output_paths = [
Path("datasets/fdt/train.jsonl"),
Path("datasets/fdt/val.jsonl"),
Path("datasets/fdt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_wikiann_fo():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
import re
dataset_dir = Path("datasets/wikiann_fo")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_path = Path("datasets/wikiann-fo.bio")
train_output_path = Path("datasets/wikiann_fo/train.jsonl")
test_output_path = Path("datasets/wikiann_fo/test.jsonl")
corpus = input_path.read_text().split("\n")
tokens = list()
ner_tags = list()
records = list()
for line in corpus:
if line != "":
data = line.split(" ")
tokens.append(data[0])
ner_tags.append(data[-1])
else:
assert len(tokens) == len(ner_tags)
doc = " ".join(tokens)
doc = re.sub(r" ([.,])", r"\1", doc)
records.append(dict(doc=doc, tokens=tokens, ner_tags=ner_tags))
tokens = list()
ner_tags = list()
# Show the NER tags in the dataset, as a sanity check
print(sorted(set([tag for record in records for tag in record["ner_tags"]])))
# Count the number of each NER tag, as a sanity check
tags = ["PER", "LOC", "ORG", "MISC"]
for tag in tags:
num = len([t for record in records for t in record["ner_tags"] if t[2:] == tag])
print(tag, num)
df = pd.DataFrame.from_records(records)
train, test = train_test_split(df, test_size=0.3)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(df.iterrows()):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
export_as_jsonl(train, train_output_path)
export_as_jsonl(test, test_output_path)
def process_idt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
dataset_dir = Path("datasets/idt")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_paths = [
Path("datasets/is_modern-ud-train.conllu"),
Path("datasets/is_modern-ud-dev.conllu"),
Path("datasets/is_modern-ud-test.conllu"),
]
output_paths = [
Path("datasets/idt/train.jsonl"),
Path("datasets/idt/val.jsonl"),
Path("datasets/idt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_suc3():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
from lxml import etree
import pandas as pd
from sklearn.model_selection import train_test_split
import io
sdt_dir = Path("datasets/suc3")
if not sdt_dir.exists():
sdt_dir.mkdir()
conversion_dict = dict(
O="O",
animal="MISC",
event="MISC",
inst="ORG",
myth="MISC",
other="MISC",
person="PER",
place="LOC",
product="MISC",
work="MISC",
)
input_path = Path("datasets/suc3.xml")
train_output_path = Path("datasets/suc3/train.jsonl")
test_output_path = Path("datasets/suc3/test.jsonl")
print("Parsing XML file...")
xml_data = input_path.read_bytes()
context = etree.iterparse(io.BytesIO(xml_data), events=("start", "end"))
ner_tag = "O"
records = list()
for action, elt in context:
if elt.tag == "name" and action == "start":
ner_tag = f'B-{conversion_dict[elt.attrib["type"]]}'
elif elt.tag == "name" and action == "end":
ner_tag = "O"
elif elt.tag == "w" and action == "start":
if elt.text:
tokens.append(elt.text)
ner_tags.append(ner_tag)
elif elt.tag == "w" and action == "end":
if ner_tag.startswith("B-"):
ner_tag = f"I-{ner_tag[2:]}"
elif elt.tag == "sentence" and action == "end":
if len(tokens):
doc = " ".join(tokens)
doc = re.sub(r" ([.,])", r"\1", doc)
assert len(tokens) == len(ner_tags)
record = dict(doc=doc, tokens=tokens, ner_tags=ner_tags)
records.append(record)
elif elt.tag == "sentence" and action == "start":
tokens = list()
ner_tags = list()
ner_tag = "O"
# Count the number of each NER tag, as a sanity check
tags = ["PER", "LOC", "ORG", "MISC"]
for tag in tags:
num = len([t for record in records for t in record["ner_tags"] if t[2:] == tag])
print(tag, num)
df = pd.DataFrame.from_records(records)
train, test = train_test_split(df, test_size=0.3)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(df.iterrows()):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
export_as_jsonl(train, train_output_path)
export_as_jsonl(test, test_output_path)
def process_sdt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
sdt_dir = Path("datasets/sdt")
if not sdt_dir.exists():
sdt_dir.mkdir()
input_paths = [
Path("datasets/sv_talbanken-ud-train.conllu"),
Path("datasets/sv_talbanken-ud-dev.conllu"),
Path("datasets/sv_talbanken-ud-test.conllu"),
]
output_paths = [
Path("datasets/sdt/train.jsonl"),
Path("datasets/sdt/val.jsonl"),
Path("datasets/sdt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=(
doc.replace(" s k", " s.k.")
.replace("S k", "S.k.")
.replace(" bl a", " bl.a.")
.replace("Bl a", "Bl.a.")
.replace(" t o m", " t.o.m.")
.replace("T o m", "T.o.m.")
.replace(" fr o m", " fr.o.m.")
.replace("Fr o m", "Fr.o.m.")
.replace(" o s v", " o.s.v.")
.replace("O s v", "O.s.v.")
.replace(" d v s", " d.v.s.")
.replace("D v s", "D.v.s.")
.replace(" m fl", " m.fl.")
.replace("M fl", "M.fl.")
.replace(" t ex", " t.ex.")
.replace("T ex", "T.ex.")
.replace(" f n", " f.n.")
.replace("F n", "F.n.")
),
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(
data[1]
.replace("s k", "s.k.")
.replace("S k", "S.k.")
.replace("t o m", "t.o.m.")
.replace("T o m", "T.o.m.")
.replace("fr o m", "fr.o.m.")
.replace("Fr o m", "Fr.o.m.")
.replace("bl a", "bl.a.")
.replace("Bl a", "Bl.a.")
.replace("m fl", "m.fl.")
.replace("M fl", "M.fl.")
.replace("o s v", "o.s.v.")
.replace("O s v", "O.s.v.")
.replace("d v s", "d.v.s.")
.replace("D v s", "D.v.s.")
.replace("t ex", "t.ex.")
.replace("T ex", "T.ex.")
.replace("f n", "f.n.")
.replace("F n", "F.n.")
)
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_norne_nn():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
ner_conversion_dict = {
"O": "O",
"B-LOC": "B-LOC",
"I-LOC": "I-LOC",
"B-PER": "B-PER",
"I-PER": "I-PER",
"B-ORG": "B-ORG",
"I-ORG": "I-ORG",
"B-MISC": "B-MISC",
"I-MISC": "I-MISC",
"B-GPE_LOC": "B-LOC",
"I-GPE_LOC": "I-LOC",
"B-GPE_ORG": "B-ORG",
"I-GPE_ORG": "I-ORG",
"B-PROD": "B-MISC",
"I-PROD": "I-MISC",
"B-DRV": "B-MISC",
"I-DRV": "I-MISC",
"B-EVT": "B-MISC",
"I-EVT": "I-MISC",
}
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
norne_dir = Path("datasets/norne_nn")
if not norne_dir.exists():
norne_dir.mkdir()
input_paths = [
Path("datasets/no_nynorsk-ud-train.conllu"),
Path("datasets/no_nynorsk-ud-dev.conllu"),
Path("datasets/no_nynorsk-ud-test.conllu"),
]
output_paths = [
Path("datasets/norne_nn/train.jsonl"),
Path("datasets/norne_nn/val.jsonl"),
Path("datasets/norne_nn/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
elif line.startswith("#"):
continue
elif line == "":
if tokens != []:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
ner_tags=ner_tags,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
deps.append(dep_conversion_dict[data[7]])
tag = data[9].replace("name=", "").split("|")[-1]
ner_tags.append(ner_conversion_dict[tag])
def process_norne_nb():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
ner_conversion_dict = {
"O": "O",
"B-LOC": "B-LOC",
"I-LOC": "I-LOC",
"B-PER": "B-PER",
"I-PER": "I-PER",
"B-ORG": "B-ORG",
"I-ORG": "I-ORG",
"B-MISC": "B-MISC",
"I-MISC": "I-MISC",
"B-GPE_LOC": "B-LOC",
"I-GPE_LOC": "I-LOC",
"B-GPE_ORG": "B-ORG",
"I-GPE_ORG": "I-ORG",
"B-PROD": "B-MISC",
"I-PROD": "I-MISC",
"B-DRV": "B-MISC",
"I-DRV": "I-MISC",
"B-EVT": "B-MISC",
"I-EVT": "I-MISC",
}
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
norne_dir = Path("datasets/norne_nb")
if not norne_dir.exists():
norne_dir.mkdir()
input_paths = [
Path("datasets/no_bokmaal-ud-train.conllu"),
Path("datasets/no_bokmaal-ud-dev.conllu"),
Path("datasets/no_bokmaal-ud-test.conllu"),
]
output_paths = [
Path("datasets/norne_nb/train.jsonl"),
Path("datasets/norne_nb/val.jsonl"),
Path("datasets/norne_nb/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
elif line.startswith("#"):
continue
elif line == "":
if tokens != []:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
ner_tags=ner_tags,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
deps.append(dep_conversion_dict[data[7]])
tag = data[9].replace("name=", "").split("|")[-1]
ner_tags.append(ner_conversion_dict[tag])
def process_nordial():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
import json
dataset_dir = Path("datasets/nordial")
if not dataset_dir.exists():
dataset_dir.mkdir()
train_input_path = Path("datasets/nordial_train.json")
val_input_path = Path("datasets/nordial_val.json")
test_input_path = Path("datasets/nordial_test.json")
output_paths = [dataset_dir / "train.jsonl", dataset_dir / "test.jsonl"]
train = pd.read_json(train_input_path, orient="records").dropna()
val = pd.read_json(val_input_path, orient="records").dropna()
train = train.append(val)
test = pd.read_json(test_input_path, orient="records").dropna()
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.category)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_norec():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
import json
dataset_dir = Path("datasets/norec")
if not dataset_dir.exists():
dataset_dir.mkdir()
train_input_path = Path("datasets/norec_train.json")
val_input_path = Path("datasets/norec_val.json")
test_input_path = Path("datasets/norec_test.json")
output_paths = [dataset_dir / "train.jsonl", dataset_dir / "test.jsonl"]
train = pd.read_json(train_input_path, orient="records").dropna()
val = pd.read_json(val_input_path, orient="records").dropna()
train = train.append(val)
test = pd.read_json(test_input_path, orient="records").dropna()
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.label)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_twitter_subj():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
Path("datasets/twitter_subj").mkdir()
input_path = Path("datasets/twitter_sent.csv")
output_paths = [
Path("datasets/twitter_subj/train.jsonl"),
Path("datasets/twitter_subj/test.jsonl"),
]
df = pd.read_csv(input_path, header=0).dropna()
train = df.query('part == "train"')
test = df.query('part == "test"')
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(tweet_id=row.twitterid, label=row["sub/obj"])
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_twitter_sent_sentiment():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
Path("datasets/twitter_sent").mkdir(exist_ok=True)
input_path = Path("datasets/twitter_sent/twitter_sent.csv")
output_paths = [
Path("datasets/twitter_sent/train.jsonl"),
Path("datasets/twitter_sent/test.jsonl"),
]
df = pd.read_csv(input_path, header=0).dropna()
train = df.query('part == "train"')
test = df.query('part == "test"')
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(tweet_id=row.twitterid, label=row.polarity)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_lcc():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
Path("datasets/lcc").mkdir()
input_paths = [Path("datasets/lcc1.csv"), Path("datasets/lcc2.csv")]
output_paths = [Path("datasets/lcc/train.jsonl"), Path("datasets/lcc/test.jsonl")]
dfs = list()
for input_path in input_paths:
df = pd.read_csv(input_path, header=0).dropna(subset=["valence", "text"])
for idx, row in df.iterrows():
try:
int(row.valence)
except:
df = df.drop(idx)
continue
if row.text.strip() == "":
df = df.drop(idx)
else:
if int(row.valence) > 0:
sentiment = "positiv"
elif int(row.valence) < 0:
sentiment = "negativ"
else:
sentiment = "neutral"
df.loc[idx, "valence"] = sentiment
dfs.append(df)
df = pd.concat(dfs, axis=0, ignore_index=True)
train, test = train_test_split(df, test_size=0.3, stratify=df.valence)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.valence)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_lcc2():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
Path("datasets/lcc2").mkdir()
input_path = Path("datasets/lcc2.csv")
output_paths = [Path("datasets/lcc2/train.jsonl"), Path("datasets/lcc2/test.jsonl")]
df = pd.read_csv(input_path, header=0).dropna(subset=["valence", "text"])
for idx, row in df.iterrows():
try:
int(row.valence)
except:
df = df.drop(idx)
continue
if row.text.strip() == "":
df = df.drop(idx)
else:
if int(row.valence) > 0:
sentiment = "positiv"
elif int(row.valence) < 0:
sentiment = "negativ"
else:
sentiment = "neutral"
df.loc[idx, "valence"] = sentiment
train, test = train_test_split(df, test_size=0.3, stratify=df.valence)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.valence)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_lcc1():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
Path("datasets/lcc1").mkdir()
input_path = Path("datasets/lcc1.csv")
output_paths = [Path("datasets/lcc1/train.jsonl"), Path("datasets/lcc1/test.jsonl")]
df = pd.read_csv(input_path, header=0).dropna(subset=["valence", "text"])
for idx, row in df.iterrows():
try:
int(row.valence)
except:
df = df.drop(idx)
continue
if row.text.strip() == "":
df = df.drop(idx)
else:
if int(row.valence) > 0:
sentiment = "positiv"
elif int(row.valence) < 0:
sentiment = "negativ"
else:
sentiment = "neutral"
df.loc[idx, "valence"] = sentiment
train, test = train_test_split(df, test_size=0.3, stratify=df.valence)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.valence)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_europarl_subj():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
Path("datasets/europarl_subj").mkdir()
input_path = Path("datasets/europarl2.csv")
output_paths = [
Path("datasets/europarl_subj/train.jsonl"),
Path("datasets/europarl_subj/test.jsonl"),
]
df = pd.read_csv(input_path, header=0).dropna()
train, test = train_test_split(df, test_size=0.3, stratify=df["sub/obj"])
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows(), total=len(split)):
data_dict = dict(text=row.text, label=row["sub/obj"])
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_europarl2():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
Path("datasets/europarl2").mkdir()
input_path = Path("datasets/europarl2.csv")
output_paths = [
Path("datasets/europarl2/train.jsonl"),
Path("datasets/europarl2/test.jsonl"),
]
df = pd.read_csv(input_path, header=0).dropna()
train, test = train_test_split(df, test_size=0.3, stratify=df.polarity)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows(), total=len(split)):
data_dict = dict(text=row.text, label=row.polarity)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_europarl1():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
input_path = Path("datasets/europarl1.csv")
output_paths = [
Path("datasets/europarl1/train.jsonl"),
Path("datasets/europarl1/test.jsonl"),
]
df = pd.read_csv(input_path, header=0).dropna(subset=["valence", "text"])
for idx, row in df.iterrows():
try:
int(row.valence)
except:
df = df.drop(idx)
continue
if row.text.strip() == "":
df = df.drop(idx)
else:
if int(row.valence) > 0:
sentiment = "positiv"
elif int(row.valence) < 0:
sentiment = "negativ"
else:
sentiment = "neutral"
df.loc[idx, "valence"] = sentiment
train, test = train_test_split(df, test_size=0.3, stratify=df.valence)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.valence)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_angrytweets():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
input_path = Path("datasets/angry_tweets/angry_tweets.csv")
output_paths = [
Path("datasets/angry_tweets/train.jsonl"),
Path("datasets/angry_tweets/test.jsonl"),
]
df = pd.read_csv(input_path, header=0)
for idx, row in df.iterrows():
labels = json.loads(row.annotation.replace("'", '"'))
if len(set(labels)) > 1 or "skip" in labels:
df = df.drop(idx)
else:
df.loc[idx, "annotation"] = labels[0]
train, test = train_test_split(df, test_size=0.3, stratify=df.annotation)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(tweet_id=row.twitterid, label=row.annotation)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_dkhate():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
input_paths = [
Path("datasets/dkhate/dkhate.train.tsv"),
Path("datasets/dkhate/dkhate.test.tsv"),
]
output_paths = [
Path("datasets/dkhate/dkhate_train.jsonl"),
Path("datasets/dkhate/dkhate_test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
df = pd.read_csv(input_path, sep="\t")
import tempfile
import unittest
import numpy as np
import pandas as pd
from airflow import DAG
from datetime import datetime
from mock import MagicMock, patch
import dd.api.workflow.dataset
from dd import DB
from dd.api.workflow.actions import Action
from dd.api.workflow.sql import SQLOperator
dd.api.workflow.dataset.is_ipython = lambda: True
dd.api.workflow.actions.is_ipython = lambda: True
from dd.api.contexts.distributed import AirflowContext
from dd.api.workflow.dataset import Dataset, DatasetLoad, DatasetTransformation
class TestDataset(unittest.TestCase):
def setUp(self):
self.workflow = MagicMock(spec_set=DAG("test_workflow", start_date=datetime.now()))
self.workflow.dag_id = "mock"
self.db = MagicMock()
self.db.query.result_value = None
def test_creating_dataset_should_add_task_to_workflow(self):
# Given
workflow = self.workflow
db = self.db
# When
_ = AirflowContext(workflow, db).create_dataset("table")
# Assert
workflow.add_task.assert_called_once()
def test_apply_method_should_run(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 7], [6, 7]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
self.assertEqual(arg1, 1)
self.assertEqual(arg2, 2)
self.assertEqual(arg3, 3)
odf = indf.applymap(lambda t: t + arg1 + arg2 + arg3)
self.assertTrue(odf.equals(expected_result1))
# When a valid execution
new_action = dataset.apply(my_apply_function, 1, 2, 3)
# Assert
self.assertFalse(new_action.executed)
new_action.execute()
def test_apply_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
pass
# When
new_action = dataset.apply(my_apply_function, 1, 2)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_action = dataset.apply(my_apply_function)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
def test_transform_method_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.transform(lambda x: x)
# Assert
self.assertIsNot(new_dataset, dataset)
self.assertIsInstance(new_dataset, Dataset)
def test_transform_method_should_handle_optional_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
dataset2 = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
# With a function with only args
def my_transform_function(indf, df2, arg1=0):
return indf.applymap(lambda t: t + arg1)
# When
new_dataset = dataset.transform(my_transform_function,
arg1=1,
output_table="mytable",
datasets=[dataset2],
write_options=dict(if_exists="replace"))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
self.assertTrue(new_dataset.output_table == "mytable")
def test_transform_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 4], [3, 4]])
# With a function with only args
def my_transform_function(indf, arg1, arg2, arg3):
return indf.applymap(lambda t: t + arg1 + arg2 + arg3)
# When
new_dataset = dataset.transform(my_transform_function, 1, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function, 1, 1, 1)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
def test_transform_method_should_handle_args_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
# With a function with arg and kwargs
def mytransfun(indf, myarg1, mynamedarg1=1):
return indf.applymap(lambda t: t + myarg1 - mynamedarg1)
# When
new_dataset = dataset.transform(mytransfun, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
# When
new_dataset = dataset.transform(mytransfun, 2, mynamedarg1=0)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_apply_function_to_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
dataset2 = context.create_dataset("table")
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[0.0, 1], [0.0, 1]])
# When
new_dataset = dataset.transform(lambda x: x.applymap(lambda t: t + 1))
new_dataset2 = dataset2.transform(lambda df: df.fillna(0))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertIsNone(new_dataset2.dataframe)
self.assertFalse(new_dataset.executed)
self.assertFalse(new_dataset2.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
new_dataset2.execute()
new_dataset2.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_be_able_to_process_multiple_datasets(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset1 = context.create_dataset("table")
dataset2 = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
new_dataset = dataset1.transform(mock_function, datasets=[dataset2])
# When
new_dataset.execute()
new_dataset.collect()
# Check
args, kwargs = mock_function.call_args
self.assertTrue(args[0], dataset1)
self.assertTrue(args[1], dataset2)
def test_collect_should_return_dataframe_attribute_when_non_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
initial_dataframe = pd.DataFrame([[0.0, 1], [0.0, 1]])
dataset.dataframe = initial_dataframe
# When
dataframe = dataset.collect()
# Assert
self.assertIsInstance(dataframe, pd.DataFrame)
self.assertTrue(dataframe.equals(initial_dataframe))
def test_collect_should_call_db_retrieve_table_when_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
output_table = "output_table"
dataset.output_table = output_table
# When
dataset.collect()
# Assert
self.db.retrieve_table.assert_called_once_with(output_table)
def test_split_train_test_should_return_two_datasets(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
train, test = dataset.split_train_test()
# Assert
self.assertIsInstance(train, Dataset)
self.assertIsInstance(test, Dataset)
def test_join_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset_left = context.create_dataset("table")
dataset_right = context.create_dataset("table")
# When
join = dataset_left.join(dataset_right)
# Check
self.assertIsInstance(join, Dataset)
def test_execute_should_call_operator_execute_once(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute()
# Check
dataset.operator.execute.assert_called_once()
def test_execute_with_force_should_call_operator_execute_twice(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute(force=True)
# Check
self.assertEqual(dataset.operator.execute.call_count, 2)
def test_execute_when_operator_is_DDOperator_should_return_resulted_dataframe_from_operator_get_result(self):
# Given
dataset = Dataset(MagicMock(), 'output')
dataset.executed = False
dataset.operator = MagicMock()
dataset.operator.execute = lambda: 'output_table'
dataset.operator.get_result = lambda: 'Dataframe'
dataset.operator.set_upstream = None
# When
result = dataset.execute()
# Check
self.assertEqual(result, 'Dataframe')
def test_transform_with_if_exists_should_append_to_existing_table(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
new_dataset = dataset.transform(lambda x: x,
write_options=dict(if_exists="append"))
# When
new_dataset.execute()
# Check
self.assertIn("if_exists", self.db.import_dataframe.call_args[1])
self.assertEqual(self.db.import_dataframe.call_args[1]["if_exists"],
"append")
def test_select_columns_should_create_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"])
# Check
self.assertIsInstance(new_dataset, Dataset)
self.assertIsNot(new_dataset, dataset)
def test_default_is_cached_should_match_context_auto_persistence(self):
# Given
persisted_context = MagicMock()
persisted_context.auto_persistence = True
unpersisted_context = MagicMock()
unpersisted_context.auto_persistence = False
# When
persisted_dataset = Dataset(persisted_context, "foo")
unpersisted_dataset = Dataset(unpersisted_context, "bar")
# Check
self.assertTrue(persisted_dataset.is_cached)
self.assertFalse(unpersisted_dataset.is_cached)
def test_is_cached_attribute_may_be_set_by_cache_method(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
dataset.cache()
# Check
self.assertTrue(dataset.is_cached)
# Then when
dataset.cache(boolean=False)
# Check
self.assertFalse(dataset.is_cached)
def test_memory_usage_returns_integer(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
usage = dataset.memory_usage
# Check
self.assertIsInstance(usage, int)
def test_providing_output_table_in_select_columns_must_set_output_table(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"],
output_table="myoutput.table")
# Check
self.assertEqual(new_dataset.output_table, "myoutput.table")
def test_sql_query_should_return_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.sql.query("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(new_dataset, Dataset)
def test_sql_query_should_call_db_query(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
qw = dataset.sql.query("SELECT * FROM foo.bar")
qw.execute() # In airflow context we force execution
qw.head()
# Check
self.db.query.assert_called_once_with("SELECT * FROM foo.bar")
def test_sql_execute_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
action = dataset.sql.execute("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(action, Action)
def test_sql_execute_should_call_db_execute(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
action = dataset.sql.execute("SELECT * FROM foo.bar")
# When
action.execute(force=True)
# Check
self.db.execute.assert_called_once_with("SELECT * FROM foo.bar")
def test_apply_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
# When
result = dataset.apply(mock_function)
# Check
self.assertIsInstance(result, Action)
def test_sql_should_be_SQLOperator(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIsInstance(result, SQLOperator)
def test_sql_should_have_same_context(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIs(result.context, dataset.context)
def test_multitransform_method_should_allow_multiple_output_datasets(self):
# Given
with tempfile.NamedTemporaryFile() as tmp:
workflow = DAG("test_workflow", start_date=datetime.now())
db = DB(dbtype='sqlite', filename=tmp.name)
ctx = AirflowContext(workflow, db)
# given
df = pd.DataFrame([[np.nan, 2], [1, 2]])
df.columns = map(lambda x: "num_" + str(x), df.columns)
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
expected_result2.columns = map(lambda x: "num_" + str(x), expected_result2.columns)
db.import_dataframe(df, "test_num", index=False)
dataset = ctx.table("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
# then
self.assertIsNone(new_df1.dataframe)
self.assertFalse(new_df1.executed)
self.assertIsNone(new_df2.dataframe)
self.assertFalse(new_df2.executed)
# finally
new_df1.execute()
# same result
odf1 = new_df1.collect()
odf2 = new_df2.collect()
pd.testing.assert_frame_equal(odf1, df)
pd.testing.assert_frame_equal(odf2, expected_result2)
def test_multitransform_should_handle_column_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then columns must be equal
new_df1_cols = list(new_df1.columns)
new_df2_cols = list(new_df2.columns)
self.assertEqual(new_df1_cols, new_df2_cols)
def test_multitransform_should_handle_shape_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then shapes must be equal
new_df1_sh = new_df1.shape
new_df2_sh = new_df2.shape
self.assertEqual(new_df1_sh, new_df2_sh)
def test_multitransform_should_handle_memory_usage_method(self):
# Given
ctx = self._get_airflow_context()
ctx.db.import_dataframe(pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]),
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then memory usage must be equal
mu1 = new_df1.memory_usage
mu2 = new_df2.memory_usage
self.assertEqual(mu1, mu2)
def test_multitransform_should_handle_head_method(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
# then head must be equal
pd.testing.assert_frame_equal(new_df1.head(2), df.head(2))
pd.testing.assert_frame_equal(new_df2.head(2), df.head(2) + 1)
def test_multitransform_should_handle_sql_operator(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
result = ctx.db.read_sql("select * from odf1")
# then dataframe must be equal
pd.testing.assert_frame_equal(result, df)
def test_multitransform_should_handle_join_method(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
new_df3 = new_df1.join(new_df2, left_index=True, right_index=True)
result = new_df3.collect()
# then dataframe must be equal
pd.testing.assert_frame_equal(result, df.merge(df + 1, left_index=True, right_index=True))
def test_multitransform_should_handle_select_columns_method(self):
# Given
ctx = self._get_airflow_context()
df = pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"])
ctx.db.import_dataframe(df,
"test_num", index=False)
dataset = ctx.create_dataset("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
new_df1.execute()
new_df3 = new_df1.select_columns(["n1"])
result = new_df3.collect()
# then dataframe must be equal
pd.testing.assert_frame_equal(result, df[["n1"]])
def test_multitransform_should_handle_split_train_test_method(self):
# Given
ctx = self._get_airflow_context()
df = | pd.DataFrame([[np.nan, 2], [1, 2]], columns=["n1", "n2"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import datetime
import os
import time
import pandas as pd
from quantity.digger.errors import ArgumentError
def csv2frame(fname):
return | pd.read_csv(fname, index_col=0, parse_dates=True) | pandas.read_csv |
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from credit_scoring.metrics.credit_score import CreditScore
class CalculatedLift(CreditScore):
def __init__(self, pred, target, bucket=10):
super().__init__(pred, target)
self.bucket = bucket
self.pred = 1 - self.pred
def lift(self) -> pd.DataFrame:
"""
        :return: A lift table containing Gain and Lift for each decile
"""
# Create dataframe contain target and prediction values
data = | pd.DataFrame({'Target': self.target, 'Pred': self.pred}) | pandas.DataFrame |
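
# A generic sketch of a decile-lift computation (not the class's own, elided implementation):
# bucket predictions into deciles with pd.qcut and compare each bucket's event rate to the
# overall rate; the function name is a placeholder.
def decile_lift_sketch(pred, target, bucket=10):
    df = pd.DataFrame({'Target': target, 'Pred': pred})
    df['Decile'] = pd.qcut(df['Pred'], bucket, labels=False, duplicates='drop')
    overall_rate = df['Target'].mean()
    table = df.groupby('Decile')['Target'].agg(['count', 'sum', 'mean'])
    table['Lift'] = table['mean'] / overall_rate
    return table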
#!/usr/bin/env python
# By <NAME>
# Sept 10, 2020
# Store queried ZTF objects in database
import sqlite3
import pandas as pd
from sqlite3 import Error
import os
import inspect
import pdb
import sys
# from .constants import DB_DIR
DB_DIR = '../local/db/'
def create_connection(db_file):
""" Create a database connection to the SQLite database
specified by db_file
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
raise Exception(f"Error creating connection {e}")
return conn
def create_table(conn, create_table_sql):
""" Create a table from the create_table_sql statement
"""
try:
cur = conn.cursor()
cur.execute(create_table_sql)
cur.close()
except Error as e:
raise Exception(f"Error creating table {e}")
def cache_ZTF_object(conn, ztf_object):
"""
Add ZTF object to database.
Parameters
----------
conn: sqlite3.Connection object
The connection to the database
ztf_object: list or str
        Data to insert into the database in the following form: (ZTF_object_id,SIMBAD_otype,ra,dec,xray_name)
Returns
-------
cur.lastrowid: int
Id of the last row inserted into the database
"""
sql = ''' INSERT INTO ZTF_objects(ZTF_object_id,SIMBAD_otype,ra,dec,xray_name)
VALUES(?,?,?,?,?) '''
cur = conn.cursor()
cur.execute(sql, ztf_object)
conn.commit()
return cur.lastrowid
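
# A minimal usage sketch for the two helpers above. The database file name, ZTF id,
# coordinates and X-ray name are made-up placeholders, and the ZTF_objects table is
# assumed to exist already (created via create_table).
def _example_cache_usage(db_path="ZTF_objects.db"):
    conn = create_connection(db_path)
    return cache_ZTF_object(
        conn, ("ZTF18abcdefg", "CV*", 150.0213, 2.2241, "1RXS J100005.1+022113")
    )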
def insert_data(conn, table, val_dict):
cur = conn.cursor()
try:
cols = tuple(val_dict.keys())
vals = tuple('{}'.format(val_dict[col]) for col in cols)
if len(cols) == 1:
cols = f"({cols[0]})"
vals = f"('{vals[0]}')"
cur.execute(f"INSERT INTO {table}{str(cols)} VALUES {str(vals)}")
except Error as e:
raise Exception(f"Error inserting data into {table}: {e}")
conn.commit()
def select_ZTF_objects(conn, ztf_object_ids):
"""
Select rows from database with id(s) in ztf_object_ids.
Parameters
----------
conn: sqlite3.Connection object
The connection to the database
ztf_object_ids: str or tuple of strs
The ztf_object_ids to select from the database
    Returns
    -------
df: pandas DataFrame
Rows in the database corresponding to ztf_object_ids
"""
cur = conn.cursor()
if isinstance(ztf_object_ids, str):
try:
cur.execute("SELECT * FROM ZTF_objects WHERE ZTF_object_id=?", (ztf_object_ids,))
except Error as e:
raise Exception(f"Error selection objecs from ZTF_objects {e}")
else:
try:
cur.execute("SELECT * FROM ZTF_objects WHERE ZTF_object_id IN {}".format(str(ztf_object_ids)))
except Error as e:
raise Exception(f"Error selecting all objects from ZTF_objects {e}")
rows = cur.fetchall()
df = | pd.DataFrame(rows, columns=["ZTF_object_id","SIMBAD_otype","ra","dec","xray_name", "SIMBAD_include"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Plot mutation prediction results
# In this notebook, we'll compare the results of our mutation prediction experiments for expression and methylation data only, predicting a binary mutated/not mutated label for each gene (see `README.md` for more details). The files analyzed in this notebook are generated by the `run_mutation_prediction.py` script.
#
# Notebook parameters:
# * SIG_ALPHA (float): significance cutoff (after FDR correction)
# In[1]:
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
import mpmp.config as cfg
import mpmp.utilities.analysis_utilities as au
# In[2]:
# set results directory
# this is a mess, TODO move results into same location
results_dir1 = Path(cfg.results_dirs['mutation'], 'bmiq_results', 'gene').resolve()
results_dir2 = Path(cfg.results_dirs['mutation'], 'bmiq_results_2', 'gene').resolve()
results_dir3 = Path(cfg.results_dirs['mutation'], 'bmiq_results_me_control', 'gene').resolve()
# set significance cutoff after FDR correction
SIG_ALPHA = 0.001
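
# A minimal illustration of the 'fdr_bh' correction applied below: Benjamini-Hochberg
# on a few hypothetical p-values at the SIG_ALPHA cutoff (statsmodels is assumed to be
# available; the p-values are made up).
from statsmodels.stats.multitest import multipletests

_example_pvals = [0.0004, 0.02, 0.8]
_reject, _corr_pvals, _, _ = multipletests(_example_pvals, alpha=SIG_ALPHA, method='fdr_bh')
# _reject flags which of the hypothetical p-values survive FDR correction at SIG_ALPHA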
# In[3]:
# load raw data
results_df1 = au.load_stratified_prediction_results(results_dir1, 'gene')
results_df1.loc[results_df1.training_data == 'me_27k', 'training_data'] = 'me_27k_corrected'
results_df2 = au.load_stratified_prediction_results(results_dir2, 'gene')
results_df2.loc[results_df2.training_data == 'me_27k', 'training_data'] = 'me_27k_corrected'
results_df3 = au.load_stratified_prediction_results(results_dir3, 'gene')
results_df = pd.concat((results_df1, results_df2, results_df3))
print(results_df.shape)
print(results_df.seed.unique())
print(results_df.training_data.unique())
results_df.head()
# In[4]:
all_results_df = pd.DataFrame()
for training_data in results_df.training_data.unique():
data_results_df = au.compare_results(results_df[results_df.training_data == training_data],
identifier='identifier',
metric='aupr',
correction=True,
correction_method='fdr_bh',
correction_alpha=SIG_ALPHA,
verbose=True)
data_results_df['training_data'] = training_data
data_results_df.rename(columns={'identifier': 'gene'}, inplace=True)
all_results_df = pd.concat((all_results_df, data_results_df))
# now filter out genes that don't have comparisons for all data types
data_type_counts = all_results_df.groupby('gene').count().training_data
valid_genes = data_type_counts[data_type_counts == len(results_df.training_data.unique())].index
all_results_df = all_results_df[
all_results_df.gene.isin(valid_genes)
]
all_results_df.sort_values(by='p_value').head(10)
# In[5]:
all_results_df['nlog10_p'] = -np.log10(all_results_df.corr_pval)
sns.set({'figure.figsize': (24, 6)})
sns.set_style('whitegrid')
fig, axarr = plt.subplots(1, 3)
# all plots should have the same axes for a fair comparison
xlim = (-0.2, 1.0)
y_max = all_results_df.nlog10_p.max()
ylim = (0, y_max+3)
# function to add gene labels to points
def label_points(x, y, gene, ax):
text_labels = []
a = pd.DataFrame({'x': x, 'y': y, 'gene': gene})
for i, point in a.iterrows():
if point['y'] > -np.log10(SIG_ALPHA):
text_labels.append(
ax.text(point['x'], point['y'], str(point['gene']))
)
return text_labels
# plot mutation prediction from expression, in a volcano-like plot
for ix, training_data in enumerate(sorted(all_results_df.training_data.unique())):
ax = axarr[ix]
data_results_df = all_results_df[all_results_df.training_data == training_data]
sns.scatterplot(data=data_results_df, x='delta_mean', y='nlog10_p', hue='reject_null',
hue_order=[False, True], ax=ax)
# add vertical line at 0
ax.axvline(x=0, linestyle='--', linewidth=1.25, color='black')
# add horizontal line at statistical significance threshold
l = ax.axhline(y=-np.log10(SIG_ALPHA), linestyle='--', linewidth=1.25, zorder=-1)
# label horizontal line with significance threshold
# (matplotlib makes this fairly difficult, sadly)
ax.text(0.9, -np.log10(SIG_ALPHA)+0.1,
r'$\mathbf{{\alpha = {}}}$'.format(SIG_ALPHA),
va='center', ha='center', color=l.get_color(),
backgroundcolor=ax.get_facecolor(),
zorder=0)
ax.set_xlabel('AUPR(signal) - AUPR(shuffled)', size=14)
ax.set_ylabel(r'$-\log_{10}($adjusted $p$-value$)$', size=14)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.legend(title=r'Reject $H_0$', loc='upper left')
ax.set_title(r'Mutation prediction, {} data'.format(training_data), size=14)
# label genes and adjust text to not overlap
# automatic alignment isn't perfect, can align by hand in inkscape if necessary
text_labels = label_points(data_results_df['delta_mean'],
data_results_df['nlog10_p'],
data_results_df.gene,
ax)
adjust_text(text_labels,
ax=ax,
expand_text=(1., 1.),
lim=5)
print('{}: {}/{}'.format(
training_data,
np.count_nonzero(data_results_df.reject_null),
data_results_df.shape[0]
))
# In[6]:
# compare all data modalities against each other
import itertools as it
# function to add gene labels to points
def label_points(x, y, gene, ax):
text_labels = []
a = | pd.DataFrame({'x': x, 'y': y, 'gene': gene}) | pandas.DataFrame |
from elsapy.elsclient import ElsClient
from elsapy.elsdoc import FullDoc, AbsDoc
import pandas as pd
from . import requests
from . import error
@error.error
class elsapy_connector:
def __init__(self):
req = requests.request_handler()
self.client = ElsClient(req.apikey)
def pii_search(self, df, rows=None):
res = []
count = 0
if rows:
df = df.loc[: rows - 1]
pii = df["pii"].dropna()
for i in pii:
data = FullDoc(sd_pii=i)
if data.read(self.client):
if type(data.data["originalText"]) == str:
res.append([i, data, "Paper found"])
else:
res.append([i, data, "No full text article found"])
count += 1
else:
res.append([i, data, "No full text article found"])
count += 1
if not count % 20:
print(f"{count} articles processed")
self.pii_result = | pd.DataFrame(res) | pandas.DataFrame |
import os
import h5py
import numpy as np
from os import listdir
from os.path import isfile, join
import pandas as pd
import bisect
from ECG_preprocessing import *
from ECG_feature_extraction import *
from PPG_preprocessing import *
from PPG_feature_extraction import *
import csv
import xlrd
from sklearn.impute import SimpleImputer
def load_label_event(patient_folder_path,excel_file_path,excel_sheet_name,windowsize,fs = 240,
ecg_features = {'RR':True,'Wavelet':{'family':'db1','level':3}},
ppg_features = {'all':True,'st':True, 'dt':True, 'half_width':True,'two_third_width':True},save_file_path = None):
    # Prepare the modeling dataset (a CSV of ECG and PPG features per labeled event) ready for training
# load label event from label event excel file
labelevent = pd.read_excel(excel_file_path,sheet_name=excel_sheet_name)
save_file_name = 'dataset_for_modeling.csv'
save_file = save_file_path+'/'+save_file_name
#writing header to file
with open(save_file,'w') as f:
writer = csv.writer(f)
if windowsize==1:
writer.writerow(
['patient', 'block', 'start_time', 'end_time', 'pre_R', 'post_R', 'local_R', 'st', 'dt', 'half_width',
'two_third_width'])
else:
writer.writerow(
['patient', 'block', 'start_time', 'end_time', 'pre_R_median', 'pre_R_IQR', 'post_R_median',
'post_R_IQR', 'local_R_median', 'local_R_IQR', 'st_median', 'st_IQR', 'dt_median', 'dt_IQR',
'half_width_median', 'half_width_IQR', 'two_third_width_median', 'two_third_width_IQR'])
for index,record in labelevent.iterrows():
label_record = record.tolist()
patient_id,event_start_time,event_end_time,label = label_record
patient_file_path = patient_folder_path+'/'+str(patient_id)
for block_file in listdir(patient_file_path):
# trying to find the ecg signal and ppg signal during the label event time
block_path = patient_file_path+'/'+block_file
all_signals = h5py.File(block_path, 'r')
signals_keys = set(all_signals.keys())
block_start_time,block_end_time = all_signals['time'][0],all_signals['time'][-1]
if block_start_time <= event_start_time <= event_end_time <= block_end_time:
start_index = int((event_start_time-block_start_time)*fs)
end_index = int((event_end_time-block_start_time)*fs)
blockid = int(block_path.split('block_')[1].split('.')[0])
blockl = end_index - start_index
event_time = all_signals['time'][start_index:end_index +1]
len_signal = len(event_time)
ecg, ppg = None, None
if 'GE_WAVE_ECG_2_ID' in signals_keys:
ecg = all_signals['GE_WAVE_ECG_2_ID'][start_index:end_index +1]
if 'GE_WAVE_SPO2_WAVE_ID' in signals_keys:
ppg = all_signals['GE_WAVE_SPO2_WAVE_ID'][start_index:end_index +1]
                if (ppg is None) or (ecg is None) or (not ppg.any()) or (not ecg.any()): break  # .any() must be called; the bare attribute is always truthy
# ECG signal preprocessing for denoising and R-peak detection
R_peak_index,ecg_denoise = ecg_preprocessing_final(ecg) # the location of R_peak during the label event
num_beats = len(R_peak_index) # the total number of beats during the label event
R_peak = [] # the time when the R-peak appears
for i in range(num_beats):
R_peak.append(event_time[R_peak_index[i]])
if ecg_features['RR']:
ecg_RR_feature = compute_RR_intervals(R_peak)
if ecg_features['Wavelet']:
family = ecg_features['Wavelet']['family']
level = ecg_features['Wavelet']['level']
ecg_wt_feature = compute_wavelet_features(ecg_denoise,R_peak_index,wavelet = family,level = level,windowsize=windowsize)
                if ppg.any():  # call .any(); the bare attribute is always truthy
# PPG signal preprocessing for denoising
# print("un-denoised ppg", ppg)
ppg = PPG_denoising(ppg)
# PPG signal feature extraction
ppg_extracted_features = PPG_feature_extraction(ppg,event_time,ppg_features,R_peak_index)
if windowsize==1: #1beat extraction
for i in range(num_beats):
windowL = int(max(R_peak_index[i] - 85, 0)) # not exact, just for plotting
windowR = int(min(R_peak_index[i] + 85, len_signal-1))
temp = [patient_id, blockid, event_time[windowL], event_time[windowR]]
if ecg_features['RR']:
temp.append(ecg_RR_feature.pre_R[i])
temp.append(ecg_RR_feature.post_R[i])
temp.append(ecg_RR_feature.local_R[i])
if ppg_features['st']:
temp.append(ppg_extracted_features.st[i])
if ppg_features['dt']:
temp.append(ppg_extracted_features.dt[i])
if ppg_features['half_width']:
temp.append(ppg_extracted_features.half_width[i])
if ppg_features['two_third_width']:
temp.append(ppg_extracted_features.two_third_width[i])
if ecg_features['Wavelet']:
temp.extend(ecg_wt_feature[i])
temp.append(label)
if label == 'PP' or label == 'PS':
health = 0
else:
health = 1
temp.append(health)
writer.writerow(temp)
else: #dealing with multi-beat window size
start_buffer = (windowsize - 1) // 2
end_buffer = windowsize // 2
# quartiles of selected features within window
q1i, q2i, q3i = [int(0.25 * windowsize), int(0.5 * windowsize), int(0.75 * windowsize)]
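                    # e.g. windowsize = 5 gives start_buffer = 2, end_buffer = 2 and
                    # (q1i, q2i, q3i) = (1, 2, 3); the commented-out median/IQR variant
                    # further below indexes a sorted window at these quartile positions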
for i in range(start_buffer, num_beats-end_buffer):
windowL = int(max(R_peak_index[i - start_buffer] - 85, 0)) #not exact, just for plotting
windowR = int(min(R_peak_index[i + end_buffer] + 85,len_signal-1))
temp = [patient_id,blockid,event_time[windowL],event_time[windowR]]
if ecg_features['RR']:
#getting RR features in window
pre_R = ecg_RR_feature.pre_R[(i-start_buffer):(i+end_buffer+1)]; #pre_R = np.sort(pre_R)
post_R = ecg_RR_feature.post_R[(i-start_buffer):(i+end_buffer+1)]; #post_R = np.sort(post_R)
local_R = ecg_RR_feature.local_R[(i-start_buffer):(i+end_buffer+1)]; #local_R = np.sort(local_R)
temp.extend(pre_R); temp.extend(post_R); temp.extend(local_R)
#if just keeping median and interquartile-range (IQR) of selected features
#temp.append(pre_R[q2i]); temp.append(pre_R[q3i] - pre_R[q1i])
#temp.append(post_R[q2i]); temp.append(post_R[q3i] - post_R[q1i])
#temp.append(local_R[q2i]); temp.append(local_R[q3i] - local_R[q1i])
if ppg_features['all']:
st = ppg_extracted_features.st[(i-start_buffer):(i+end_buffer+1)]; #st = np.sort(st)
#temp.append(st[q2i]); temp.append(st[q3i] - st[q1i])
dt = ppg_extracted_features.dt[(i-start_buffer):(i+end_buffer+1)]; #dt = np.sort(dt)
#temp.append(dt[q2i]); temp.append(dt[q3i] - dt[q1i])
hw = ppg_extracted_features.half_width[(i-start_buffer):(i+end_buffer+1)]; #hw = np.sort(hw)
#temp.append(hw[q2i]); temp.append(hw[q3i] - hw[q1i])
ttw = ppg_extracted_features.two_third_width[(i-start_buffer):(i+end_buffer+1)]; #ttw = np.sort(ttw)
#temp.append(ttw[q2i]); temp.append(ttw[q3i] - ttw[q1i])
temp.extend(st); temp.extend(dt); temp.extend(hw); temp.extend(ttw)
if ecg_features['Wavelet']:
temp.extend(ecg_wt_feature[i-start_buffer])
                        # finally, write the label and health columns
temp.append(label)
if label == 'PP' or label == 'PS':
health = 0
else:
health = 1
temp.append(health)
writer.writerow(temp)
break
else: continue
return None
def clean_data(path_to_data):
'''
    Takes the path to loaded data and writes a new file of cleaned data.
    Cleaned data has NaN, [], or empty entries replaced using median imputation
'''
data = pd.read_csv(path_to_data, low_memory=False)
#data.replace(np.nan, -10000)
labels = list(data)
featuredata = data.iloc[:,:-2].to_numpy()
imp = SimpleImputer(missing_values=np.nan, strategy='median')
imp = imp.fit(featuredata)
featuredata = imp.transform(featuredata)
dic = {}
n = data.shape[1]
for i in range(n-2):
dic[labels[i]] = featuredata[:,i]
dic[labels[-2]] = data.iloc[:,-2]
dic[labels[-1]] = data.iloc[:,-1]
| pd.DataFrame(data=dic) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
if columns is None or col.name in columns:
counts.append(col.null_count)
return counts
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
nthreads=1):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, nthreads=nthreads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
self.assertRaises(exc, f)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = read_feather(path)
assert_frame_equal(result, ex_frame)
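    # Aside: the upcast exercised above mirrors plain pandas behaviour, e.g.
    #     pd.Series([1, 2, np.nan]).dtype  # float64 -- NaN forces integer data to float
    # which is why ex_frame is built with .astype('f8') rather than an integer dtype.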
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter()
writer.open(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_buffer_bounds_error(self):
# ARROW-1676
path = random_path()
self.test_files.append(path)
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
writer = FeatherWriter()
writer.open(path)
writer.write_array('arr', values)
writer.close()
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
assert_frame_equal(result, expected)
self._check_pandas_roundtrip(expected, null_counts=[1])
def test_boolean_object_nulls(self):
repeats = 100
arr = np.array([False, None, True] * repeats, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df, null_counts=[1 * repeats])
def test_delete_partial_file_on_error(self):
if sys.platform == 'win32':
pytest.skip('Windows hangs on to file handle for some reason')
# strings will fail
df = pd.DataFrame(
{
'numbers': range(5),
'strings': [b'foo', None, u'bar', 'qux', np.nan]},
columns=['numbers', 'strings'])
path = random_path()
try:
write_feather(df, path)
        except Exception:
pass
assert not os.path.exists(path)
def test_strings(self):
repeats = 1000
        # we have mixed bytes, unicode, strings
values = [b'foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
self._assert_error_on_write(df, ValueError)
# embedded nulls are ok
values = ['foo', None, 'bar', 'qux', None]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
def test_empty_strings(self):
df = pd.DataFrame({'strings': [''] * 10})
self._check_pandas_roundtrip(df)
def test_all_none(self):
df = pd.DataFrame({'all_none': [None] * 10})
self._check_pandas_roundtrip(df, null_counts=[10])
def test_all_null_category(self):
# ARROW-1188
df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
df = df.assign(B=df.B.astype("category"))
self._check_pandas_roundtrip(df, null_counts=[0, 3])
def test_multithreaded_read(self):
data = {'c{0}'.format(i): [''] * 10
for i in range(100)}
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df, nthreads=4)
def test_nan_as_null(self):
# Create a nan that is not numpy.nan
values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
df = pd.DataFrame({'strings': values})
self._check_pandas_roundtrip(df)
def test_category(self):
repeats = 1000
values = ['foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
df['strings'] = df['strings'].astype('category')
values = ['foo', None, 'bar', 'qux', None]
expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
self._check_pandas_roundtrip(df, expected,
null_counts=[2 * repeats])
def test_timestamp(self):
df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
df['with_tz'] = (df.naive.dt.tz_localize('utc')
.dt.tz_convert('America/Los_Angeles'))
self._check_pandas_roundtrip(df)
def test_timestamp_with_nulls(self):
df = pd.DataFrame({'test': [ | pd.datetime(2016, 1, 1) | pandas.datetime |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
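            # DataFrame box: compare against its first row, converted to a plain Python list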
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
| Timestamp("20130301") | pandas.Timestamp |
#!/usr/bin/env python3
import os
import sys
from bs4 import BeautifulSoup
from fake_headers import Headers
from pprint import pprint
from pandas import DataFrame
from requests_futures.sessions import FuturesSession
from requests.exceptions import ConnectionError
from datetime import datetime
from pathlib import Path
fake_headers = Headers(headers=True).generate()
url = {
'libertatea': 'https://www.libertatea.ro/stiri-noi',
'digi24' : 'https://www.digi24.ro/stiri/actualitate',
'mediafax' : 'https://www.mediafax.ro/ultimele-stiri/',
'agerpres' : 'https://www.agerpres.ro/'}
def start_session(url1, url2, url3, url4):
with FuturesSession() as s:
print(f'[!] Starting sessions for [!]\n{url1}\n{url2}\n{url3}\n{url4}\n')
print('-' * len(url3), '\n')
session1 = s.get(url1, headers=fake_headers)
session2 = s.get(url2, headers=fake_headers)
session3 = s.get(url3, headers=fake_headers)
session4 = s.get(url4, headers=fake_headers)
soup1 = session1.result()
soup2 = session2.result()
soup3 = session3.result()
soup4 = session4.result()
session_soup1 = BeautifulSoup(soup1.text, 'lxml')
session_soup2 = BeautifulSoup(soup2.text, 'lxml')
session_soup3 = BeautifulSoup(soup3.text, 'lxml')
session_soup4 = BeautifulSoup(soup4.text, 'lxml')
return session_soup1, session_soup2, session_soup3, session_soup4
def libertatea_data(soup):
titles_list = list()
links_list = list()
print('[+] Parsing data from Libertatea')
titles = soup.find_all('h2', {'class' : 'article-title'})
for news_title in titles:
text_list = news_title.a['title']
titles_list.append(text_list)
links = soup.find_all('h2', {'class' : 'article-title'})
for news_link in links:
link_list = news_link.a['href']
links_list.append(link_list)
return zip(titles_list, links_list)
def digi24_data(soup):
titles_list = list()
links_list = list()
print('[+] Parsing data from Digi24')
titles = soup.find_all('h4', {'class' : 'article-title'})
for news_title in titles:
text_list = news_title.a['title']
titles_list.append(text_list)
links = soup.find_all('h4', {'class' : 'article-title'})
for news_link in links:
link_list = news_link.a['href']
links_list.append('https://www.digi24.ro/stiri/actualitate' + link_list)
return zip(titles_list, links_list)
def mediafax_data(soup):
titles_list = list()
links_list = list()
print('[+] Parsing data from Mediafax')
titles = soup.find_all('a', {'class':'item-title'})
for news_title in titles:
text_list = news_title['title']
titles_list.append(text_list)
links = soup.find_all('a', {'class':'item-title'})
for news_link in links:
the_links = news_link['href']
if 'tags' in the_links:
continue
links_list.append(the_links)
return zip(titles_list, links_list)
def agerpres_data(soup):
titles_list = list()
links_list = list()
print('[+] Parsing data from Agerpres', '\n')
print('-' * len(url['digi24']))
data = soup.find_all('div', {'class' : 'title_news'})
for title in data:
if len(title) != 1 or 'javascript:void(0)' not in title.a['href']:
titles_list.append(title.h2.string)
for links in data:
        if len(links) != 1 or 'javascript:void(0)' not in links.a['href']:
links_list.append('https://www.agerpres.ro' + links.h2.a['href'])
return zip(titles_list, links_list)
def create_csv(data, filename):
print(f'[+] Exporting {filename} to CSV')
    dataFrame = DataFrame(data, columns=['Titles', 'Links'])
    dataFrame.to_csv(filename, index=False)  # assumed export step; the original file is cut off here
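
# --- Illustrative usage sketch (assumption; not part of the original script) ---
# The source file is cut off above, so this __main__ block is a guess at how the
# pieces fit together; the output file names are also assumptions.
if __name__ == '__main__':
    soups = start_session(url['libertatea'], url['digi24'], url['mediafax'], url['agerpres'])
    parsers = (libertatea_data, digi24_data, mediafax_data, agerpres_data)
    outputs = ('libertatea.csv', 'digi24.csv', 'mediafax.csv', 'agerpres.csv')
    for soup, parser, outfile in zip(soups, parsers, outputs):
        create_csv(list(parser(soup)), outfile)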
import numpy as np
import pandas as pd
import pytest
import skorecard.reporting.report
from skorecard.metrics import metrics
from skorecard.bucketers import DecisionTreeBucketer
@pytest.fixture()
def X_y():
"""Set of X,y for testing the transformers."""
X = np.array(
[[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]],
np.int32,
)
y = np.array([0, 0, 0, 1, 1, 1, 0, 0, 1])
return X, y
@pytest.fixture()
def X1_X2():
"""Set of dataframes to test psi."""
X1 = pd.DataFrame(
[[0, 1], [1, 0], [0, 0], [3, 2], [0, 1], [1, 2], [2, 0], [2, 1], [0, 0]], columns=["col1", "col2"]
)
X2 = pd.DataFrame(
[[0, 2], [3, 0], [0, 0], [1, 2], [0, 4], [2, 1], [1, 1], [2, 1], [1, 1]], columns=["col1", "col2"]
)
return X1, X2
def test_iv_on_array(X_y):
"""Test the IV calculation for two arrays."""
X, y = X_y
X = pd.DataFrame(X, columns=["0", "1"])
np.testing.assert_almost_equal(metrics._IV_score(y, X["0"]), 5.307, decimal=2)
np.testing.assert_almost_equal(metrics._IV_score(y, X["1"]), 4.635, decimal=2)
def test_psi_zero(df):
"""Test that PSI on same array is zero."""
features = ["LIMIT_BAL", "BILL_AMT1"]
X = df[features]
y = df["default"]
X_bins = DecisionTreeBucketer(variables=features).fit_transform(X, y)
psi_vals = skorecard.reporting.report.psi(X_bins, X_bins)
assert set(psi_vals.keys()) == set(X_bins.columns)
assert all([val == 0 for val in psi_vals.values()])
def test_psi_values(X1_X2):
"""Assert PSi values match expectations."""
X1, X2 = X1_X2
expected_psi = {"col1": 0.0773, "col2": 0.965}
psi_vals = skorecard.reporting.report.psi(X1, X2)
np.testing.assert_array_almost_equal(pd.Series(expected_psi).values, pd.Series(psi_vals).values, decimal=2)
def test_IV_values(X_y):
"""Assert IV values match expectations."""
X, y = X_y
X = pd.DataFrame(X, columns=["col1", "col2"])
expected_iv = {"col1": 5.307, "col2": 4.635}
iv_vals = skorecard.reporting.report.iv(X, y)
    np.testing.assert_array_almost_equal(pd.Series(expected_iv).values, pd.Series(iv_vals).values, decimal=2)
'''
CIS 419/519 project: Using decision tree ensembles to infer the pathological
cause of age-related neurodegenerative changes based on clinical assessment
nadfahors: <NAME>, <NAME>, & <NAME>
This file contains code for preparing NACC data for analysis, including:
* synthesis of pathology data to create pathology class outcomes
* dropping uninformative variables from predictor set
* identifying and merging/resolving redundant clusters of variables
* identifying missing data codes and replacing with NaNs as appropriate
* creating change variables from longitudinal data
* imputation of missing data
* categorizing retained variables as interval/ratio, ordinal, or nominal
* creation of dummy variables for nominal variables
* standardizing interval/ratio and ordinal variables
* creating date variables, then converting these to useful ages or intervals
* quadratic expansion for interval/ratio variables?
'''
# Module imports
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
del fulldf
def table(a,b):
print(pd.crosstab(aut[a],aut[b],dropna=False,margins=True))
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPLEWY != 1), 'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.LBPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.NPPFTLD == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.TDPPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puerto rican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guam - chamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multi racial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
        aut[v] = aut[v].mask(aut[v].isin(badval))  # assign back to avoid the chained inplace-mask pitfall
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find correlated variables and drop.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
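# (Assumed continuation -- the original script is truncated at the line above.)
# SimpleImputer returns a bare ndarray, so a natural next step is to restore the
# column labels and row index before any further processing:
Xnumimp.columns = numvar
Xnumimp.index = X.index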
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
import os
try:
os.chdir(os.path.join(os.getcwd(), 'easy21'))
print(os.getcwd())
except:
pass
#%% [markdown]
# # Easy 21 Control Assignment
# ### Exercise instructions:
# In this assignment, we want to learn the state-value function for a given policy \pi
# Consider the policy that sticks if the player’s sum is 20 or 21, and otherwise hits,
# plus other player’s policies of your choice. For each of the 2 policies, and for each
# algorithm, plot the optimal value function v_\pi using similar axes to the Figure 5.2 (right)
# from Sutton and Barto’s book. Note that now the dealer can show either a black or a red card.
#
# ### Possible actions:
# - stick - Don't draw any new cards
# - hit - draw new card
#
# ### State definition:
# - Values of the player’s cards (added (black cards) or subtracted (red cards))
# - Value of dealer's cards
#
# ### State-Action transitions:
# - hit -> draw new card
# - stick -> the dealer then plays out his hand: he always sticks on any sum of 17 or greater, and hits otherwise.
#
# ### Draw card:
# - number 1-10 uniform distribution
# - Color: 1/3 red 2/3 black
#
#%% [markdown]
# # Part 1 - Implementation of Easy21 simulator
#%%
# %load easy21-environment.py
##################################################################################################
# Environment implementation #
##################################################################################################
import random
# defining constants
CARD_MAX_ABS_VALUE = 10
CARD_MIN_ABS_VALUE = 1
RED = "red"
BLACK = "black"
HIT = 0
STICK = 1
PLAYER = 0
DEALER = 1
class State:
def __init__(self):
self.random = random.Random()
self.playerPoints = random.randint(CARD_MIN_ABS_VALUE, CARD_MAX_ABS_VALUE)
self.dealerPoints = random.randint(CARD_MIN_ABS_VALUE, CARD_MAX_ABS_VALUE)
self.isTerminal = False
def toStateTuple(self):
return (self.playerPoints, self.dealerPoints)
def updateState(self, card, agent):
if agent == PLAYER:
self.playerPoints += card.value
else:
self.dealerPoints += card.value
def __str__(self):
return "(Pl, De) = ({0}, {1})".format(self.playerPoints, self.dealerPoints)
class Card(object):
def __init__ (self):
self.color = Card.getColor()
self.absValue = random.randint(CARD_MIN_ABS_VALUE, CARD_MAX_ABS_VALUE)
self.value = self.absValue if self.color == BLACK else -self.absValue
@staticmethod
def getColor():
colorVariable = random.randint(1,3)
return RED if colorVariable == 1 else BLACK
class Policy:
def act(self, state):
pass
class DefaultDealerPolicy(Policy):
def act(self, state):
if state.dealerPoints >= 17:
return STICK
return HIT
class DefaultPlayerPolicy(Policy):
def act(self, state):
if state.playerPoints >= 20:
return STICK
return HIT
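# Example of an additional player policy, as suggested by the assignment text
# above ("other player's policies of your choice"). This class is an added
# illustration, not part of the original notebook: stick on 17 or more,
# mirroring the dealer's fixed behaviour.
class Stick17PlayerPolicy(Policy):
    def act(self, state):
        if state.playerPoints >= 17:
            return STICK
        return HIT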
class EpisodeStep:
def __init__(self, state, action, reward, timeStep):
self.state = state
self.action = action
self.reward = reward
self.timeStep = timeStep
def __str__(self):
return "(S, A, R, t) = ({0}, {1}, {2}, {3})".format(str(self.state), "hit" if self.action == 0 else "stick", str(self.reward), self.timeStep)
class Game:
def __init__(self, playerPolicy=None, debug=False):
self.currentState = State()
self.playerPolicy = playerPolicy
self.dealerPolicy = DefaultDealerPolicy()
self.debug = debug
self.RandomReset()
def rewardFunction(self, state):
if state.playerPoints > 21 or state.playerPoints < 1:
return -1
if state.dealerPoints > 21 or state.dealerPoints < 1:
return 1
if not state.isTerminal:
return 0
if state.dealerPoints == state.playerPoints:
return 0
if state.playerPoints - state.dealerPoints > 0:
return 1
else:
return -1
def step (self, state, playerAction):
        if playerAction == HIT and (state.playerPoints <= 21 and state.playerPoints > 0):
card = Card()
if self.debug: print ("Player Hit:", card.value, card.color)
state.updateState(card, PLAYER)
if self.debug: print("Current state:", state.playerPoints, state.dealerPoints)
if state.playerPoints > 21 or state.playerPoints < 1:
state.isTerminal = True
        elif state.dealerPoints <= 21 and state.dealerPoints > 0:
if self.debug: print ("Player stick", str(state))
dealerAction = self.dealerPolicy.act(state)
while dealerAction == HIT:
card = Card()
if self.debug: print ("Dealer Hit:", card.value, card.color)
state.updateState(card, DEALER)
if self.debug: print("Current state:", state.playerPoints, state.dealerPoints)
dealerAction = self.dealerPolicy.act(state)
state.isTerminal = True
return self.rewardFunction(state), state
def SimulateEpisode(self):
episodes = []
t = 0
self.currentState = State()
if self.debug: print("Initial state:", self.currentState.playerPoints, self.currentState.dealerPoints)
while not self.currentState.isTerminal:
stateTuple = (self.currentState.playerPoints, self.currentState.dealerPoints)
playerAction = self.playerPolicy.act(self.currentState)
reward, _ = self.step(self.currentState, playerAction)
t += 1
episodes.append(EpisodeStep(stateTuple, playerAction, reward, t))
if self.debug: print("End state:", self.currentState.playerPoints, self.currentState.dealerPoints)
return episodes
def SimulateMultipleEpisodes(self, n):
sample = []
self.RandomReset()
for i in range(n):
sample.append(self.SimulateEpisode())
return sample
def RandomReset(self):
random.seed(10)
#%%
##################################################################################################
# Environment test #
##################################################################################################
game = Game(DefaultPlayerPolicy())
for i in range (10):
episodes = game.SimulateEpisode()
for e in episodes:
print (e)
print ("-------------------------------------")
#%% [markdown]
# # Auxiliary functions and imports for the tests
#%%
##################################################################################################
# Imports #
##################################################################################################
get_ipython().run_line_magic('matplotlib', 'inline')
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
from sys import argv
from itertools import product
import sys
from time import time
#%%
##################################################################################################
# Auxiliary methods #
##################################################################################################
def extractValueFunction(q):
v = []
for k in q:
v.append(((k[0], k[1]), q[k]))
return v
def argmaxA(q, s):
if (s[0], s[1], HIT) in q:
return HIT if q[(s[0], s[1], HIT)] > q[(s[0], s[1], STICK)] else STICK
return 0
def getStateActionVisits(episode):
firstStateActionVisits = {}
everyStateVisitsCount = dict.fromkeys(product(range(1, 22), range(1, 11)), 0)
for t in range(len(episode)):
step = episode[t]
everyStateVisitsCount[step.state] += 1
        if (step.state[0], step.state[1], step.action) not in firstStateActionVisits:
firstStateActionVisits[(step.state[0], step.state[1], step.action)] = t+1
return firstStateActionVisits, everyStateVisitsCount
def plotMutipleValueFunction(vPis, sizes, rows=2, cols=3, message='episode', width=19, height=9.5):
fig = plt.figure(figsize=(width, height))
for i in range(len(vPis)):
ax = fig.add_subplot(rows, cols, i+1, projection='3d')
ax.set_title("{} {}{}".format(sizes[i], message,'s' if i > 0 else ''),
fontsize=12)
# fig.colorbar(surf, shrink=0.5, aspect=5)
plotSurface(vPis[i], ax)
plt.subplots_adjust(wspace=0, hspace=0.1)
plt.show()
def plotSurface(vPi, ax):
x = list(map(lambda x: x[0][1], vPi))
y = list(map(lambda y: y[0][0], vPi))
z = list(map(lambda x: x[1], vPi))
df = pd.DataFrame({'x': x, 'y': y, 'z': z})
ax.set_xlabel('Dealer initial card')
ax.set_ylabel('Player card sum')
ax.set_zlabel('State value')
ax.set_xticks(range(1,11))
ax.set_yticks(range(1,22,2))
ax.set_zlim([-1, 1])
ax.plot_trisurf(df.x, df.y, df.z, cmap=cm.coolwarm, linewidth=0.1)
def plotValueFunction(vPi):
x = list(map(lambda x: x[0][1], vPi))
y = list(map(lambda y: y[0][0], vPi))
z = list(map(lambda x: x[1], vPi))
    df = pd.DataFrame({'x': x, 'y': y, 'z': z})
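    # (Assumed completion -- the original snippet is truncated above.) Mirroring
    # plotSurface, a single-figure version would presumably continue along these lines:
    fig = plt.figure(figsize=(10, 7))
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('Dealer initial card')
    ax.set_ylabel('Player card sum')
    ax.set_zlabel('State value')
    ax.set_xticks(range(1, 11))
    ax.set_yticks(range(1, 22, 2))
    ax.set_zlim([-1, 1])
    ax.plot_trisurf(df.x, df.y, df.z, cmap=cm.coolwarm, linewidth=0.1)
    plt.show()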
import pandas as pd
import numpy as np
from dateutil import relativedelta
import datetime
from forex_python.converter import CurrencyRates
from currency_converter import ECB_URL, CurrencyConverter
import os
import shutil
import urllib.request
class Declaracion:
@staticmethod
def fifo(dfg, trade=True):
"""
:param dfg: Dataframe of transacctions grouped by stocks and sorted by stock, date ascending
:param trade: If Trade=True, then return the df with all pair buy-sell.
If Trade=False, return a df with opened position after compute all sell transaction
:return: Dataframe with pairs of buy - sales every 2 rows. Using the fifo method
"""
df_compras = dfg[dfg['Quantity'] > 0].reset_index(drop=True)
        df_prof = pd.DataFrame(columns=dfg.columns)
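        # --- Added illustrative sketch: the original method body is truncated in
        # this excerpt, so the continuation below is NOT the author's code. It shows
        # one way to do the FIFO pairing the docstring describes, relying only on
        # the 'Quantity' column used above; all other columns are carried along.
        df_ventas = dfg[dfg['Quantity'] < 0].reset_index(drop=True)
        compras = df_compras.to_dict('records')  # FIFO queue, oldest buy first
        for _, venta in df_ventas.iterrows():
            pendiente = -venta['Quantity']
            while pendiente > 0 and compras:
                compra = compras[0]
                casado = min(pendiente, compra['Quantity'])
                buy_leg = {**compra, 'Quantity': casado}
                sell_leg = {**venta.to_dict(), 'Quantity': -casado}
                df_prof = pd.concat([df_prof, pd.DataFrame([buy_leg, sell_leg])],
                                    ignore_index=True)
                compra['Quantity'] -= casado
                pendiente -= casado
                if compra['Quantity'] == 0:
                    compras.pop(0)
        if trade:
            return df_prof  # buy-sell pairs, two rows per match
        return pd.DataFrame(compras, columns=dfg.columns)  # open positions left over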
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
# test vars have same vales on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index(["A", "B"]).style._translate(True, True)
expected = [
{
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
},
{
"class": "index_name level1",
"type": "th",
"value": "B",
"is_visible": True,
"display_value": "B",
},
{
"class": "blank col0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
},
]
assert result["head"][1] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = DataFrame({0: [1, 2, 3]})
df.style._translate(True, True)
def test_apply_axis(self):
df = DataFrame({"A": [0, 0], "B": [1, 1]})
f = lambda x: [f"val: {x.max()}" for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {
(0, 0): [("val", "1")],
(0, 1): [("val", "1")],
(1, 0): [("val", "1")],
(1, 1): [("val", "1")],
}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {
(0, 0): [("val", "0")],
(0, 1): [("val", "1")],
(1, 0): [("val", "0")],
(1, 1): [("val", "1")],
}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_series_return(self, axis):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
# test Series return where len(Series) < df.index or df.columns but labels OK
func = lambda s: pd.Series(["color: red;"], index=["Y"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
# test Series return where labels align but different order
func = lambda s: pd.Series(["color: red;", "color: blue;"], index=["Y", "X"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(0, 0)] == [("color", "blue")]
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_apply_dataframe_return(self, index, columns):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
idxs = ["X", "Y"] if index else ["Y"]
cols = ["X", "Y"] if columns else ["Y"]
df_styles = DataFrame("color: red;", index=idxs, columns=cols)
result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis):
result = (
self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz")
._compute()
.ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
def test_applymap_subset(self, slice_):
result = (
self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:, pd.IndexSlice["x", "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, ["A", "C"]]], # missing col element
pd.IndexSlice[pd.IndexSlice["a", 1], :],
pd.IndexSlice[pd.IndexSlice[:, 1], :],
pd.IndexSlice[pd.IndexSlice[:, [1, 3]], :], # missing row element
pd.IndexSlice[:, ("x", "A")],
pd.IndexSlice[("a", 1), :],
],
)
def test_applymap_subset_multiindex(self, slice_):
# GH 19861
# edited for GH 33562
warn = None
msg = "indexing on a MultiIndex with a nested sequence of labels"
if (
isinstance(slice_[-1], tuple)
and isinstance(slice_[-1][-1], list)
and "C" in slice_[-1][-1]
):
warn = FutureWarning
elif (
isinstance(slice_[0], tuple)
and isinstance(slice_[0][1], list)
and 3 in slice_[0][1]
):
warn = FutureWarning
idx = MultiIndex.from_product([["a", "b"], [1, 2]])
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
# https://github.com/pandas-dev/pandas/issues/25858
# Checks styler.applymap works with multindex when codes are provided
codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
columns = MultiIndex(
levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
)
df = DataFrame(
[[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
)
pct_subset = pd.IndexSlice[:, pd.IndexSlice[:, "%":"%"]]
def color_negative_red(val):
color = "red" if val < 0 else "black"
return f"color: {color}"
df.loc[pct_subset]
df.style.applymap(color_negative_red, subset=pct_subset)
def test_empty(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0"]},
{"props": [("", "")], "selectors": ["row1_col0"]},
]
assert result == expected
def test_duplicate(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
def test_init_with_na_rep(self):
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = Styler(df, na_rep="NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
def test_caption(self):
styler = Styler(self.df, caption="foo")
result = styler.to_html()
assert all(["caption" in result, "foo" in result])
styler = self.df.style
result = styler.set_caption("baz")
assert styler is result
assert styler.caption == "baz"
def test_uuid(self):
styler = Styler(self.df, uuid="abc123")
result = styler.to_html()
assert "abc123" in result
styler = self.df.style
result = styler.set_uuid("aaa")
assert result is styler
assert result.uuid == "aaa"
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
result = df.style.to_html(uuid="test")
assert "test" in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
styler = Styler(self.df, table_styles=style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
# GH 39563
style = [{"selector": "th", "props": "foo:bar;"}] # css string format
styler = self.df.style.set_table_styles(style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
def test_table_styles_multiple(self):
ctx = self.df.style.set_table_styles(
[
{"selector": "th,td", "props": "color:red;"},
{"selector": "tr", "props": "color:green;"},
]
)._translate(True, True)["table_styles"]
assert ctx == [
{"selector": "th", "props": [("color", "red")]},
{"selector": "td", "props": [("color", "red")]},
{"selector": "tr", "props": [("color", "green")]},
]
def test_table_styles_dict_multiple_selectors(self):
# GH 44011
result = self.df.style.set_table_styles(
[{"selector": "th,td", "props": [("border-left", "2px solid black")]}]
)._translate(True, True)["table_styles"]
expected = [
{"selector": "th", "props": [("border-left", "2px solid black")]},
{"selector": "td", "props": [("border-left", "2px solid black")]},
]
assert result == expected
def test_maybe_convert_css_to_tuples(self):
expected = [("a", "b"), ("c", "d e")]
assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
expected = []
assert maybe_convert_css_to_tuples("") == expected
def test_maybe_convert_css_to_tuples_err(self):
msg = "Styles supplied as string must follow CSS rule formats"
with pytest.raises(ValueError, match=msg):
maybe_convert_css_to_tuples("err")
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.to_html()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).to_html()
assert 'class="foo" data-bar' in result
def test_apply_none(self):
def f(x):
return DataFrame(
np.where(x == x.max(), "color: red", ""),
index=x.index,
columns=x.columns,
)
result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
def test_trim(self):
result = self.df.style.to_html() # trim=True
assert result.count("#") == 0
result = self.df.style.highlight_max().to_html()
assert result.count("#") == len(self.df.columns)
def test_export(self):
f = lambda x: "color: red" if x > 0 else "color: blue"
g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
style1 = self.styler
style1.applymap(f).applymap(g, z="b").highlight_max()._compute() # = render
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.to_html()
def test_bad_apply_shape(self):
df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
msg = "resulted in the apply method collapsing to a Series."
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: "x")
msg = "created invalid {} labels"
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: [""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: ["", "", "", ""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["A", "C"]), axis=0)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: ["", "", ""], axis=1)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["X", "Z"]), axis=1)
msg = "returned ndarray with wrong shape"
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
def test_apply_bad_return(self):
def f(x):
return ""
df = DataFrame([[1, 2], [3, 4]])
msg = (
"must return a DataFrame or ndarray when passed to `Styler.apply` "
"with axis=None"
)
with pytest.raises(TypeError, match=msg):
df.style._apply(f, axis=None)
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_bad_labels(self, axis):
def f(x):
return DataFrame(**{axis: ["bad", "labels"]})
df = DataFrame([[1, 2], [3, 4]])
msg = f"created invalid {axis} labels."
with pytest.raises(ValueError, match=msg):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
expected = {
(0, 0): 3,
(0, 3): 3,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(index, sparsify=True, max_index=100)
tm.assert_dict_equal(result, expected)
expected = {
(0, 0): 1,
(0, 1): 1,
(0, 2): 1,
(0, 3): 1,
(0, 4): 1,
(0, 5): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
        result = _get_level_lengths(index, sparsify=False, max_index=100)
        tm.assert_dict_equal(result, expected)
import unittest
import ast
import pandas as pd
from blotter import blotter
from pandas.util.testing import assert_dict_equal
class TestBlotter(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertEventEqual(self, ev1, ev2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def test_no_timestamp_key(self):
data = {'somevalue': pd.Timestamp('2016-12-01T10:00:00'),
'instrument': 'CLZ6', 'price': 53.46, 'quantity': 100,
'commission': 2.50, 'ccy': 'USD'}
def make_ev():
blotter._Event('TRADE', data)
self.assertRaises(ValueError, make_ev)
def test_no_timestamp_value(self):
data = {'timestamp': '2016-12-01T10:00:00',
'instrument': 'CLZ6', 'price': 53.46, 'quantity': 100,
'commission': 2.50, 'ccy': 'USD'}
def make_ev():
blotter._Event('TRADE', data)
self.assertRaises(ValueError, make_ev)
def test_trade(self):
blt = blotter.Blotter()
data = {'timestamp': pd.Timestamp('2016-12-01T10:00:00'),
'instrument': 'CLZ6', 'price': 53.46, 'quantity': 100,
'multiplier': 1, 'commission': 2.50, 'ccy': 'USD'}
ev = [blotter._Event('TRADE', data)]
blt.dispatch_events(ev)
def test_trade_fromstring(self):
trd_str = ('TRADE|{"timestamp":"2016-12-01 10:00:00",'
'"instrument":"CLZ6","price":53.46,"quantity":100,'
'"commission":2.50,"ccy":"USD"}')
ev = blotter._Event.fromstring(trd_str)
        data = {'timestamp': pd.Timestamp('2016-12-01T10:00:00'),
                'instrument': 'CLZ6', 'price': 53.46, 'quantity': 100,
                'commission': 2.50, 'ccy': 'USD'}
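        # (Assumed continuation -- the original snippet is truncated above.) Given
        # the assertEventEqual helper defined in this class, the test presumably
        # finishes by rebuilding the expected event and comparing it to `ev`:
        ev_exp = blotter._Event('TRADE', data)
        self.assertEventEqual(ev, ev_exp)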
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Taskmaster-2 implementation for ParlAI.
No official train/valid/test splits are available as of 2020-05-18, so we make our own
splits.
"""
from parlai.core.params import ParlaiParser
import os
import pandas as pd
from collections import Counter
from parlai.core.opt import Opt
import parlai.core.tod.tod_core as tod
from parlai.utils.misc import warn_once
import json
from typing import Optional
from parlai.utils.data import DatatypeHelper
from parlai.utils.io import PathManager
import parlai.tasks.taskmaster2.build as build_
import parlai.core.tod.tod_agents as tod_agents
DOMAINS = [
"flights",
"food-ordering",
"hotels",
"movies",
"restaurant-search",
"sports",
"music",
]
class Taskmaster2Parser(tod_agents.TodStructuredDataParser):
"""
Abstract data loader.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser.add_argument(
"--taskmaster2-domains",
nargs="+",
default=DOMAINS,
choices=DOMAINS,
help="Uses last passed in configuration.",
)
parser.add_argument(
"--use-cumulative-api-calls",
type=bool,
default=True,
help="Have API Call/API response turns only when an API response"
"slot exist. Accumulate all API call slots with same API call name",
)
return super().add_cmdline_args(parser, partial_opt)
def __init__(self, opt: Opt, shared=None):
self.fold = DatatypeHelper.fold(opt["datatype"])
opt["datafile"] = self.fold
self.dpath = os.path.join(opt["datapath"], "taskmaster-2")
if shared is None:
warn_once(
"Taskmaster2 is a beta dataset, and format may significantly change."
)
build_.build(opt)
super().__init__(opt, shared)
def _load_data(self, fold, domains):
# load up the ontology
ontologies = {}
for section in domains:
fn = os.path.join(self.dpath, section + ".onto.json")
with PathManager.open(fn, "r") as f:
ontologies.update(json.load(f))
chunks = []
for section in domains:
with PathManager.open(os.path.join(self.dpath, section + ".json")) as f:
subset = pd.read_json(f)
subset["domain"] = section
chunks.append(subset)
chunks = | pd.concat(chunks, axis=0) | pandas.concat |
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import copy
from tensorflow.contrib import learn
import _pickle as pickle
def read_from_csv():
csv_fname = "/Users/shubhi/Public/CMPS296/friends.csv" #replace with local file loc
df = | pd.DataFrame.from_csv(csv_fname) | pandas.DataFrame.from_csv |
import pandas as pd
import numpy as np
from scipy import signal as sgn
import math as m
##################################################
# Aux functions #
##################################################
# Multiply two quaternions (Hamilton product)
def q_mult(q1, q2):
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return w, x, y, z
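# Illustrative sanity check for q_mult (hypothetical values): the identity
# quaternion (1, 0, 0, 0) leaves any quaternion unchanged under the Hamilton
# product, e.g.
#   q_mult((1, 0, 0, 0), (0.5, 0.5, 0.5, 0.5)) == (0.5, 0.5, 0.5, 0.5)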
# Rotate a quaternion/vector q2 by quaternion q1: q1 * q2 * conj(q1)
def qq_mult(q1, q2):
#q2 = (0.0,) + v1
return q_mult(q_mult(q1, q2), q_conjugate(q1))
# Conjugate for quaternion
def q_conjugate(q):
w, x, y, z = q
return (w, -x, -y, -z)
# Left-multiply every quaternion row of a DataFrame by the inverse of its first row
# (expresses each orientation relative to the first sample)
def get_mult_quat_DFxV (quat):
res = pd.DataFrame([],columns=quat.columns.to_numpy())
q2 = quat.loc[0]
n=q2[0]**2+q2[1]**2+q2[2]**2+q2[3]**2
q2=q_conjugate(q2)/n
for i in range(len(quat)):
q=quat.loc[i].to_numpy()
res.loc[i] = q_mult(q2,q)
return res
# Rotate a fixed vector (or quaternion) by each quaternion row of a DataFrame
def get_rotation_DFxV (quat,v,name):
res = pd.DataFrame([],columns=name)
v = np.append([0]*(4-len(name)),v)
for i in range(len(quat)):
q=quat.loc[i].to_numpy()
if len(name)==4:
res.loc[i] = qq_mult(v,q)
else:
qq=qq_mult(q,v)
res.loc[i] = [qq[1],qq[2],qq[3]]
return res
# Rotate each row of a data DataFrame by the corresponding quaternion row
def get_rotation_DFxDF(quat,data,name):
res = pd.DataFrame([],columns=name)
for i in range(len(quat)):
q1=quat.values[i]
q2=data.values[i]
if len(q2)==4:
res.loc[i] = qq_mult(q1,q2)
else:
res.loc[i] = qq_mult(q1,np.append([0],q2))[1:]
return res
# Convert a quaternion (w, x, y, z) to Euler angles (roll, pitch, yaw) in radians
def quaternion_to_euler(v):
w, x, y, z = v
t0 = 2 * (w * x + y * z)
t1 = 1 - 2 * (x * x + y * y)
X = m.atan2(t0, t1)
t2 = 2 * (w * y - z * x)
t2 = 1 if t2 > 1 else t2
t2 = -1 if t2 < -1 else t2
Y = m.asin(t2)
t3 = 2 * (w * z + x * y)
t4 = 1 - 2 * (y * y + z * z)
Z = m.atan2(t3, t4)
return X, Y, Z
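# Illustrative check (hypothetical values): a 90-degree rotation about the x axis,
# q = (0.70710678, 0.70710678, 0.0, 0.0), gives
#   quaternion_to_euler(q)  ->  (~pi/2, 0.0, 0.0)   # roll = 90 deg, pitch = yaw = 0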
# Calculate norm
def norm(v):
n = pd.DataFrame([],columns=['norm'])
t = 0
for i in range(v.index.stop):
for j in range(3):
t+=v.loc[i][j]**2
t = np.sqrt(t)
print(t)
T = pd.DataFrame([t],columns=['norm'])
n = n.append(T,ignore_index = True)
t = 0
return n
# Keep only every n-th row of a position or uvw DataFrame (drops the rest)
def df_drop(n,data):
for i in range(len(data)):
if(i%n!=0):
data = data.drop(i,axis=0)
return data#data.reset_index(drop=True)
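# Example (assuming a default RangeIndex): df_drop(10, pos) keeps only the rows
# whose label satisfies i % 10 == 0, i.e. rows 0, 10, 20, ..., and drops the rest.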
# Return position and uvw components for the quiver arrows up to frame n
def get_arrow(pos,uvw,n,skip):
'''
    Only if the norm is found:
bool_index = n>=index_pos_drop
for i in range(bool_index.size):
if bool_index[i]==False:
break
n=i
'''
n = int(n/skip)
x = pos['posx'][:n]
y = pos['posy'][:n]
z = pos['posz'][:n]
u = uvw['u'][:n]
v = uvw['v'][:n]
w = uvw['w'][:n]
return x,y,z,u,v,w
# Return position and uvw size to quivers
def get_arrow_one(pos,uvw,n,v_type):
if v_type=='rotation':
x = [0,0,0]
y = [0,0,0]
z = [0,0,0]
elif v_type=='static':
x = pos['posx'][n]
y = pos['posy'][n]
z = pos['posz'][n]
u = uvw['u'][n]
v = uvw['v'][n]
w = uvw['w'][n]
return x,y,z,u,v,w
# Calculate uvw vectors
def get_uvw(size_vector,quat):
# Rotate quat
quat=get_mult_quat_DFxV(quat)
#qz=[0.70710678118, 0, 0, 0.70710678118]
#quat=get_rotation_DFxV(quat,qz,['qw','qx','qy','qz'])
# Create vector uvw
columns=['u','v','w']
x=get_rotation_DFxV(quat,[size_vector,0,0],columns)
y=get_rotation_DFxV(quat,[0,size_vector,0],columns)
z=get_rotation_DFxV(quat,[0,0,size_vector],columns)
# Create vectors uv2 and position
uvw = [x,y,z]
return uvw
# Calculate High or Low Pass filter
def pass_filter(data,type,filtcutoff):
res=pd.DataFrame([])
name=data.columns.to_numpy()
b, a = sgn.butter(1,(2*filtcutoff)/(10),type)
for i in range(len(data.columns)):
res[name[i]]=sgn.filtfilt(b,a,data[name[i]])
return res
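# Illustrative usage (hypothetical DataFrame and cutoff):
#   acc_low = pass_filter(acc_df, 'low', 0.25)    # first-order Butterworth low-pass
#   acc_high = pass_filter(acc_df, 'high', 0.25)  # first-order Butterworth high-pass
# 'low'/'high' are passed straight to scipy.signal.butter as the filter type.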
# Calculate median filter
def median_filter(data,f_size):
res= | pd.DataFrame([]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 14:16:52 2021
@author: <NAME>
INFO:
This script is where the UHA and urban heat analyses are done
The UHA is calculated in this script using both MIDAS and CWS measurements
Most of the plots and tables in the ERL paper are made here
Some information written in the manuscript are simply printed by the script
"""
import pandas as pd
import geopandas as gpd
import rasterio
from shapely.geometry import box
import numpy as np
# import matplotlib.ticker as mticker
import matplotlib.pyplot as plt
import matplotlib as mpl
import xarray as xr
import cartopy.crs as ccrs
from fiona.crs import from_epsg
import glob
from mpl_toolkits.axes_grid1 import make_axes_locatable
from datetime import datetime
from scipy import stats
import matplotlib.colors as clrs
import geopy.distance
from matplotlib import markers
from matplotlib.path import Path
#########################
### ATTRIBUTES ###
#########################
### City of interest
city = 'London'
reshape_na = False ## Only use if working with NetCDF file. Switch to make a df out of NA data. Takes time and memory /!\
### Bounding box to study intra-urban temperature variability:
### - default (Gherkin): 51.1,-0.8 ; 51.9,0.6
### - centered large (British Museum): 51.12,-0.73 ; 51.92,0.47
### - centered large (Trafalgar Square): 51.1,-0.72 ; 51.9,0.48
bbox_llat = 51.1
bbox_ulat = 51.9
bbox_llon = -0.72
bbox_ulon = 0.48
quadrants = {'NE': [(bbox_llon + bbox_ulon)/2, (bbox_llat + bbox_ulat)/2, bbox_ulon, bbox_ulat],
'SE': [(bbox_llon + bbox_ulon)/2, bbox_llat, bbox_ulon, (bbox_llat + bbox_ulat)/2],
'SW': [bbox_llon, bbox_llat, (bbox_llon + bbox_ulon)/2, (bbox_llat + bbox_ulat)/2],
'NW': [bbox_llon, (bbox_llat + bbox_ulat)/2, (bbox_llon + bbox_ulon)/2, bbox_ulat]}
### Wind classes and plotting information
breeze_classes = [0,3,6,9] ### In meters per second
breeze_y_classes = [0.3,0.1,-0.1,-0.3] ### Y axis coordinates
breezes = ['Calm or Light Breeze', 'Gentle to Moderate Breeze', 'Moderate to Fresh Breeze', 'Strong Breeze'] ### Classes
wind_dir = [0,90,180,270] ### Wind orientation thresholds (0 = North, 90 = East, 180 = South and 270 = West; see https://www.metoffice.gov.uk/binaries/content/assets/metofficegovuk/pdf/research/library-and-archive/library/publications/factsheets/factsheet_17-observations.pdf)
wind_dir_name = ['NE', 'SE', 'SW', 'NW'] ### Wind orientation classes
### Which figures to plot (/!\ not the same number as in the ERL paper since supplementary figures are also included)
plot_fig1 = True
plot_fig2 = True
plot_fig3 = True
plot_fig4 = True
plot_fig5 = True
plot_fig6 = True
plot_fig7 = True
plot_fig8 = True
plot_fig9 = True
plot_tab1 = True
plot_tab2 = True
### Plotting variables
seasons = ['DJF', 'MAM', 'JJA', 'SON']
### Different colormaps for the AVG T, DTR and number of available measurements per CWS
min_val, max_val = (0.1,1)
n = 10
cmap_temp_orig = plt.cm.Reds
colors_temp = cmap_temp_orig(np.linspace(min_val, max_val, n))
cmap_temp = clrs.LinearSegmentedColormap.from_list("mycmap", colors_temp)
cmap_temp.set_bad('white', alpha=0)
cmap_dtr_orig = plt.cm.Greens
colors_dtr = cmap_dtr_orig(np.linspace(min_val, max_val, n))
cmap_dtr = clrs.LinearSegmentedColormap.from_list("mycmap", colors_dtr)
cmap_dtr.set_bad('white', alpha=0)
cmap_prc_orig = plt.cm.Greys
colors_prc = cmap_prc_orig(np.linspace(min_val, max_val, n))
cmap_prc = clrs.LinearSegmentedColormap.from_list("mycmap", colors_prc)
cmap_prc.set_bad('white', alpha=0)
cmap_uha = plt.cm.RdBu_r
cmap_uha.set_bad('white', alpha=0)
## Modulable years and months
startyear = '2015'
startmon = '01'
startday = '01'
endyear = '2021'
endmon = '01'
endday = '01'
## Exact dates for time slicing
startdate = startyear + '-' + startmon + '-' + startday
enddate = endyear + '-' + endmon + '-' + endday
## List of covered dates for plotting labels
dates_list = [d.strftime('%Y-%m-%d') for d in pd.date_range(startdate, enddate, freq='1d').to_list()]
years = [2015, 2016, 2017, 2018, 2019, 2020]
### LCZ color bars and names
lcz_colors_dict = {0:'#FFFFFF', 1:'#910613', 2:'#D9081C', 3:'#FF0A22', 4:'#C54F1E', 5:'#FF6628', 6:'#FF985E',
7:'#FDED3F', 8:'#BBBBBB', 9:'#FFCBAB',10:'#565656', 11:'#006A18', 12:'#00A926',
13:'#628432', 14:'#B5DA7F', 15:'#000000', 16:'#FCF7B1', 17:'#656BFA', 18:'#00ffff'}
cmap_lcz = mpl.colors.ListedColormap(list(lcz_colors_dict.values()))
lcz_classes = list(lcz_colors_dict.keys()); lcz_classes.append(19)
norm_lcz = mpl.colors.BoundaryNorm(lcz_classes, cmap_lcz.N)
lcz_labels = ['Mask', 'Compact High Rise: LCZ 1', 'Compact Mid Rise: LCZ 2', 'Compact Low Rise: LCZ 3',
'Open High Rise: LCZ 4', 'Open Mid Rise: LCZ 5', 'Open Low Rise: LCZ 6',
              'Lightweight Lowrise: LCZ 7', 'Large Lowrise: LCZ 8',
'Sparsely Built: LCZ 9', 'Heavy Industry: LCZ 10',
'Dense Trees: LCZ A', 'Sparse Trees: LCZ B', 'Bush - Scrubs: LCZ C',
'Low Plants: LCZ D', 'Bare Rock - Paved: LCZ E', 'Bare Soil - Sand: LCZ F',
'Water: LCZ G', 'Wetlands: LCZ W']
lcz_labels_dict = dict(zip(list(lcz_colors_dict.keys()),lcz_labels))
#########################
### AWS OBSERVATIONS ###
#########################
datadir_MIDAS = '' + city + '/Filtered/' ### Directory where MIDAS standardized data is located
df = pd.read_csv(datadir_MIDAS + 'Heathrow_' + str(years[0]) + '.csv', index_col=0)
for yr in years[1::]:
df_tmp = pd.read_csv(datadir_MIDAS + 'Heathrow_' + str(yr) + '.csv', index_col=0)
df = df.append(df_tmp)
del df_tmp
df = df.set_index( | pd.DatetimeIndex(df.index) | pandas.DatetimeIndex |
import os
import errno
import warnings # To ignore any warnings, call warnings.filterwarnings("ignore")
from glob import glob # glob expands a wildcard pattern into the list of matching file names in a directory
import numpy as np # For mathematical calculations
import pandas as pd # For Pandas DataFrame
from scipy.stats import kurtosis, skew # To calculate skewness, kurtosis
import pickle # To store the necessary files for efficient reuse
def main(train_label_df, feature, data_flag):
    # list all subjects' data folders
folders = os.listdir("dataset/" + data_flag)
df = pd.DataFrame()
for folder in set(folders):
# read all the datasets one by one for each subject in the directory listing
        data_df = pd.DataFrame(fetch_train_data(folder, feature, data_flag))
# add the labels to training data
if(data_flag == 'train'):
data_df['activity'] = train_label_df.loc[train_label_df.Subject == folder]['Label'].to_list()
# append each subject's dataframe to a common dataframe
df = df.append(data_df, ignore_index=True)
# random shuffling of all test/train data
df = df.sample(frac=1).reset_index(drop=True)
# write the file with extracted features
out_file = "dataset/pickle/"+ data_flag +"/"+ feature +".pickle"
if not os.path.exists(os.path.dirname(out_file)):
try:
os.makedirs(os.path.dirname(out_file))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
pickle_out = open(out_file,"wb")
pickle.dump(df, pickle_out)
print("Excuted Successfully")
# Here is the function to fetch the summarised training data of each subject
def fetch_train_data(folder, feature, data_flag):
# DataFrame to hold each processed dataset
dataframe = pd.DataFrame()
# filenames: holds all the activity files given subject
file_names = glob("dataset/" + data_flag + "/" + folder +"/*.csv")
#read each activity file of the subject
for file_name in file_names:
df = pd.read_csv(file_name, header=None)
#append the processed dataset to dataframe
dataframe = dataframe.append(extract_features(df, feature), ignore_index=True)
return dataframe
def extract_features(df, feature):
stats_df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# Copyright (c) pytmge Development Team.
'''
Classes for data preparation.
Dataset
chemical_formulas
composition
'''
import re
import numpy as np
import pandas as pd
from pytmge.core import element_list, progressbar, _print
__author__ = '<NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '1.0'
__date__ = '2022/3/18'
class data_set:
def __init__(self, df_dataset):
'''
df_dataset : DataFrame
Dataset of chemical formula and target variable.
(chemical formulas as index,
target variable and labels as columns,
target variable at the first column)
'''
self.data = df_dataset
self.chemical_formulas = chemical_formulas(df_dataset)
self.target_variable = df_dataset.iloc[:, 0]
def delete_duplicates(self):
'''
Delete duplicate entries in dataset.
If two or more entries have the same chemical formula
but different property values, keep the entry having greater value (of the first column).
Returns
-------
df_deduped_dataset : DataFrame
deduped subset.
'''
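        # Illustrative behaviour (hypothetical values): if 'Fe2O1' appears twice
        # with target values 10 and 12, the ascending sort on the first column puts
        # the 12-entry last, so the dict write below keeps that row and discards the other.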
print('\n deleting duplicate entries in dataset ...') if _print else 0
# sort the dataset by the values of columns (first column at the end).
for col_name in list(self.data)[::-1]:
self.data.sort_values(by=col_name, inplace=True)
deduped_dataset = {}
for i, cf in enumerate(list(self.data.index)):
deduped_dataset[cf] = self.data.loc[cf]
progressbar(i + 1, self.data.shape[0]) if _print else 0
print(' original:', i + 1, '| deduped:', len(deduped_dataset)) if _print else 0
df_deduped_dataset = pd.DataFrame.from_dict(deduped_dataset, orient='index')
# df_deduped_dataset.sort_index(ascending=True, inplace=True)
df_deduped_dataset.sort_values(
by=list(df_deduped_dataset)[0],
ascending=False,
inplace=True
)
print(' Done.') if _print else 0
return df_deduped_dataset
def categorization_by_composition(self):
'''
Categorizing the chemical formulas, according to (n-e-c).
n : 'number_of_elements',
e : 'element',
c : 'elemental_contents'
Returns
-------
dict_category : dict
Dict of category.
'''
print('\n categorizing chemical formulas ...') if _print else 0
df_composition = self.chemical_formulas.composition.data['DataFrame']
cfs = list(df_composition.index)
_elements = list(df_composition.columns)
# elemental contents
        contents = df_composition.fillna(0).applymap(lambda x: int(x + 0.5))
# number of elements in each chemical formula, ignore the element(s) that content < 0.5
number_of_elements = (df_composition.applymap(lambda x: x >= 0.5) * 1).sum(axis=1)
# print(' labeling ...') if _print else 0
category_labels = {}
for cf in cfs:
category_labels[cf] = {}
i = 0
for e in _elements:
if contents.loc[cf, e] >= 0.5:
# assign a category lable 'n-e-c' to each chemical formula
n = str(round(number_of_elements[cf]))
c = str(int(contents.loc[cf, e] + 0.5))
category_labels[cf][i] = n + '-' + e + '-' + c
i += 1
# print(' collecting ...') if _print else 0
dict_category = {}
for cf in cfs:
for label in category_labels[cf].values():
# dict_category[label] = {}
dict_category[label] = []
for cf in cfs:
for label in category_labels[cf].values():
# dict_category[label][cf] = self.data.loc[cf, :].to_dict()
dict_category[label] += (cf, )
print(' Done.') if _print else 0
return dict_category
def subset(self):
'''
Extracting subset.
For each category, pick one entry having the highest value of material property.
Returns
-------
df_subset : DataFrame
df_subset.
'''
dict_category = self.categorization_by_composition()
print('\n extracting subset ...') if _print else 0
ds_dataset = self.data.iloc[:, 0]
highest_entries = {}
for category_label, cfs in dict_category.items():
highest_value = ds_dataset[cfs].nlargest(1).values[0]
highest_entries[category_label] = ds_dataset[cfs][ds_dataset >= highest_value * 1.0].to_dict()
# top_3 = ds_dataset[cfs].nlargest(3).index
# highest_entries[category_label] = ds_dataset[cfs][top_3].to_dict()
list_highest = []
for k, v in highest_entries.items():
list_highest += list(v) # sometimes there are multiple highest ones
# list_highest += (list(v)[0], ) # only take one
df_subset = self.data.loc[list(set(list_highest)), :]
df_subset.sort_values(by=list(df_subset)[0], ascending=False, inplace=True)
print(' Done.') if _print else 0
return df_subset
class chemical_formulas:
def __init__(self, dataset: object):
self.data = list(dataset.index)
self.in_proper_format = self.check_format()
self.composition = composition(self.in_proper_format)
def check_format(self):
'''
Checking the format of the chemical formulas.
The format is supposed to be like 'H2O1' or 'C60',
whereas 'H2O' or 'C' or 'La2Cu1O4-x' is NOT ok.
        Do NOT use brackets.
Returns
-------
chemical_formulas_in_proper_format : list
Chemical formulas in proper format.
'''
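        # One possible pattern consistent with the format described above (a hedged
        # sketch, not the library's own rule):
        #   re.fullmatch(r'([A-Z][a-z]?\d+(\.\d+)?)+', 'H2O1')  # -> match
        #   re.fullmatch(r'([A-Z][a-z]?\d+(\.\d+)?)+', 'H2O')   # -> None (missing count)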
print('\n checking format of chemical formulas ...') if _print else 0
chemical_formulas_in_proper_format = []
is_proper_format = {}
for i, cf in enumerate(self.data):
is_proper_format[cf] = True
if | pd.isnull(cf) | pandas.isnull |
import numpy as np
import pandas as pd
from sklearn import preprocessing
import pprint
from os import chdir
from sklearn.ensemble import RandomForestClassifier
import sys
#sys.path.insert(0, '//Users/babakmac/Documents/HypDB/relational-causal-inference/source/HypDB')
#from core.cov_selection import *
#from core.explanation import *
#import core.query as sql
#import modules.statistics.cit as ci_test
#from Modules.InformationTheory.info_theo import *
from sklearn.metrics import confusion_matrix
import copy
from sklearn import tree
from utils.read_data import read_from_csv
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
import seaborn as sns
sns.set(style="white") #white background style for seaborn plots
sns.set(style="whitegrid", color_codes=True)
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import numpy as np
from scipy import interp
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.linear_model import LogisticRegression
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
import math
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler
def data_split(data,outcome,path,k=5,test_size=0.3):
rs = StratifiedShuffleSplit(n_splits=k, test_size=test_size, random_state=2)
data_y = pd.DataFrame(data[outcome])
data_X = data.drop([outcome], axis=1)
rs.get_n_splits(data_X, data_y)
j = 0
for test, train in rs.split(data_X,data_y):
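        # Note: rs.split() yields (train_indices, test_indices), so with the swapped
        # loop names `test` holds the larger slice and `train` the test_size slice;
        # the concat below then writes the full shuffled dataset to train_<j>.csv.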
cur_test = data.iloc[train]
cur_train = data.iloc[test]
cur_train = pd.concat([cur_test, cur_train])
cur_train.to_csv(path + 'train_' + str(j) + '.csv', encoding='utf-8', index=False)
#print(path + 'train_' + str(j) + '.csv')
#cur_test.to_csv(path + 'test_' + str(j) + '.csv', encoding='utf-8', index=False)
#print(len(cur_test.index))
#print(path + 'test_' + str(j) + '.csv')
j +=1
def cross_valid(data,features,D_features,Y_features,X_features,path,k=5):
print('Original Data Size',len(data.index))
train_df = data[features]
dft1 = pd.get_dummies(train_df[X_features])
dft2 = pd.get_dummies(train_df[Y_features])
X = dft1.values
y = dft2.values
y = y.flatten()
cv = StratifiedKFold(n_splits=k,shuffle=True)
#classifier = LogisticRegression()
j = 0
for train, test in cv.split(X, y):
cur_train = train_df.iloc[train]
cur_test = train_df.iloc[test]
cur_train.to_csv(path + 'train_' + str(j) + '.csv', encoding='utf-8', index=False)
print(len(cur_train.index))
print(path + 'train_' + str(j) + '.csv')
cur_test.to_csv(path + 'test_' + str(j) + '.csv', encoding='utf-8', index=False)
print(len(cur_test.index))
print(path + 'test_' + str(j) + '.csv')
j +=1
def strr(list):
return str(['%.3f' % val for val in list])
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent+1)
else:
print('*****************************************************************************************')
print('\t' * (indent+1) + strr(value))
            print('mean:', np.mean(value))
            print('variance:', np.var(value))
print('*****************************************************************************************')
def test_rep_str(D_features,Y_features,X_features,path1,path2,k=5,droped=False,classifier='log_reg'):
if classifier=='log_reg':
classifier = LogisticRegression()
elif classifier=='rand_forest':
classifier=RandomForestClassifier(max_depth=2, random_state=0)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
MI_inp = dict()
MI_out = dict()
MI_test=dict()
for j in range(0, k):
print(path2+str(j)+'.csv')
cur_train=read_from_csv(path1+str(j)+'.csv')
print(path1+str(j)+'.csv')
cur_test=read_from_csv(path2+str(j)+'.csv')
#atts=cur_train.columns
#atts=atts.tolist()
#list=[att.replace('_x','').replace('_y','') for att in atts]
#atts
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_train, item, Y_features, X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
rmi = round(mi, 3)
print('####################################')
print(len(cur_train.index))
            print('Mutual information in train data:', item,'pvalue:' , pval, 'MI:', rmi)
print('####################################')
if item not in MI_inp.keys():
MI_inp[item]= [rmi]
else:
MI_inp[item] = MI_inp[item] +[rmi]
inf = Info(cur_test)
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, Y_features, X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
mi = round(mi, 3)
print('####################################')
print('MI in test data:', item,'pvalue:' , pval, 'MI:', mi)
print('####################################')
if item not in MI_test.keys():
MI_test[item]= [mi]
else:
MI_test[item] = MI_test[item] +[mi]
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
        print('Predictive Power (training)', mi)
inf = Info(cur_test)
mi = inf.CMI(D_features, Y_features,X_features)
mi = round(mi, 3)
        print('Repaired MI test', mi)
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
print('Predictive Power(test)', mi)
cur_train[Y_features[0]] = pd.to_numeric(cur_train[Y_features[0]])
ate = cur_train.groupby([D_features[0]])[Y_features[0]].mean()
print(ate)
# m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE \n', ate)
# new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J \n', new)
#J1.insert(0,new)
#ate = cur_test.groupby([D_features[0]])[Y_features[0]].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE test \n', ate)
#new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J test \n', new)
#J1.insert(0,new)
# print("len",cur_train.columns,len(cur_train.index),cur_train.shape)
# print("len",len(cur_test.index),cur_test.shape)
j += 1
#inf = Info(cur_train)
#MI_inp.insert(0, I)
cur_test['W']=1
train_objs_num = len(cur_train)
dataset = pd.concat(objs=[cur_train[ D_features+X_features], cur_test[ D_features+X_features]], axis=0)
dataset = pd.get_dummies(dataset)
dft1 = dataset[:train_objs_num]
dft4 = dataset[train_objs_num:]
train_X = dft1.values
train_y = cur_train[Y_features[0]].values
# train_y=train_y.flatten()
#if droped:
# dft4 = pd.get_dummies(cur_test[X_features])
#else:
# dft4 = pd.get_dummies(cur_test[ D_features+X_features])
#print(cur_test[D_features+X_features])
dft5 = pd.get_dummies(cur_test[Y_features])
# logit = sm.Logit(train_df['bscore'], train_df['juv_misd_count'])
X = dft4.values
y = dft5.values
y = y.flatten()
#print("#####################",len(train_X),len(train_y),type(train_X),type(train_y),train_X,train_y,X.shape)
print(X.shape,train_X.shape)
kfold = model_selection.KFold(n_splits=10, random_state=7)
modelCV = LogisticRegression()
probas_ = classifier.fit(train_X, train_y).predict_proba(X)
scoring = 'accuracy'
results = model_selection.cross_val_score(modelCV, train_X, train_y, cv=kfold, scoring=scoring)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@', np.mean(results))
#logit = sm.Logit(train_X,cur_train[Y_features[0]])
# fit the model
#result = logit.fit()
#print(probas_)
y_pred = classifier.predict(X)
cur_test.insert(0,'y',y_pred) # insert the outcome into the test dataset
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, ['y'], X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
mi = round(mi, 3)
print('*************************')
print(' MI in output',item,'pvalue:' , pval, 'MI:', mi)
print('***************************')
if item not in MI_out.keys():
MI_out[item] = [mi]
else:
MI_out[item] = MI_out[item] + [mi]
print(path1 + str(j) + '.csv')
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, ['y'], pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
#mi = round(mi, 3)
print('*************************')
print(' MI in output (marginal)',item,'pvalue:' , pval, 'MI:', mi)
print('***************************')
ate = cur_test.groupby([D_features[0]])[['y']].mean()
print(ate)
# print("ATE on on test labels", '\n averagee:', mean(ate1), "variancee", var(ate1))
# print("ATE on on outcome", '\n averagee:', mean(ate2), "variancee", var(ate2))
# print("J on on input", '\n averagee:', mean(J1), "variancee", var(J1))
# print("J on on outcome", '\n averagee:', mean(J2), "variancee", var(J2))
print('####################################')
#ate = cur_test.groupby(D_features)[Y_features[0]].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate1.insert(0, m)
ate = cur_test.groupby(D_features)['y'].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate2.insert(0, m)
print('ATE on outcome:',ate)
#new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Outcome J \n', new)
#J2.insert(0,new)
fpr, tpr, thresholds = roc_curve(y, probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
cur_test.to_csv(path1 + '_trained.csv')
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
#print("Mutual Information on repaired traning labels", '\n averagee:', mean(rep_MI_inp), "variancee",var(rep_MI_inp))
#print("ATE on repaired traning labels", '\n averagee:', mean(ate0), "variancee", var(ate0))
#print("Mutual Information on test labels", '\n averagee:', mean(MI_inp.values()), "variancee", var(MI_inp.values()))
#print("Mutual Information on outcome", '\n avg:', mean(MI_out.values()), "variancee", var(MI_out.values()))
print("Mutual Information on train: \n")
pretty(MI_inp)
plt.show()
print("Mutual Information on test: \n")
pretty(MI_test)
#print(" Mutual Information on repaired data", rep_MI_inp)
print("Mutual Information on outcome: \n")
pretty(MI_out)
plt.show()
return MI_out,MI_inp, mean_auc, std_auc
def classification(cur_train,cur_test, dependant, dependee, classifier='log_reg'):
if classifier=='log_reg':
classifier = LogisticRegression()
elif classifier=='rand_forest':
classifier=RandomForestClassifier(max_depth=2, random_state=0)
train_objs_num = len(cur_train)
dataset = pd.concat(objs=[cur_train[dependant], cur_test[ dependant]], axis=0)
dataset = pd.get_dummies(dataset)
dft1 = dataset[:train_objs_num]
dft4 = dataset[train_objs_num:]
train_X = dft1.values
train_y = cur_train[dependee[0]].values
dft5 = pd.get_dummies(cur_test[dependee])
X = dft4.values
y = dft5.values
y = y.flatten()
probas_ = classifier.fit(train_X, train_y).predict_proba(X)
#coef= classifier.coef_
y_pred = classifier.predict(X)
probas_=np.array(probas_)
#cur_test.insert(0, 'prob', probas_[:,0])
cur_test.insert(0,'y',y_pred) # insert the outcome into the test dataset
#cur_test['FP']=cur_test.loc[(cur_test[Y_features] ==1) & (cur_test.y == 1)]
#cur_test['FP'] = cur_test.apply(lambda x: 1 if x[dependee[0]] == 0 and x['y'] == 1 else 0, axis=1)
#cur_test['FN'] = cur_test.apply(lambda x: 1 if x[dependee[0]] == 1 and x['y'] == 0 else 0, axis=1)
print('accuracy',accuracy_score(cur_test[dependee[0]], y_pred, normalize=True))
print('AUC', roc_auc_score(cur_test[dependee[0]], y_pred))
print(confusion_matrix(cur_test[dependee[0]], y_pred))
fpr, tpr, _ = roc_curve(y_pred, cur_test[dependee[0]], drop_intermediate=False)
import matplotlib.pyplot as plt
plt.figure()
##Adding the ROC
plt.plot(fpr, tpr, color='red',
lw=2, label='ROC curve')
##Random FPR and TPR
plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
##Title and label
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC curve')
plt.show()
return cur_test
def old_test_rep_str(indeps, features, protecteds, Y_features, path1, path2, k=5, droped=False, classifier='log_reg',method='original'):
classifer_method=classifier
if Y_features[0] in features:
features.remove(Y_features[0])
D_features=[]
X_features=features
print("Fetures to learn on",X_features+D_features)
if classifier=='log_reg':
classifier = LogisticRegression()
elif classifier=='rand_forest':
classifier=RandomForestClassifier(max_depth=2, random_state=0)
tprs = []
aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    MI_inp = dict()
    MI_out = dict()
    MI_test = dict()
for j in range(0, k):
print(path2+str(j)+'.csv')
cur_train=read_from_csv(path1+str(j)+'.csv')
print(path1+str(j)+'.csv')
cur_test=read_from_csv(path2+str(j)+'.csv')
#atts=cur_train.columns
#atts=atts.tolist()
#list=[att.replace('_x','').replace('_y','') for att in atts]
#atts
for att in protecteds:
ate = cur_train.groupby([att])[Y_features].mean()
print('ATE on train:',att, ate)
for att in protecteds:
ate = cur_test.groupby([att])[Y_features].mean()
print('ATE on test:',att, ate)
i=0
for indep in indeps:
X=indep[0]
Y=indep[1]
Z=indep[2]
for att in [X,Y,Z]:
if 'y' in att:
att.remove('y')
att.insert(0,Y_features[0])
pval, mi = ci_test.ulti_fast_permutation_tst(cur_train, X, Y, Z, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
rmi = round(mi, 3)
print('####################################')
print(len(cur_train.index))
print('MI in train data:', indep,'pvalue:' , pval, 'MI:', rmi)
print('####################################')
MI_inp[i]= [rmi]
i+=1
inf = Info(cur_test)
i=0
for indep in indeps:
X=indep[0]
Y=indep[1]
Z=indep[2]
for att in [X,Y,Z]:
if 'y' in att:
att.remove('y')
att.insert(0,Y_features[0])
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, X, Y, Z, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
rmi = round(mi, 3)
print('####################################')
print(len(cur_test.index))
print('MI in test data:', indep,'pvalue:' , pval, 'MI:', rmi)
print('####################################')
MI_test[i]= [rmi]
i+=1
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
        print('Predictive Power (training)', mi)
inf = Info(cur_test)
mi = inf.CMI(D_features, Y_features,X_features)
mi = round(mi, 3)
        print('Repaired MI test', mi)
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
print('Predictive Power(test)', mi)
#cur_train[Y_features[0]] = pd.to_numeric(cur_train[Y_features[0]])
#ate = cur_train.groupby([D_features[0]])[Y_features[0]].mean()
#print(ate)
# m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE \n', ate)
# new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J \n', new)
#J1.insert(0,new)
#ate = cur_test.groupby([D_features[0]])[Y_features[0]].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE test \n', ate)
#new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J test \n', new)
#J1.insert(0,new)
# print("len",cur_train.columns,len(cur_train.index),cur_train.shape)
# print("len",len(cur_test.index),cur_test.shape)
j += 1
#inf = Info(cur_train)
#MI_inp.insert(0, I)
cur_test['W']=1
train_objs_num = len(cur_train)
dataset = pd.concat(objs=[cur_train[ D_features+X_features], cur_test[ D_features+X_features]], axis=0)
dataset = pd.get_dummies(dataset)
dft1 = dataset[:train_objs_num]
dft4 = dataset[train_objs_num:]
train_X = dft1.values
train_y = cur_train[Y_features[0]].values
# train_y=train_y.flatten()
#if droped:
# dft4 = pd.get_dummies(cur_test[X_features])
#else:
# dft4 = pd.get_dummies(cur_test[ D_features+X_features])
#print(cur_test[D_features+X_features])
dft5 = | pd.get_dummies(cur_test[Y_features]) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
| tm.assert_frame_equal(expected_df, result) | pandas.util.testing.assert_frame_equal |
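# The `_check_pandas_roundtrip` helper used by the tests above is defined
# elsewhere in this test module. A minimal sketch of the idea (a hypothetical
# simplification, not the actual helper): convert the frame to an Arrow table,
# check the schema when one is expected, convert back, and compare frames.
import pandas as pd
import pyarrow as pa

def _sketch_pandas_roundtrip(df, expected=None, expected_schema=None):
    table = pa.Table.from_pandas(df, preserve_index=False)
    if expected_schema is not None:
        # Drop the pandas metadata before comparing field names and types.
        assert table.schema.remove_metadata().equals(expected_schema)
    result = table.to_pandas()
    pd.testing.assert_frame_equal(result, df if expected is None else expected)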
import itertools
from collections.abc import Iterable
from typing import Pattern
from warnings import warn
import numpy as np
import pandas as pd
def _unique(df, columns=None):
if isinstance(columns, str):
columns = [columns]
if not columns:
columns = df.columns.tolist()
info = {}
for col in columns:
values = df[col].dropna().values
uniques = np.unique(list(_flatten_list(values))).tolist()
info[col] = {'count': len(uniques), 'values': uniques}
return info
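# Illustrative use of `_unique` (hedged sketch; toy data). Note that
# `_flatten_list`, used above, is defined elsewhere in this module and is
# assumed to flatten any nested iterables found in a column.
def _example_unique():
    df = pd.DataFrame({'variable_id': ['tas', 'pr', 'tas']})
    # Expected result: {'variable_id': {'count': 2, 'values': ['pr', 'tas']}}
    return _unique(df, columns='variable_id')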
def search(df, require_all_on=None, **query):
"""
Search for entries in a pandas DataFrame.
Parameters
----------
df : pd.DataFrame
Pandas DataFrame to run query against.
require_all_on : list, str, optional
A dataframe column or a list of dataframe columns across
which all entries must satisfy the query criteria.
If None, return entries that fulfill any of the criteria specified
in the query, by default None.
**query:
keyword arguments corresponding to user's query to execute against the dataframe.
Returns
-------
pd.DataFrame
"""
columns_with_iterables = _get_columns_with_iterables(df)
message = 'Query returned zero results.'
if not query:
warn(message)
return pd.DataFrame(columns=df.columns)
condition = np.ones(len(df), dtype=bool)
query = _normalize_query(query)
for key, val in query.items():
condition_i = np.zeros(len(df), dtype=bool)
        column_is_stringtype = isinstance(
            df[key].dtype, (object, pd.core.arrays.string_.StringDtype)  # np.object was removed from numpy; plain object is equivalent
        )
column_has_iterables = key in columns_with_iterables
for val_i in val:
if column_has_iterables:
cond = df[key].str.contains(val_i, regex=False)
else:
value_is_pattern = _is_pattern(val_i)
if column_is_stringtype and value_is_pattern:
cond = df[key].str.contains(val_i, regex=True, case=True, flags=0)
else:
cond = df[key] == val_i
condition_i = condition_i | cond
condition = condition & condition_i
query_results = df.loc[condition]
if require_all_on:
if isinstance(require_all_on, str):
require_all_on = [require_all_on]
_query = query.copy()
# Make sure to remove columns that were already
# specified in the query when specified in `require_all_on`. For example,
# if query = dict(variable_id=["A", "B"], source_id=["FOO", "BAR"])
# and require_all_on = ["source_id"], we need to make sure `source_id` key is
# not present in _query for the logic below to work
for key in require_all_on:
_query.pop(key, None)
keys = list(_query.keys())
grouped = query_results.groupby(require_all_on)
values = [tuple(v) for v in _query.values()]
condition = set(itertools.product(*values))
results = []
for key, group in grouped:
index = group.set_index(keys).index
if not isinstance(index, pd.MultiIndex):
index = {(element,) for element in index.to_list()}
else:
index = set(index.to_list())
if index == condition:
results.append(group)
if len(results) >= 1:
return pd.concat(results).reset_index(drop=True)
warn(message)
return | pd.DataFrame(columns=df.columns) | pandas.DataFrame |
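# A self-contained sketch (toy data, hypothetical values) of the boolean-mask
# logic used in `search` above: the values supplied for one key are OR-ed into
# a single condition, and the per-key conditions are then AND-ed together.
def _example_filter_logic():
    toy = pd.DataFrame(
        {"source_id": ["FOO", "FOO", "BAR"], "variable_id": ["tas", "pr", "tas"]}
    )
    query = {"source_id": ["FOO"], "variable_id": ["tas", "pr"]}
    condition = np.ones(len(toy), dtype=bool)
    for key, values in query.items():
        condition_i = np.zeros(len(toy), dtype=bool)
        for value in values:
            condition_i = condition_i | (toy[key] == value)
        condition = condition & condition_i
    # Keeps the two "FOO" rows; "BAR" fails the source_id criterion.
    return toy.loc[condition]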
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implement transformers for summarizing a time series."""
__author__ = ["mloning", "RNKuhns", "danbartl", "grzegorzrut"]
__all__ = ["SummaryTransformer", "WindowSummarizer"]
import warnings
import pandas as pd
from joblib import Parallel, delayed
from sktime.transformations.base import BaseTransformer
class WindowSummarizer(BaseTransformer):
"""Transformer for extracting time series features.
The WindowSummarizer transforms input series to features based
on a provided dictionary of window summarizer, window shifts
and window lengths.
Parameters
----------
n_jobs : int, optional (default=-1)
The number of jobs to run in parallel for applying the window functions.
``-1`` means using all processors.
target_cols: list of str, optional (default = None)
Specifies which columns in X to target for applying the window functions.
``None`` will target the first column
lag_feature: dict of str and list, optional (default = dict containing first lag)
Dictionary specifying as key the type of function to be used and as value
the argument `window`.
For all keys other than `lag`, the argument `window` is a length 2 list
containing the integer `lag`, which specifies how far back
in the past the window will start, and the integer `window length`,
which will give the length of the window across which to apply the function.
For ease of notation, for the key "lag", only a single integer
specifying the `lag` argument will be provided.
        Please see below a graphical representation of the logic using the following
symbols:
``z`` = time stamp that the window is summarized *to*.
Part of the window if `lag` is between 0 and `1-window_length`, otherwise
not part of the window.
``*`` = (other) time stamps in the window which is summarized
``x`` = observations, past or future, not part of the window
The summarization function is applied to the window consisting of * and
potentially z.
For `window = [1, 3]`, we have a `lag` of 1 and
`window_length` of 3 to target the three last days (exclusive z) that were
observed. Summarization is done across windows like this:
|-------------------------- |
| x x x x x x x x * * * z x |
|---------------------------|
For `window = [0, 3]`, we have a `lag` of 0 and
`window_length` of 3 to target the three last days (inclusive z) that
were observed. Summarization is done across windows like this:
|-------------------------- |
| x x x x x x x x * * z x x |
|---------------------------|
        Special case `lag`: Since lags are frequently used and window length is
redundant, a special notation will be used for lags. You need to provide a list
of `lag` values, and `window_length` is not available.
So `window = [1]` will result in the first lag:
|-------------------------- |
| x x x x x x x x x x * z x |
|---------------------------|
And `window = [1, 4]` will result in the first and fourth lag:
|-------------------------- |
| x x x x x x x * x x * z x |
|---------------------------|
key: either custom function call (to be
provided by user) or str corresponding to native pandas window function:
* "sum",
* "mean",
* "median",
* "std",
* "var",
* "kurt",
* "min",
* "max",
* "corr",
* "cov",
* "skew",
* "sem"
See also: https://pandas.pydata.org/docs/reference/window.html.
The column generated will be named after the key provided, followed by the
lag parameter and the window_length (if not a lag).
second value (window): list of integers
        List containing lag and window_length parameters.
truncate: str, optional (default = None)
Defines how to deal with NAs that were created as a result of applying the
functions in the lag_feature dict across windows that are longer than
the remaining history of data.
For example a lag config of [14, 7] cannot be fully applied for the first 20
observations of the targeted column.
A lag_feature of [[8, 14], [1, 28]] cannot be correctly applied for the
        first 21 and 28 observations of the targeted column, respectively. Possible values
to deal with those NAs:
* None
* "bfill"
None will keep the NAs generated, and would leave it for the user to choose
an estimator that can correctly deal with observations with missing values,
"bfill" will fill the NAs by carrying the first observation backwards.
Attributes
----------
truncate_start : int
See section Parameters - truncate for a more detailed explanation of truncation
as a result of applying windows of certain lengths across past observations.
Truncate_start will give the maximum of observations that are filled with NAs
across all arguments of the lag_feature when truncate is set to None.
Returns
-------
X: pd.DataFrame
Contains all transformed columns as well as non-transformed columns.
The raw inputs to transformed columns will be dropped.
self: reference to self
Examples
--------
>>> import pandas as pd
>>> from sktime.transformations.series.summarize import WindowSummarizer
>>> from sktime.datasets import load_airline, load_longley
>>> from sktime.forecasting.naive import NaiveForecaster
>>> from sktime.forecasting.base import ForecastingHorizon
>>> from sktime.forecasting.compose import ForecastingPipeline
>>> from sktime.forecasting.model_selection import temporal_train_test_split
>>> y = load_airline()
>>> kwargs = {
... "lag_feature": {
... "lag": [1],
... "mean": [[1, 3], [3, 6]],
... "std": [[1, 4]],
... }
... }
>>> transformer = WindowSummarizer(**kwargs)
>>> y_transformed = transformer.fit_transform(y)
Example where we transform on a different, later test set:
>>> y = load_airline()
>>> y_train, y_test = temporal_train_test_split(y)
>>> kwargs = {
... "lag_config": {
... "lag": ["lag", [[1, 0]]],
... "mean": ["mean", [[3, 0], [12, 0]]],
... "std": ["std", [[4, 0]]],
... }
... }
>>> transformer = WindowSummarizer(**kwargs)
>>> y_test_transformed = transformer.fit(y_train).transform(y_test)
Example with transforming multiple columns of exogeneous features
>>> y, X = load_longley()
>>> y_train, y_test, X_train, X_test = temporal_train_test_split(y, X)
>>> fh = ForecastingHorizon(X_test.index, is_relative=False)
>>> # Example transforming only X
>>> pipe = ForecastingPipeline(
... steps=[
... ("a", WindowSummarizer(n_jobs=1, target_cols=["POP", "GNPDEFL"])),
... ("b", WindowSummarizer(n_jobs=1, target_cols=["GNP"], **kwargs)),
... ("forecaster", NaiveForecaster(strategy="drift")),
... ]
... )
>>> pipe_return = pipe.fit(y_train, X_train)
>>> y_pred1 = pipe_return.predict(fh=fh, X=X_test)
Example with transforming multiple columns of exogeneous features
as well as the y column
>>> Z_train = pd.concat([X_train, y_train], axis=1)
>>> Z_test = pd.concat([X_test, y_test], axis=1)
>>> pipe = ForecastingPipeline(
... steps=[
... ("a", WindowSummarizer(n_jobs=1, target_cols=["POP", "TOTEMP"])),
... ("b", WindowSummarizer(**kwargs, n_jobs=1, target_cols=["GNP"])),
... ("forecaster", NaiveForecaster(strategy="drift")),
... ]
... )
>>> pipe_return = pipe.fit(y_train, Z_train)
>>> y_pred2 = pipe_return.predict(fh=fh, X=Z_test)
"""
_tags = {
"scitype:transform-input": "Series",
"scitype:transform-output": "Series",
"scitype:instancewise": True,
"capability:inverse_transform": False,
"scitype:transform-labels": False,
"X_inner_mtype": [
"pd-multiindex",
"pd.DataFrame",
"pd_multiindex_hier",
], # which mtypes do _fit/_predict support for X?
"skip-inverse-transform": True, # is inverse-transform skipped when called?
"univariate-only": False, # can the transformer handle multivariate X?
"handles-missing-data": True, # can estimator handle missing data?
"X-y-must-have-same-index": False, # can estimator handle different X/y index?
"enforce_index_type": None, # index type that needs to be enforced in X/y
"fit_is_empty": False, # is fit empty and can be skipped? Yes = True
"transform-returns-same-time-index": False,
# does transform return have the same time index as input X
}
def __init__(
self,
lag_config=None,
lag_feature=None,
n_jobs=-1,
target_cols=None,
truncate=None,
):
# self._converter_store_X = dict()
self.lag_config = lag_config
self.lag_feature = lag_feature
self.n_jobs = n_jobs
self.target_cols = target_cols
self.truncate = truncate
super(WindowSummarizer, self).__init__()
def _fit(self, X, y=None):
"""Fit transformer to X and y.
Private _fit containing the core logic, called from fit
Attributes
----------
truncate_start : int
See section class WindowSummarizer - Parameters - truncate for a more
detailed explanation of truncation as a result of applying windows of
certain lengths across past observations.
Truncate_start will give the maximum of observations that are filled
with NAs across all arguments of the lag_feature when truncate is
set to None.
Returns
-------
X: pd.DataFrame
Contains all transformed columns as well as non-transformed columns.
The raw inputs to transformed columns will be dropped.
self: reference to self
"""
self._X_memory = X
X_name = get_name_list(X)
if self.target_cols is not None:
if not all(x in X_name for x in self.target_cols):
missing_cols = [x for x in self.target_cols if x not in X_name]
raise ValueError(
"target_cols "
+ " ".join(missing_cols)
+ " specified that do not exist in X."
)
if self.target_cols is None:
self._target_cols = [X_name[0]]
else:
self._target_cols = self.target_cols
# Convert lag config dictionary to pandas dataframe
if self.lag_config is not None:
func_dict = pd.DataFrame(self.lag_config).T.reset_index()
func_dict.rename(
columns={"index": "name", 0: "summarizer", 1: "window"},
inplace=True,
)
func_dict = func_dict.explode("window")
func_dict["window"] = func_dict["window"].apply(lambda x: [x[1] + 1, x[0]])
func_dict.drop("name", inplace=True, axis=1)
warnings.warn(
"Specifying lag features via lag_config is deprecated since 0.12.0,"
+ " and will be removed in 0.13.0. Please use the lag_feature notation"
+ " (see the documentation for the new notation)."
)
else:
if self.lag_feature is None:
func_dict = pd.DataFrame(
{
"lag": [1],
}
).T.reset_index()
else:
func_dict = pd.DataFrame.from_dict(
self.lag_feature, orient="index"
).reset_index()
func_dict = pd.melt(
func_dict, id_vars="index", value_name="window", ignore_index=False
)
func_dict.sort_index(inplace=True)
func_dict.drop("variable", axis=1, inplace=True)
func_dict.rename(
columns={"index": "summarizer"},
inplace=True,
)
func_dict = func_dict.dropna(axis=0, how="any")
# Identify lags (since they can follow special notation)
lags = func_dict["summarizer"] == "lag"
# Convert lags to default list notation with window_length 1
boost_lag = func_dict.loc[lags, "window"].apply(lambda x: [int(x), 1])
func_dict.loc[lags, "window"] = boost_lag
self.truncate_start = func_dict["window"].apply(lambda x: x[0] + x[1]).max()
self._func_dict = func_dict
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
Parameters
----------
X : pd.DataFrame
y : None
Returns
-------
transformed version of X
"""
idx = X.index
X = X.combine_first(self._X_memory)
func_dict = self._func_dict
target_cols = self._target_cols
X.columns = X.columns.map(str)
Xt_out = []
if self.truncate == "bfill":
bfill = True
else:
bfill = False
for cols in target_cols:
if isinstance(X.index, pd.MultiIndex):
hier_levels = list(range(X.index.nlevels - 1))
X_grouped = X.groupby(level=hier_levels)[cols]
df = Parallel(n_jobs=self.n_jobs)(
delayed(_window_feature)(X_grouped, **kwargs, bfill=bfill)
for index, kwargs in func_dict.iterrows()
)
else:
df = Parallel(n_jobs=self.n_jobs)(
delayed(_window_feature)(X.loc[:, [cols]], **kwargs, bfill=bfill)
for _index, kwargs in func_dict.iterrows()
)
Xt = pd.concat(df, axis=1)
Xt = Xt.add_prefix(str(cols) + "_")
Xt_out.append(Xt)
Xt_out_df = pd.concat(Xt_out, axis=1)
Xt_return = pd.concat([Xt_out_df, X.drop(target_cols, axis=1)], axis=1)
Xt_return = Xt_return.loc[idx]
return Xt_return
def _update(self, X, y=None):
"""Update X and return a transformed version.
Parameters
----------
X : pd.DataFrame
y : None
Returns
-------
transformed version of X
"""
self._X_memory = X.combine_first(self._X_memory)
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
Each dict are parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
params1 = {
"lag_feature": {
"lag": [1],
"mean": [[1, 3], [1, 12]],
"std": [[1, 4]],
}
}
params2 = {
"lag_feature": {
"lag": [3, 6],
}
}
params3 = {
"lag_feature": {
"mean": [[1, 7], [8, 7]],
"cov": [[1, 28]],
}
}
return [params1, params2, params3]
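# Illustrative sketch (toy data, outside the estimator) of how the
# `lag_feature` notation maps onto plain pandas: a "mean" entry with window
# [1, 3] summarizes the three observations ending one step in the past,
# i.e. shift(1) followed by rolling(3).mean(); WindowSummarizer would name
# the resulting column "<target>_mean_1_3".
def _example_lag_feature_equivalence():
    y = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    lag, window_length = 1, 3
    manual = y.shift(lag).rolling(window_length).mean()
    # First valid value sits at position lag + window_length - 1 = 3 and
    # equals mean([1.0, 2.0, 3.0]) = 2.0; earlier positions are NaN.
    return manual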
# List of native pandas rolling window function.
# In the future different engines for pandas will be investigated
pd_rolling = [
"sum",
"mean",
"median",
"std",
"var",
"kurt",
"min",
"max",
"corr",
"cov",
"skew",
"sem",
]
def get_name_list(Z):
"""Get names of pd.Series or pd.Dataframe."""
if isinstance(Z, pd.DataFrame):
Z_name = Z.columns.to_list()
else:
if Z.name is not None:
Z_name = [Z.name]
else:
Z_name = None
Z_name = [str(z) for z in Z_name]
return Z_name
def _window_feature(Z, summarizer=None, window=None, bfill=False):
"""Compute window features and lag.
Apply summarizer passed over a certain window
of past observations, e.g. the mean of a window of length 7 days, lagged by 14 days.
Z: pandas Dataframe with a single column.
name : str, base string of the derived features, will be appended by
`lag` and window length parameters defined in window.
summarizer: either str corresponding to pandas window function, currently
* "sum",
* "mean",
* "median",
* "std",
* "var",
* "kurt",
* "min",
* "max",
* "corr",
* "cov",
* "skew",
* "sem"
or custom function call. See for the native window functions also
https://pandas.pydata.org/docs/reference/window.html.
window: list of integers
        List containing lag and window_length parameters, see WindowSummarizer
class description for in-depth explanation.
"""
lag = window[0]
window_length = window[1]
if summarizer in pd_rolling:
if isinstance(Z, pd.core.groupby.generic.SeriesGroupBy):
if bfill is False:
feat = getattr(Z.shift(lag).rolling(window_length), summarizer)()
else:
feat = getattr(
Z.shift(lag).fillna(method="bfill").rolling(window_length),
summarizer,
)()
feat = pd.DataFrame(feat)
else:
if bfill is False:
feat = Z.apply(
lambda x: getattr(x.shift(lag).rolling(window_length), summarizer)()
)
else:
feat = Z.apply(
lambda x: getattr(
x.shift(lag).fillna(method="bfill").rolling(window_length),
summarizer,
)()
)
else:
if bfill is False:
feat = Z.shift(lag)
else:
feat = Z.shift(lag).fillna(method="bfill")
if isinstance(Z, pd.core.groupby.generic.SeriesGroupBy) and callable(
summarizer
):
feat = feat.rolling(window_length).apply(summarizer, raw=True)
elif not isinstance(Z, pd.core.groupby.generic.SeriesGroupBy) and callable(
summarizer
):
feat = feat.apply(
lambda x: x.rolling(window_length).apply(summarizer, raw=True)
)
feat = pd.DataFrame(feat)
if bfill is True:
feat = feat.fillna(method="bfill")
if callable(summarizer):
name = summarizer.__name__
else:
name = summarizer
if name == "lag":
feat.rename(
columns={feat.columns[0]: name + "_" + str(window[0])},
inplace=True,
)
else:
feat.rename(
columns={
feat.columns[0]: name + "_" + "_".join([str(item) for item in window])
},
inplace=True,
)
return feat
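# Illustrative call (toy data) of `_window_feature` with a custom callable
# summarizer: the generated column is named "<function name>_<lag>_<window_length>".
def _example_custom_window_feature():
    def value_range(window_values):
        # With raw=True the summarizer receives a plain numpy array of the window.
        return window_values.max() - window_values.min()
    Z = pd.DataFrame({"y": [1.0, 3.0, 2.0, 5.0, 4.0]})
    # Lag of 1 and window length of 2 -> column "value_range_1_2".
    return _window_feature(Z, summarizer=value_range, window=[1, 2])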
ALLOWED_SUM_FUNCS = [
"mean",
"min",
"max",
"median",
"sum",
"skew",
"kurt",
"var",
"std",
"mad",
"sem",
"nunique",
"count",
]
def _check_summary_function(summary_function):
"""Validate summary_function.
Parameters
----------
summary_function : str, list or tuple
Either a string or list/tuple of strings indicating the pandas summary
        functions ("mean", "min", "max", "median", "sum", "skew", "kurt",
        "var", "std", "mad", "sem", "nunique", "count") that are used to summarize
each column of the dataset.
Returns
-------
summary_function : list or tuple
The summary functions that will be used to summarize the dataset.
"""
msg = f"""`summary_function` must be str or a list or tuple made up of
{ALLOWED_SUM_FUNCS}.
"""
if isinstance(summary_function, str):
if summary_function not in ALLOWED_SUM_FUNCS:
raise ValueError(msg)
summary_function = [summary_function]
elif isinstance(summary_function, (list, tuple)):
if not all([func in ALLOWED_SUM_FUNCS for func in summary_function]):
raise ValueError(msg)
else:
raise ValueError(msg)
return summary_function
def _check_quantiles(quantiles):
"""Validate quantiles.
Parameters
----------
    quantiles : int, float, list, tuple or None
        Either a single value between 0 and 1, or a list/tuple of such values,
        indicating the quantiles that are used to summarize each column of the
        dataset. May also be None, in which case no quantiles are computed.
Returns
-------
quantiles : list or tuple
The validated quantiles that will be used to summarize the dataset.
"""
msg = """`quantiles` must be int, float or a list or tuple made up of
int and float values that are between 0 and 1.
"""
if isinstance(quantiles, (int, float)):
if not 0.0 <= quantiles <= 1.0:
raise ValueError(msg)
quantiles = [quantiles]
elif isinstance(quantiles, (list, tuple)):
if len(quantiles) == 0 or not all(
[isinstance(q, (int, float)) and 0.0 <= q <= 1.0 for q in quantiles]
):
raise ValueError(msg)
elif quantiles is not None:
raise ValueError(msg)
return quantiles
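# Small illustration (hypothetical inputs) of the two validators above:
# single values are normalized to lists, invalid entries raise ValueError.
def _example_check_helpers():
    funcs = _check_summary_function("mean")   # -> ["mean"]
    quants = _check_quantiles(0.5)            # -> [0.5]
    try:
        _check_summary_function("not-a-function")
    except ValueError:
        pass  # rejected: not listed in ALLOWED_SUM_FUNCS
    return funcs, quants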
class SummaryTransformer(BaseTransformer):
"""Calculate summary value of a time series.
For :term:`univariate time series` a combination of summary functions and
quantiles of the input series are calculated. If the input is a
:term:`multivariate time series` then the summary functions and quantiles
are calculated separately for each column.
Parameters
----------
summary_function : str, list, tuple, default=("mean", "std", "min", "max")
Either a string, or list or tuple of strings indicating the pandas
summary functions that are used to summarize each column of the dataset.
Must be one of ("mean", "min", "max", "median", "sum", "skew", "kurt",
"var", "std", "mad", "sem", "nunique", "count").
quantiles : str, list, tuple or None, default=(0.1, 0.25, 0.5, 0.75, 0.9)
Optional list of series quantiles to calculate. If None, no quantiles
are calculated.
See Also
--------
MeanTransformer :
Calculate the mean of a timeseries.
WindowSummarizer:
Extracting features across (shifted) windows from series
Notes
-----
This provides a wrapper around pandas DataFrame and Series agg and
quantile methods.
Examples
--------
>>> from sktime.transformations.series.summarize import SummaryTransformer
>>> from sktime.datasets import load_airline
>>> y = load_airline()
>>> transformer = SummaryTransformer()
>>> y_mean = transformer.fit_transform(y)
"""
_tags = {
"scitype:transform-input": "Series",
# what is the scitype of X: Series, or Panel
"scitype:transform-output": "Primitives",
# what scitype is returned: Primitives, Series, Panel
"scitype:instancewise": True, # is this an instance-wise transform?
"X_inner_mtype": ["pd.DataFrame", "pd.Series"],
# which mtypes do _fit/_predict support for X?
"y_inner_mtype": "None", # which mtypes do _fit/_predict support for X?
"fit_is_empty": True,
}
def __init__(
self,
summary_function=("mean", "std", "min", "max"),
quantiles=(0.1, 0.25, 0.5, 0.75, 0.9),
):
self.summary_function = summary_function
self.quantiles = quantiles
super(SummaryTransformer, self).__init__()
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
private _transform containing the core logic, called from transform
Parameters
----------
X : pd.Series or pd.DataFrame
Data to be transformed
y : ignored argument for interface compatibility
Additional data, e.g., labels for transformation
Returns
-------
summary_value : scalar or pd.Series
If `series_or_df` is univariate then a scalar is returned. Otherwise,
a pd.Series is returned.
"""
Z = X
if self.summary_function is None and self.quantiles is None:
raise ValueError(
"One of `summary_function` and `quantiles` must not be None."
)
summary_function = _check_summary_function(self.summary_function)
quantiles = _check_quantiles(self.quantiles)
summary_value = Z.agg(summary_function)
if quantiles is not None:
quantile_value = Z.quantile(quantiles)
quantile_value.index = [str(s) for s in quantile_value.index]
summary_value = | pd.concat([summary_value, quantile_value]) | pandas.concat |
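# The transform above is a thin wrapper around pandas `agg` and `quantile`;
# a minimal stand-alone sketch of the same computation on toy data:
def _example_summary_transform():
    Z = pd.Series([1.0, 2.0, 3.0, 4.0])
    summary_value = Z.agg(["mean", "std"])
    quantile_value = Z.quantile([0.25, 0.75])
    quantile_value.index = [str(s) for s in quantile_value.index]
    # Resulting index: ["mean", "std", "0.25", "0.75"]
    return pd.concat([summary_value, quantile_value])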
#!/usr/bin/env python3.7
# Copyright [2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import fnmatch #module for unix style pattern matching
import glob #module is used to retrieve files/pathnames matching a specified pattern
from yattag import Doc, indent
import argparse, hashlib, os, subprocess, sys, time
parser = argparse.ArgumentParser(prog='ena-metadata-xml-generator.py', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
+ =================================================================================================================================== +
| European Nucleotide Archive (ENA) Analysis Submission Tool |
| |
| Tool to register study and sample metadata to an ENA project, mainly in the drag and drop tool context. |
|example: python3 metadata_xml_generator.py -u Webin-### -p 'password' -f <dir to the spreadsheet> -a <add/modify> -t <for test server|
+ =================================================================================================================================== +
""")
parser.add_argument('-u', '--username', help='Webin submission account username (e.g. Webin-XXXXX)', type=str, required=True)
parser.add_argument('-p', '--password', help='password for Webin submission account', type=str, required=True)
parser.add_argument('-t', '--test', help='Specify whether to use ENA test server for submission', action='store_true')
parser.add_argument('-f', '--file', help='path for the metadata spreadsheet', type=str, required=True)
parser.add_argument('-a', '--action', help='Specify the type of action needed ( ADD or MODIFY)', type=str, required=True)
args = parser.parse_args()
os.listdir(".") #list files and dirs in wd - make sure you are in the one where the user metadata spreadsheet will be found
files_xlsx = glob.glob(args.file) #should we accept other spreadsheet extensions?
"""
General trimming to the metadata in the spreadsheet and save it in a panda dataframe object
"""
def trimming_the_spreadsheet(df):
trimmed_df = df.iloc[3: ,].copy()
trimmed_df.insert(6,"submission_tool",'drag and drop uploader tool',allow_duplicates=True) #study #to inject constant into trimmed df
trimmed_df.insert(24,"submission_tool",'drag and drop uploader tool',allow_duplicates=True) #sample
trimmed_df.insert(26,"sample capture status",'active surveillance in response to outbreak',allow_duplicates=False)
    trimmed_df.rename(columns={'collecting institute': 'collecting institution'}, inplace=True)  # temp fix for the 'collecting institute' header error
trimmed_df["release_date"] = pd.to_datetime(trimmed_df["release_date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df["collection date"] = pd.to_datetime(trimmed_df["collection date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df["receipt date"] = pd.to_datetime(trimmed_df["receipt date"], errors='coerce').dt.strftime("%Y-%m-%d")
trimmed_df['collection date'] = trimmed_df['collection date'].fillna('not provided')
return trimmed_df
"""
Write pandas dataframe object to study xml file
"""
def study_xml_generator(df):
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
df = df.loc[3: ,'study_alias':'release_date'] # trim the dataframe to the study section only
df = df.iloc[:, :-1]
modified_df = df.where(pd.notnull(df), None) # replace the nan with none values
doc.asis(xml_header)
with tag('STUDY_SET'):
for item in modified_df.to_dict('records'):
if item['study_alias'] != None:
cleaned_item_dict = {k: v for k, v in item.items() if v not in [None, ' ']} # remove all the none and " " values
with tag('STUDY', alias=cleaned_item_dict['study_alias']):
with tag('DESCRIPTOR'):
with tag("STUDY_TITLE"):
text(cleaned_item_dict['study_name'])
doc.stag('STUDY_TYPE', existing_study_type="Other")
with tag('STUDY_ABSTRACT'):
text(cleaned_item_dict['abstract'])
with tag('CENTER_PROJECT_NAME'):
text(cleaned_item_dict['short_description'])
with tag('STUDY_ATTRIBUTES'):
for header, object in cleaned_item_dict.items():
if header not in ['study_alias', 'email_address', 'center_name', 'study_name',
'short_description', 'abstract']:
with tag("STUDY_ATTRIBUTE"):
with tag("TAG"):
text(header)
with tag("VALUE"):
text(object)
result_study = indent(
doc.getvalue(),
indent_text=False
)
with open("study.xml", "w") as f:
f.write(result_study)
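# For reference, the yattag pattern used throughout these generators builds
# nested XML with context managers; a minimal stand-alone sketch using the
# same element names as above (toy values, not a complete ENA study record):
def _example_yattag_pattern():
    doc, tag, text = Doc().tagtext()
    with tag('STUDY_SET'):
        with tag('STUDY', alias='example_alias'):
            with tag('DESCRIPTOR'):
                with tag('STUDY_TITLE'):
                    text('Example title')
                doc.stag('STUDY_TYPE', existing_study_type='Other')  # self-closing tag
    return indent(doc.getvalue(), indent_text=False)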
"""
Write pandas dataframe object to sample xml file
"""
def sample_xml_generator(df):
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
df = df.loc[3:, 'sample_alias':'experiment_name'] # trim the dataframe to the sample section including the "experiment name" to include any user defined fields
df = df.iloc[:, :-1] # remove the last column in the trimmed dataframe ( the "experiment name" column)
modified_df = df.where(pd.notnull(df), None) # replace the nan with none values
doc.asis(xml_header)
with tag('SAMPLE_SET'):
for item in modified_df.to_dict('records'):
if item['sample_alias'] != None:
cleaned_item_dict = {k: v for k, v in item.items() if v not in [None, ' ']} # remove all the none and " " values
if cleaned_item_dict:
with tag('SAMPLE', alias=cleaned_item_dict['sample_alias']):
with tag('TITLE'):
text(cleaned_item_dict['sample_title'])
with tag('SAMPLE_NAME'):
with tag("TAXON_ID"):
text(cleaned_item_dict['tax_id'])
with tag("SCIENTIFIC_NAME"):
text(cleaned_item_dict['scientific_name'])
with tag("DESCRIPTION"):
text(cleaned_item_dict['sample_description'])
with tag('SAMPLE_ATTRIBUTES'):
for header, object in cleaned_item_dict.items():
if header not in ['sample_alias', 'sample_title', 'tax_id', 'scientific_name',
'sample_description']:
with tag("SAMPLE_ATTRIBUTE"):
with tag("TAG"):
text(header)
with tag("VALUE"):
text(object)
if header in ['geographic location (latitude)', 'geographic location (longitude)']:
with tag("UNITS"):
text('DD')
elif header in ['host age']:
with tag("UNITS"):
text('years')
with tag("SAMPLE_ATTRIBUTE"):
with tag("TAG"):
text("ENA-CHECKLIST")
with tag("VALUE"):
text("ERC000033")
result = indent(
doc.getvalue(),
indent_text=False
)
with open("sample.xml", "w") as f:
f.write(result)
"""
Write pandas dataframe object to submission xml file
"""
def submission_xml_generator(df):
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
doc.asis(xml_header)
with tag('SUBMISSION_SET'):
with tag('SUBMISSION'):
with tag("ACTIONS"):
with tag('ACTION'):
doc.stag(args.action.upper())
if not df['release_date'].dropna().empty: # in case of multiple studies, it will take the release date of the first study only - make sure all the study release dates are the same
with tag('ACTION'):
doc.stag('HOLD', HoldUntilDate=str(df.iloc[0]['release_date']))
result_s = indent(
doc.getvalue(),
indentation=' ',
indent_text=False
)
with open("submission.xml", "w") as f:
f.write(result_s)
"""
the submission command of the output xmls from the spreadsheet
"""
def submission_command(df, args):
if not df["sample_alias"].dropna().empty and not df["study_accession"].dropna().empty or df["study_alias"].dropna().empty:
if args.test is True:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" "https://wwwdev.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
if args.test is False:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" "https://www.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
elif not df["study_alias"].dropna().empty and df["study_accession"].dropna().empty and df["sample_alias"].dropna().empty:
if args.test is True:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" "https://wwwdev.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
if args.test is False:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" "https://www.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
else:
if args.test is True:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" -F "[email protected]" "https://wwwdev.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
if args.test is False:
command = 'curl -u {}:{} -F "[email protected]" -F "[email protected]" -F "[email protected]" "https://www.ebi.ac.uk/ena/submit/drop-box/submit/"'.format(
args.username, args.password)
sp = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
print("-" * 100)
print("CURL submission command: \n")
print(command)
print("Returned output: \n")
print(out.decode())
print("-" * 100)
# scanning the metadata spreadsheet
for f in files_xlsx:
# if the spreadsheet is an assembly spreadsheet
if fnmatch.fnmatch(f, '*genome*'):
print('you are using an assembly spreadsheet')
metadata_df = pd.read_excel(f, usecols="L:AW", header=1, sheet_name='Sheet1') #col range suits v4
output_df= trimming_the_spreadsheet(metadata_df)
# if the spreadsheet is a raw read spreadsheet
elif fnmatch.fnmatch(f, '*raw_reads*'):
print('you are using a raw reads spreadsheet')
metadata_df = | pd.read_excel(f, usecols="B:AM", header=1, sheet_name='Sheet1') | pandas.read_excel |
#!/usr/bin/env python
# coding: utf-8
import os
import argparse
from time import time
import pandas as pd
from sqlalchemy import create_engine, table
def main(params):
user = params.user
password = params.password
host = params.host
port = params.port
db = params.db
table_name = params.table_name
url = params.url
csv_name = 'output.csv'
# download the csv
os.system(f"wget {url} -O {csv_name}")
engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
df_iter = pd.read_csv(csv_name, iterator=True, chunksize=100000)
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
while True:
t_start = time()
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = | pd.to_datetime(df.tpep_dropoff_datetime) | pandas.to_datetime |
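        # (Sketch, not the original script) A typical continuation of this
        # chunked-ingestion loop appends each parsed chunk to the table and
        # reports the elapsed time; iteration stops once next(df_iter)
        # raises StopIteration.
        df.to_sql(name=table_name, con=engine, if_exists='append')
        t_end = time()
        print('inserted another chunk, took %.3f second' % (t_end - t_start))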
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
cmip5_scenarios = | pd.read_csv('../data/cmip5/scenario_names.csv') | pandas.read_csv |
import pandas as pd
def get_toy_data_seqclassification():
train_data = {
"sentence1": [
'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
"They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .",
"Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .",
],
"sentence2": [
'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
"Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
"On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
"Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .",
],
"label": [1, 0, 1, 0],
"idx": [0, 1, 2, 3],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"sentence1": [
"The stock rose $ 2.11 , or about 11 percent , to close Friday at $ 21.51 on the New York Stock Exchange .",
"Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The Nasdaq had a weekly gain of 17.27 , or 1.2 percent , closing at 1,520.15 on Friday .",
"The DVD-CCA then appealed to the state Supreme Court .",
],
"sentence2": [
"PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .",
"With the scandal hanging over Stewart 's company , revenue the first quarter of the year dropped 15 percent from the same period a year earlier .",
"The tech-laced Nasdaq Composite .IXIC rallied 30.46 points , or 2.04 percent , to 1,520.15 .",
"The DVD CCA appealed that decision to the U.S. Supreme Court .",
],
"label": [1, 1, 0, 1],
"idx": [4, 5, 6, 7],
}
dev_dataset = pd.DataFrame(dev_data)
test_data = {
"sentence1": [
"That compared with $ 35.18 million , or 24 cents per share , in the year-ago period .",
"Shares of Genentech , a much larger company with several products on the market , rose more than 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won overwhelming House approval in March .",
"The Nasdaq composite index increased 10.73 , or 0.7 percent , to 1,514.77 .",
],
"sentence2": [
"Earnings were affected by a non-recurring $ 8 million tax benefit in the year-ago period .",
"Shares of Xoma fell 16 percent in early trade , while shares of Genentech , a much larger company with several products on the market , were up 2 percent .",
"Legislation making it harder for consumers to erase their debts in bankruptcy court won speedy , House approval in March and was endorsed by the White House .",
"The Nasdaq Composite index , full of technology stocks , was lately up around 18 points .",
],
"label": [0, 0, 0, 0],
"idx": [8, 10, 11, 12],
}
test_dataset = pd.DataFrame(test_data)
custom_sent_keys = ["sentence1", "sentence2"]
label_key = "label"
X_train = train_dataset[custom_sent_keys]
y_train = train_dataset[label_key]
X_val = dev_dataset[custom_sent_keys]
y_val = dev_dataset[label_key]
X_test = test_dataset[custom_sent_keys]
return X_train, y_train, X_val, y_val, X_test
def get_toy_data_multiclassclassification():
train_data = {
"text": [
"i didnt feel humiliated",
"i can go from feeling so hopeless to so damned hopeful just from being around someone who cares and is awake",
"im grabbing a minute to post i feel greedy wrong",
"i am ever feeling nostalgic about the fireplace i will know that it is still on the property",
"i am feeling grouchy",
"ive been feeling a little burdened lately wasnt sure why that was",
"ive been taking or milligrams or times recommended amount and ive fallen asleep a lot faster but i also feel like so funny",
"i feel as confused about life as a teenager or as jaded as a year old man",
"i have been with petronas for years i feel that petronas has performed well and made a huge profit",
"i feel romantic too",
"i feel like i have to make the suffering i m seeing mean something",
"i do feel that running is a divine experience and that i can expect to have some type of spiritual encounter",
],
"label": [0, 0, 3, 2, 3, 0, 5, 4, 1, 2, 0, 1],
}
train_dataset = pd.DataFrame(train_data)
dev_data = {
"text": [
"i think it s the easiest time of year to feel dissatisfied",
"i feel low energy i m just thirsty",
"i have immense sympathy with the general point but as a possible proto writer trying to find time to write in the corners of life and with no sign of an agent let alone a publishing contract this feels a little precious",
"i do not feel reassured anxiety is on each side",
],
"label": [3, 0, 1, 1],
}
dev_dataset = | pd.DataFrame(dev_data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter
import copy
from . import otherfunctions
from pathlib import Path
import warnings
import os
from skimage import feature
# Implement the data structure
class BaseMeasurement:
# Store parameters of the measurement
@property
def params(self):
return self._params
# Store flags identifying bad chirps
@property
def flags(self):
return self._flags
# Store flags identifying bad acquisitions
@property
def acq_flags(self):
return self._acq_flags
# Store actual data
@property
def data(self):
return self._data
@property
def meas_type(self):
return self._meas_type
def __init__(self, shodata=None, parameters=None, xaxis=None, adjustphase=True):
if shodata is None:
self._data = 0
self._params = 0
self._flags = 0
self._acq_flags = 0
else:
self._params = parameters.transpose()
temp_plotgroup = shodata["PlotGroup"].xs(0)
in_out = shodata['InOut'].unstack().xs(0)
self._flags = shodata['Flag'].unstack()
shodata['PR'] = np.zeros(shodata.shape[0])
data = shodata[["Amp", "errA", "Phase", "errP", "Res", "errRes", "Q", "errQ", "PR"]].unstack()
if adjustphase:
temp = data['Phase'].replace([np.inf, -np.inf], np.nan).copy()
phaseMean = temp.fillna(0).mean()
phaseMean = phaseMean.replace([np.inf, -np.inf], np.nan)
phaseMean = phaseMean.fillna(0).mean()
data['Phase'] = data['Phase'] - phaseMean
data['Phase'] = data['Phase'].applymap(lambda x: np.mod(x + np.pi, 2*np.pi) - np.pi)
data['PR'] = data.apply(lambda row: row['Amp'] * np.sin(row['Phase']), axis=1)
data = data.transpose()
data['InOut'] = np.tile(in_out.values, 9)
data.set_index('InOut', append=True, inplace=True)
data['PlotGroup'] = np.tile(temp_plotgroup.values, 9)
data.set_index('PlotGroup', append=True, inplace=True)
if xaxis is not None:
data['xaxis'] = np.tile(xaxis.values, 9)
data.set_index('xaxis', append=True, inplace=True)
data = data.transpose()
self._data = data
self.clean()
def GetDataSubset(self, inout=0.0, plotGroup=None, insert=None, stack=None, clean=False):
inout_vals=self._data.columns.get_level_values(level='InOut')
plotGroup_vals=self._data.columns.get_level_values(level='PlotGroup')
if stack is None:
stack = ['Amp', 'Phase', 'Res', 'Q']
if inout is None:
inout_mask = np.ones(inout_vals.shape)
else:
inout_mask = inout_vals == inout
if plotGroup is None:
pg_mask = np.ones(plotGroup_vals.shape)
else:
pg_mask = plotGroup_vals == plotGroup
mask = np.logical_and(inout_mask, pg_mask)
if clean:
cleanmask = self._acq_flags
else:
cleanmask = np.full(self._acq_flags.shape, False)
return_data = copy.deepcopy(self._data)
return_data = return_data[~cleanmask]
if insert is None:
return return_data.T[mask].T[stack]
else:
return_data.T[mask] = insert
return return_data[stack]
def SetDataSubset(self, set_vals, inout=0.0, plotGroup=None, stack=None, clean=False):
if stack is None:
stack = ['Amp', 'Phase', 'Res', 'Q']
inout_vals = self._data[stack].columns.get_level_values(level='InOut')
plotGroup_vals = self._data[stack].columns.get_level_values(level='PlotGroup')
if inout is None:
inout_mask = np.ones(inout_vals.shape)
else:
inout_mask = inout_vals == inout
if plotGroup is None:
pg_mask = np.ones(plotGroup_vals.shape)
else:
pg_mask = plotGroup_vals == plotGroup
mask = np.logical_and(inout_mask, pg_mask)
if clean:
cleanmask = self._acq_flags
else:
cleanmask = np.full(self._acq_flags.shape, False)
old = self.data[stack].loc[:, mask]
new = pd.DataFrame(set_vals, index=old.index, columns=old.columns)
self._data.update(new)
def clean(self, sensitivity=3, var=None, plot=False):
if var is None:
var = ['Amp', 'Phase', 'Q', 'Res', 'errA', 'errP', 'errQ', 'errRes']
outflags = np.full(self._data[var].values.shape,False)
mask = self._data[var].columns.get_level_values(level='InOut') ==0.0
oodata = self._data[var].T[mask].T.values
indata = self._data[var].T[~mask].T.values
outflags[:, mask] = otherfunctions.cleanbychirp(oodata, sensitivity)
outflags[:, ~mask] = otherfunctions.cleanbychirp(indata, sensitivity)
if plot:
plt.imshow(outflags, cmap='binary')
plt.show()
self._flags = pd.DataFrame(outflags, index=self._data[var].index, columns=self._data[var].columns)
self._acq_flags = otherfunctions.collapseflags(self._flags)
return self._acq_flags
def export(self, inout=[0,1], plotgroups=[0], saveName=None):
for i in inout:
pg_data = []
for pg in plotgroups:
temp = self.GetDataSubset(inout=i, plotGroup=pg, stack=['Amp', 'Phase', 'Res', 'Q', 'errA', 'errP', 'errRes', 'errQ'])
pg_data.append(temp)
allData = | pd.concat(pg_data, axis=1) | pandas.concat |
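# `otherfunctions.cleanbychirp` and `otherfunctions.collapseflags` are defined
# elsewhere in this package. A hedged sketch of the idea behind such
# sensitivity-based cleaning (hypothetical helper, not the package code):
# flag values lying more than `sensitivity` standard deviations from the
# per-column mean, then collapse the flags to one flag per acquisition (row).
def _sketch_clean_by_deviation(values, sensitivity=3):
    mean = np.nanmean(values, axis=0)
    std = np.nanstd(values, axis=0)
    flags = np.abs(values - mean) > sensitivity * std  # per-value outlier flags
    acq_flags = flags.any(axis=1)                      # one flag per acquisition
    return flags, acq_flags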
"""analysis.py: module for manifolds analysis."""
__author__ = "<NAME>, <NAME>, <NAME> and <NAME>"
__copyright__ = "Copyright (c) 2020, 2021, <NAME>, <NAME>, <NAME> and <NAME>"
__credits__ = ["Department of Chemical Engineering, University of Utah, Salt Lake City, Utah, USA", "Universite Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Brussels, Belgium"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
__status__ = "Production"
import numpy as np
import copy as cp
import multiprocessing as multiproc
from PCAfold import KReg
from scipy.spatial import KDTree
from scipy.optimize import minimize
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import random as rnd
from scipy.interpolate import CubicSpline
from PCAfold.styles import *
from PCAfold import preprocess
from PCAfold import reduction
from termcolor import colored
from matplotlib.colors import ListedColormap
import time
################################################################################
#
# Manifold assessment
#
################################################################################
class VarianceData:
"""
A class for storing helpful quantities in analyzing dimensionality of manifolds through normalized variance measures.
This class will be returned by ``compute_normalized_variance``.
:param bandwidth_values:
the array of bandwidth values (Gaussian filter widths) used in computing the normalized variance for each variable
:param normalized_variance:
dictionary of the normalized variance computed at each of the bandwidth values for each variable
:param global_variance:
dictionary of the global variance for each variable
:param bandwidth_10pct_rise:
dictionary of the bandwidth value corresponding to a 10% rise in the normalized variance for each variable
:param variable_names:
list of the variable names
:param normalized_variance_limit:
dictionary of the normalized variance computed as the bandwidth approaches zero (numerically at :math:`10^{-16}`) for each variable
"""
def __init__(self, bandwidth_values, norm_var, global_var, bandwidth_10pct_rise, keys, norm_var_limit):
self._bandwidth_values = bandwidth_values.copy()
self._normalized_variance = norm_var.copy()
self._global_variance = global_var.copy()
self._bandwidth_10pct_rise = bandwidth_10pct_rise.copy()
self._variable_names = keys.copy()
self._normalized_variance_limit = norm_var_limit.copy()
@property
def bandwidth_values(self):
"""return the bandwidth values (Gaussian filter widths) used in computing the normalized variance for each variable"""
return self._bandwidth_values.copy()
@property
def normalized_variance(self):
"""return a dictionary of the normalized variance computed at each of the bandwidth values for each variable"""
return self._normalized_variance.copy()
@property
def global_variance(self):
"""return a dictionary of the global variance for each variable"""
return self._global_variance.copy()
@property
def bandwidth_10pct_rise(self):
"""return a dictionary of the bandwidth value corresponding to a 10% rise in the normalized variance for each variable"""
return self._bandwidth_10pct_rise.copy()
@property
def variable_names(self):
"""return a list of the variable names"""
return self._variable_names.copy()
@property
def normalized_variance_limit(self):
"""return a dictionary of the normalized variance computed as the
bandwidth approaches zero (numerically at 1.e-16) for each variable"""
return self._normalized_variance_limit.copy()
# ------------------------------------------------------------------------------
def compute_normalized_variance(indepvars, depvars, depvar_names, npts_bandwidth=25, min_bandwidth=None,
max_bandwidth=None, bandwidth_values=None, scale_unit_box=True, n_threads=None):
"""
Compute a normalized variance (and related quantities) for analyzing manifold dimensionality.
The normalized variance is computed as
.. math::
\\mathcal{N}(\\sigma) = \\frac{\\sum_{i=1}^n (y_i - \\mathcal{K}(\\hat{x}_i; \\sigma))^2}{\\sum_{i=1}^n (y_i - \\bar{y} )^2}
where :math:`\\bar{y}` is the average quantity over the whole manifold and :math:`\\mathcal{K}(\\hat{x}_i; \\sigma)` is the
weighted average quantity calculated using kernel regression with a Gaussian kernel of bandwidth :math:`\\sigma` centered
around the :math:`i^{th}` observation. :math:`n` is the number of observations.
:math:`\\mathcal{N}(\\sigma)` is computed for each bandwidth in an array of bandwidth values.
By default, the ``indepvars`` (:math:`x`) are centered and scaled to reside inside a unit box (resulting in :math:`\\hat{x}`) so that the bandwidths have the
same meaning in each dimension. Therefore, the bandwidth and its involved calculations are applied in the normalized
independent variable space. This may be turned off by setting ``scale_unit_box`` to False.
The bandwidth values may be specified directly through ``bandwidth_values`` or default values will be calculated as a
logspace from ``min_bandwidth`` to ``max_bandwidth`` with ``npts_bandwidth`` number of values. If left unspecified,
``min_bandwidth`` and ``max_bandwidth`` will be calculated as the minimum and maximum nonzero distance between points, respectively.
More information can be found in :cite:`Armstrong2021`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], bandwidth_values=np.logspace(-3, 1, 20), scale_unit_box=True)
# Access bandwidth values:
variance_data.bandwidth_values
# Access normalized variance values:
variance_data.normalized_variance
# Access normalized variance values for a specific variable:
variance_data.normalized_variance['B']
:param indepvars:
``numpy.ndarray`` specifying the independent variable values. It should be of size ``(n_observations,n_independent_variables)``.
:param depvars:
``numpy.ndarray`` specifying the dependent variable values. It should be of size ``(n_observations,n_dependent_variables)``.
:param depvar_names:
``list`` of ``str`` corresponding to the names of the dependent variables (for saving values in a dictionary)
:param npts_bandwidth:
(optional, default 25) number of points to build a logspace of bandwidth values
:param min_bandwidth:
(optional, default to minimum nonzero interpoint distance) minimum bandwidth
:param max_bandwidth:
(optional, default to estimated maximum interpoint distance) maximum bandwidth
:param bandwidth_values:
(optional) array of bandwidth values, i.e. filter widths for a Gaussian filter, to loop over
:param scale_unit_box:
(optional, default True) center/scale the independent variables between [0,1] for computing a normalized variance so the bandwidth values have the same meaning in each dimension
:param n_threads:
(optional, default None) number of threads to run this computation. If None, default behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.
:return:
- **variance_data** - an object of the ``VarianceData`` class.
"""
assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."
assert (indepvars.shape[0] == depvars.shape[
0]), "The number of observations for dependent and independent variables must match."
assert (len(depvar_names) == depvars.shape[
1]), "The provided keys do not match the shape of the dependent variables yi."
if scale_unit_box:
xi = (indepvars - np.min(indepvars, axis=0)) / (np.max(indepvars, axis=0) - np.min(indepvars, axis=0))
else:
xi = indepvars.copy()
yi = depvars.copy()
if bandwidth_values is None:
if min_bandwidth is None:
tree = KDTree(xi)
min_bandwidth = np.min(tree.query(xi, k=2)[0][tree.query(xi, k=2)[0][:, 1] > 1.e-16, 1])
if max_bandwidth is None:
max_bandwidth = np.linalg.norm(np.max(xi, axis=0) - np.min(xi, axis=0)) * 10.
bandwidth_values = np.logspace(np.log10(min_bandwidth), np.log10(max_bandwidth), npts_bandwidth)
else:
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("bandwidth_values must be an array.")
lvar = np.zeros((bandwidth_values.size, yi.shape[1]))
kregmod = KReg(xi, yi) # class for kernel regression evaluations
# define a list of argments for kregmod_predict
fcnArgs = [(xi, bandwidth_values[si]) for si in range(bandwidth_values.size) ]
pool = multiproc.Pool(processes=n_threads)
kregmodResults = pool.starmap( kregmod.predict, fcnArgs)
pool.close()
pool.join()
for si in range(bandwidth_values.size):
lvar[si, :] = np.linalg.norm(yi - kregmodResults[si], axis=0) ** 2
# saving the local variance for each yi...
local_var = dict({key: lvar[:, idx] for idx, key in enumerate(depvar_names)})
# saving the global variance for each yi...
global_var = dict(
{key: np.linalg.norm(yi[:, idx] - np.mean(yi[:, idx])) ** 2 for idx, key in enumerate(depvar_names)})
# saving the values of the bandwidth where the normalized variance increases by 10%...
bandwidth_10pct_rise = dict()
for key in depvar_names:
bandwidth_idx = np.argwhere(local_var[key] / global_var[key] >= 0.1)
if len(bandwidth_idx) == 0.:
bandwidth_10pct_rise[key] = None
else:
bandwidth_10pct_rise[key] = bandwidth_values[bandwidth_idx[0]][0]
norm_local_var = dict({key: local_var[key] / global_var[key] for key in depvar_names})
# computing normalized variance as bandwidth approaches zero to check for non-uniqueness
lvar_limit = kregmod.predict(xi, 1.e-16)
nlvar_limit = np.linalg.norm(yi - lvar_limit, axis=0) ** 2
normvar_limit = dict({key: nlvar_limit[idx] for idx, key in enumerate(depvar_names)})
solution_data = VarianceData(bandwidth_values, norm_local_var, global_var, bandwidth_10pct_rise, depvar_names, normvar_limit)
return solution_data
# ------------------------------------------------------------------------------
def normalized_variance_derivative(variance_data):
"""
Compute a scaled normalized variance derivative on a logarithmic scale, :math:`\\hat{\\mathcal{D}}(\\sigma)`, from
.. math::
        \\mathcal{D}(\\sigma) = \\frac{\\mathrm{d}\\mathcal{N}(\\sigma)}{\\mathrm{d}\\log_{10}(\\sigma)} + \\lim_{\\sigma \\to 0} \\mathcal{N}(\\sigma)
and
.. math::
\\hat{\\mathcal{D}}(\\sigma) = \\frac{\\mathcal{D}(\\sigma)}{\\max(\\mathcal{D}(\\sigma))}
This value relays how fast the variance is changing as the bandwidth changes and captures non-uniqueness from
    nonzero values of :math:`\\lim_{\\sigma \\to 0} \\mathcal{N}(\\sigma)`. The derivative is approximated
with central finite differencing and the limit is approximated by :math:`\\mathcal{N}(\\sigma=10^{-16})` using the
``normalized_variance_limit`` attribute of the ``VarianceData`` object.
More information can be found in :cite:`Armstrong2021`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance, normalized_variance_derivative
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], bandwidth_values=np.logspace(-3, 1, 20), scale_unit_box=True)
# Compute normalized variance derivative:
(derivative, bandwidth_values, max_derivative) = normalized_variance_derivative(variance_data)
# Access normalized variance derivative values for a specific variable:
derivative['B']
:param variance_data:
a ``VarianceData`` class returned from ``compute_normalized_variance``
:return:
- **derivative_dict** - a dictionary of :math:`\\hat{\\mathcal{D}}(\\sigma)` for each variable in the provided ``VarianceData`` object
- **x** - the :math:`\\sigma` values where :math:`\\hat{\\mathcal{D}}(\\sigma)` was computed
- **max_derivatives_dict** - a dictionary of :math:`\\max(\\mathcal{D}(\\sigma))` values for each variable in the provided ``VarianceData`` object.
"""
x_plus = variance_data.bandwidth_values[2:]
x_minus = variance_data.bandwidth_values[:-2]
x = variance_data.bandwidth_values[1:-1]
derivative_dict = {}
max_derivatives_dict = {}
for key in variance_data.variable_names:
y_plus = variance_data.normalized_variance[key][2:]
y_minus = variance_data.normalized_variance[key][:-2]
derivative = (y_plus-y_minus)/(np.log10(x_plus)-np.log10(x_minus)) + variance_data.normalized_variance_limit[key]
scaled_derivative = derivative/np.max(derivative)
derivative_dict[key] = scaled_derivative
max_derivatives_dict[key] = np.max(derivative)
return derivative_dict, x, max_derivatives_dict
# ------------------------------------------------------------------------------
def find_local_maxima(dependent_values, independent_values, logscaling=True, threshold=1.e-2, show_plot=False):
"""
Finds and returns locations and values of local maxima in a dependent variable given a set of observations.
The functional form of the dependent variable is approximated with a cubic spline for smoother approximations to local maxima.
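**Example:**
A minimal usage sketch on a dummy data set; the data, variable names, bandwidth range and threshold below are illustrative assumptions only:
.. code:: python
from PCAfold import PCA, compute_normalized_variance, normalized_variance_derivative, find_local_maxima
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], bandwidth_values=np.logspace(-3, 1, 20))
# Compute the normalized variance derivative:
(derivative, sigma, _) = normalized_variance_derivative(variance_data)
# Find local maxima of the derivative for variable 'A':
(peak_locations, peak_values) = find_local_maxima(derivative['A'], sigma, logscaling=True, threshold=1.e-2)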
:param dependent_values:
observations of a single dependent variable such as :math:`\\hat{\\mathcal{D}}` from ``normalized_variance_derivative`` (for a single variable).
:param independent_values:
observations of a single independent variable such as :math:`\\sigma` returned by ``normalized_variance_derivative``
:param logscaling:
(optional, default True) this logarithmically scales ``independent_values`` before finding local maxima. This is needed for scaling :math:`\\sigma` appropriately before finding peaks in :math:`\\hat{\\mathcal{D}}`.
:param threshold:
(optional, default :math:`10^{-2}`) local maxima found below this threshold will be ignored.
:param show_plot:
(optional, default False) when True, a plot of the ``dependent_values`` over ``independent_values`` (logarithmically scaled if ``logscaling`` is True) with the local maxima highlighted will be shown.
:return:
- the locations of local maxima in ``dependent_values``
- the local maxima values
"""
if logscaling:
independent_values = np.log10(independent_values.copy())
zero_indices = []
upslope = True
npts = independent_values.size
for i in range(1, npts):
if upslope and dependent_values[i] - dependent_values[i - 1] <= 0:
if dependent_values[i] > threshold:
zero_indices.append(i - 1)
upslope = False
if not upslope and dependent_values[i] - dependent_values[i - 1] >= 0:
upslope = True
zero_locations = []
zero_Dvalues = []
for idx in zero_indices:
if idx < 1:
indices = [idx, idx + 1, idx + 2, idx + 3]
elif idx < 2:
indices = [idx - 1, idx, idx + 1, idx + 2]
elif idx > npts - 1:
indices = [idx - 3, idx - 2, idx - 1, idx]
else:
indices = [idx - 2, idx - 1, idx, idx + 1]
Dspl = CubicSpline(independent_values[indices], dependent_values[indices])
sigma_max = minimize(lambda s: -Dspl(s), independent_values[idx])
zero_locations.append(sigma_max.x[0])
zero_Dvalues.append(Dspl(sigma_max.x[0]))
if show_plot:
plt.plot(independent_values, dependent_values, 'k-')
plt.plot(zero_locations, zero_Dvalues, 'r*')
plt.xlim([np.min(independent_values),np.max(independent_values)])
plt.ylim([0., 1.05])
plt.grid()
if logscaling:
plt.xlabel('log$_{10}$(independent variable)')
else:
plt.xlabel('independent variable')
plt.ylabel('dependent variable')
plt.show()
if logscaling:
zero_locations = 10. ** np.array(zero_locations)
return np.array(zero_locations, dtype=float), np.array(zero_Dvalues, dtype=float)
# ------------------------------------------------------------------------------
def random_sampling_normalized_variance(sampling_percentages, indepvars, depvars, depvar_names,
n_sample_iterations=1, verbose=True, npts_bandwidth=25, min_bandwidth=None,
max_bandwidth=None, bandwidth_values=None, scale_unit_box=True, n_threads=None):
"""
Compute the normalized variance derivatives :math:`\\hat{\\mathcal{D}}(\\sigma)` for random samples of the provided
data specified using ``sampling_percentages``. These will be averaged over ``n_sample_iterations`` iterations. Analyzing
the shift in peaks of :math:`\\hat{\\mathcal{D}}(\\sigma)` due to sampling can distinguish between characteristic
features and non-uniqueness due to a transformation/reduction of manifold coordinates. True features should not show
significant sensitivity to sampling while non-uniqueness/folds in the manifold will.
More information can be found in :cite:`Armstrong2021`.
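**Example:**
A minimal usage sketch on a dummy data set; the sampling fractions, number of iterations and bandwidth values below are illustrative assumptions only:
.. code:: python
from PCAfold import PCA, random_sampling_normalized_variance
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute the averaged normalized variance derivatives for 50% and 100% of the data:
(avg_derivatives, sigma, variance_data) = random_sampling_normalized_variance([0.5, 1.0], principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], n_sample_iterations=3, verbose=False, bandwidth_values=np.logspace(-3, 1, 20))
# Access the averaged derivative for variable 'A' at 50% sampling:
avg_derivatives[0.5]['A']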
:param sampling_percentages:
list or 1D array of fractions (between 0 and 1) of the provided data to sample for computing the normalized variance
:param indepvars:
independent variable values (size: n_observations x n_independent variables)
:param depvars:
dependent variable values (size: n_observations x n_dependent variables)
:param depvar_names:
list of strings corresponding to the names of the dependent variables (for saving values in a dictionary)
:param n_sample_iterations:
(optional, default 1) how many iterations for each ``sampling_percentages`` to average the normalized variance derivative over
:param verbose:
(optional, default True) when True, progress statements are printed
:param npts_bandwidth:
(optional, default 25) number of points to build a logspace of bandwidth values
:param min_bandwidth:
(optional, default to minimum nonzero interpoint distance) minimum bandwidth
:param max_bandwidth:
(optional, default to estimated maximum interpoint distance) maximum bandwidth
:param bandwidth_values:
(optional) array of bandwidth values, i.e. filter widths for a Gaussian filter, to loop over
:param scale_unit_box:
(optional, default True) center/scale the independent variables between [0,1] for computing a normalized variance so the bandwidth values have the same meaning in each dimension
:param n_threads:
(optional, default None) number of threads to run this computation. If None, default behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.
:return:
- a dictionary of the normalized variance derivative (:math:`\\hat{\\mathcal{D}}(\\sigma)`) for each sampling percentage in ``sampling_percentages`` averaged over ``n_sample_iterations`` iterations
- the :math:`\\sigma` values used for computing :math:`\\hat{\\mathcal{D}}(\\sigma)`
- a dictionary of the ``VarianceData`` objects for each sampling percentage and iteration in ``sampling_percentages`` and ``n_sample_iterations``
"""
assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."
if isinstance(sampling_percentages, list):
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
elif isinstance(sampling_percentages, np.ndarray):
assert sampling_percentages.ndim ==1, "sampling_percentages must be given as a list or 1D array"
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
else:
raise ValueError("sampling_percentages must be given as a list or 1D array.")
normvar_data = {}
avg_der_data = {}
for p in sampling_percentages:
if verbose:
print('sampling', p * 100., '% of the data')
nv_data = {}
avg_der = {}
for it in range(n_sample_iterations):
if verbose:
print(' iteration', it + 1, 'of', n_sample_iterations)
rnd.seed(it)
idxsample = rnd.sample(list(np.arange(0, indepvars.shape[0])), int(p * indepvars.shape[0]))
nv_data[it] = compute_normalized_variance(indepvars[idxsample, :], depvars[idxsample, :], depvar_names,
npts_bandwidth=npts_bandwidth, min_bandwidth=min_bandwidth,
max_bandwidth=max_bandwidth, bandwidth_values=bandwidth_values,
scale_unit_box=scale_unit_box, n_threads=n_threads)
der, xder, _ = normalized_variance_derivative(nv_data[it])
for key in der.keys():
if it == 0:
avg_der[key] = der[key] / float(n_sample_iterations)
else:
avg_der[key] += der[key] / float(n_sample_iterations)
avg_der_data[p] = avg_der
normvar_data[p] = nv_data
return avg_der_data, xder, normvar_data
# ------------------------------------------------------------------------------
def average_knn_distance(indepvars, n_neighbors=10, verbose=False):
"""
Computes the average Euclidean distance to :math:`k` nearest neighbors on
a manifold defined by the independent variables.
**Example:**
.. code:: python
from PCAfold import PCA, average_knn_distance
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='none', n_components=2, use_eigendec=True, nocenter=False)
# Calculate the principal components:
principal_components = pca_X.transform(X)
# Compute average distances on a manifold defined by the PCs:
average_distances = average_knn_distance(principal_components, n_neighbors=10, verbose=True)
With ``verbose=True``, the minimum, maximum, average and median distances will be printed:
.. code-block:: text
Minimum distance: 0.1388300829487847
Maximum distance: 0.4689587542132183
Average distance: 0.20824964953425693
Median distance: 0.18333873029179215
.. note::
This function requires the ``scikit-learn`` module. You can install it through:
``pip install scikit-learn``
:param indepvars:
``numpy.ndarray`` specifying the independent variable values. It should be of size ``(n_observations,n_independent_variables)``.
:param n_neighbors: (optional)
``int`` specifying the number of nearest neighbors, :math:`k`.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **average_distances** - ``numpy.ndarray`` specifying the vector of average distances for every observation in a data set to its :math:`k` nearest neighbors. It has size ``(n_observations,)``.
"""
if not isinstance(indepvars, np.ndarray):
raise ValueError("Parameter `indepvars` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_independent_variables) = np.shape(indepvars)
except:
raise ValueError("Parameter `indepvars` has to have size `(n_observations,n_independent_variables)`.")
if not isinstance(n_neighbors, int):
raise ValueError("Parameter `n_neighbors` has to be of type int.")
if n_neighbors < 2:
raise ValueError("Parameter `n_neighbors` cannot be smaller than 2.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be a boolean.")
try:
from sklearn.neighbors import NearestNeighbors
except:
raise ValueError("Nearest neighbors search requires the `sklearn` module: `pip install scikit-learn`.")
(n_observations, n_independent_variables) = np.shape(indepvars)
knn_model = NearestNeighbors(n_neighbors=n_neighbors+1)
knn_model.fit(indepvars)
average_distances = np.zeros((n_observations,))
for query_point in range(0,n_observations):
(distances_neigh, idx_neigh) = knn_model.kneighbors(indepvars[query_point,:][None,:], n_neighbors=n_neighbors+1, return_distance=True)
query_point_idx = np.where(idx_neigh.ravel()==query_point)
distances_neigh = np.delete(distances_neigh.ravel(), np.s_[query_point_idx])
idx_neigh = np.delete(idx_neigh.ravel(), np.s_[query_point_idx])
average_distances[query_point] = np.mean(distances_neigh)
if verbose:
print('Minimum distance:\t' + str(np.min(average_distances)))
print('Maximum distance:\t' + str(np.max(average_distances)))
print('Average distance:\t' + str(np.mean(average_distances)))
print('Median distance:\t' + str(np.median(average_distances)))
return average_distances
# ------------------------------------------------------------------------------
def cost_function_normalized_variance_derivative(variance_data, penalty_function=None, norm=None, integrate_to_peak=False):
"""
Defines a cost function for manifold topology optimization based on the areas, or weighted (penalized) areas, under
the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`, for the selected :math:`n_{dep}` dependent variables.
An individual area, :math:`A_i`, for the :math:`i^{th}` dependent variable, is computed by directly integrating the function :math:`\\hat{\\mathcal{D}}_i(\\sigma)`
in the :math:`\\log_{10}` space of bandwidths :math:`\\sigma`. Integration is performed using the composite trapezoid rule.
When ``integrate_to_peak=False``, the bounds of integration go from the minimum bandwidth, :math:`\\sigma_{min, i}`,
to the maximum bandwidth, :math:`\\sigma_{max, i}`:
.. math::
A_i = \\int_{\\sigma_{min, i}}^{\\sigma_{max, i}} \\hat{\\mathcal{D}}_i(\\sigma) d \\log_{10} \\sigma
.. image:: ../images/cost-function-D-hat.svg
:width: 600
:align: center
When ``integrate_to_peak=True``, the bounds of integration go from the minimum bandwidth, :math:`\\sigma_{min, i}`,
to the bandwidth for which the rightmost peak happens in :math:`\\hat{\\mathcal{D}}_i(\\sigma)`, :math:`\\sigma_{peak, i}`:
.. math::
A_i = \\int_{\\sigma_{min, i}}^{\\sigma_{peak, i}} \\hat{\\mathcal{D}}_i(\\sigma) d \\log_{10} \\sigma
.. image:: ../images/cost-function-D-hat-to-peak.svg
:width: 600
:align: center
In addition, each individual area, :math:`A_i`, can be weighted. Three weighting options are available:
- If ``penalty_function='peak'``, :math:`A_i` is weighted by the inverse of the rightmost peak location:
.. math::
A_i = \\frac{1}{\\sigma_{peak, i}} \\cdot \\int \\hat{\\mathcal{D}}_i(\\sigma) d(\\log_{10} \\sigma)
- If ``penalty_function='sigma'``, :math:`A_i` is weighted continuously by the bandwidth:
.. math::
A_i = \\int \\frac{\\hat{\\mathcal{D}}_i(\\sigma)}{\\sigma} d(\\log_{10} \\sigma)
This type of weighting *strongly* penalizes the area happening at lower bandwidth values:
.. image:: ../images/cost-function-sigma-penalty.svg
:width: 600
:align: center
- If ``penalty_function='log-sigma-over-peak'``, :math:`A_i` is weighted continuously by the :math:`\\log_{10}` -transformed bandwidth\
and takes into account information about the rightmost peak location:
.. math::
A_i = \\int \\Big( \\big| \\log_{10} \\big( \\frac{\\sigma}{\\sigma_{peak, i}} \\big) \\big| + \\frac{1}{||\\sigma_{peak, i}||_{0-1}} \\Big) \\cdot \\hat{\\mathcal{D}}_i(\\sigma) d(\\log_{10} \\sigma)
where :math:`||\\sigma_{peak, i}||_{0-1}` is the rightmost peak location expressed in a normalized 0-1 range. The normalization is performed so that :math:`||\\sigma_{min, i}||_{0-1} = 0.0` and :math:`||\\sigma_{max, i}||_{0-1} = 1.0`.
This type of weighting creates a more gentle penalty for the area happening further from the rightmost peak location:
.. image:: ../images/cost-function-log-sigma-over-peak-penalty.svg
:width: 600
:align: center
If ``norm=None``, a list of costs for all dependent variables is returned.
Otherwise, the final cost, :math:`\\mathcal{L}`, can be computed from all :math:`A_i` in a few ways,
where :math:`n_{dep}` is the number of dependent variables stored in the ``variance_data`` object:
- If ``norm='average'``, :math:`\\mathcal{L} = \\frac{1}{n_{dep}} \\sum_{i = 1}^{n_{dep}} A_i`.
- If ``norm='cumulative'``, :math:`\\mathcal{L} = \\sum_{i = 1}^{n_{dep}} A_i`.
- If ``norm='max'``, :math:`\\mathcal{L} = \\text{max} (A_i)`.
- If ``norm='median'``, :math:`\\mathcal{L} = \\text{median} (A_i)`.
- If ``norm='min'``, :math:`\\mathcal{L} = \\text{min} (A_i)`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance, cost_function_normalized_variance_derivative
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Specify the bandwidth values:
bandwidth_values = np.logspace(-4, 2, 50)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components,
X,
depvar_names=variable_names,
bandwidth_values=bandwidth_values)
# Compute the cost for the current manifold:
cost = cost_function_normalized_variance_derivative(variance_data,
penalty_function='peak',
norm='max',
integrate_to_peak=True)
:param variance_data:
an object of ``VarianceData`` class.
:param penalty_function: (optional)
``str`` specifying the weighting (penalty) applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the rightmost peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area. If ``norm=None``, a list of costs for all dependent variables is returned.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up to the rightmost peak location.
:return:
- **cost** - ``float`` specifying the normalized cost, :math:`\\mathcal{L}`, or, if ``norm=None``, a list of costs, :math:`A_i`, for each dependent variable.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if norm is not None:
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
derivative, sigma, _ = normalized_variance_derivative(variance_data)
costs = []
for variable in variance_data.variable_names:
idx_peaks, _ = find_peaks(derivative[variable], height=0)
idx_rightmost_peak = idx_peaks[-1]
rightmost_peak_location = sigma[idx_rightmost_peak]
(indices_to_the_left_of_peak, ) = np.where(sigma<=rightmost_peak_location)
if integrate_to_peak:
if penalty_function is None:
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak], np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'peak':
cost = 1. / (rightmost_peak_location) * np.trapz(derivative[variable][indices_to_the_left_of_peak], np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'sigma':
penalty_sigma = 1./sigma[indices_to_the_left_of_peak]
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak]*penalty_sigma, np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'log-sigma-over-peak':
normalized_sigma, _, _ = preprocess.center_scale(np.log10(sigma[:,None]), scaling='0to1')
addition = normalized_sigma[idx_rightmost_peak][0]
penalty_log_sigma_peak = abs(np.log10(sigma[indices_to_the_left_of_peak]/rightmost_peak_location)) + 1./addition
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak]*penalty_log_sigma_peak, np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
else:
if penalty_function is None:
cost = np.trapz(derivative[variable], np.log10(sigma))
costs.append(cost)
elif penalty_function == 'peak':
cost = 1. / (rightmost_peak_location) * np.trapz(derivative[variable], np.log10(sigma))
costs.append(cost)
elif penalty_function == 'sigma':
penalty_sigma = 1./sigma
cost = np.trapz(derivative[variable]*penalty_sigma, np.log10(sigma))
costs.append(cost)
elif penalty_function == 'log-sigma-over-peak':
normalized_sigma, _, _ = preprocess.center_scale(np.log10(sigma[:,None]), scaling='0to1')
addition = normalized_sigma[idx_rightmost_peak][0]
penalty_log_sigma_peak = abs(np.log10(sigma/rightmost_peak_location)) + 1./addition
cost = np.trapz(derivative[variable]*penalty_log_sigma_peak, np.log10(sigma))
costs.append(cost)
if norm is None:
return costs
else:
if norm == 'max':
# Take L-infinity norm over all costs:
normalized_cost = np.max(costs)
elif norm == 'average':
# Take the arithmetic average norm over all costs:
normalized_cost = np.mean(costs)
elif norm == 'min':
# Take the minimum norm over all costs:
normalized_cost = np.min(costs)
elif norm == 'median':
# Take the median norm over all costs:
normalized_cost = np.median(costs)
elif norm == 'cumulative':
# Take the cumulative sum over all costs:
normalized_cost = np.sum(costs)
return normalized_cost
# ------------------------------------------------------------------------------
def manifold_informed_feature_selection(X, X_source, variable_names, scaling, bandwidth_values, target_variables=None, add_transformed_source=True, target_manifold_dimensionality=3, bootstrap_variables=None, penalty_function=None, norm='max', integrate_to_peak=False, verbose=False):
"""
Manifold-informed feature selection algorithm based on forward feature addition. The goal of the algorithm is to
select a meaningful subset of the original variables such that
undesired behaviors on a PCA-derived manifold of a given dimensionality are minimized.
The algorithm uses the cost function, :math:`\\mathcal{L}`, based on minimizing the area under the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`,
for the selected :math:`n_{dep}` dependent variables (as per ``cost_function_normalized_variance_derivative`` function).
The algorithm can be bootstrapped in two ways:
- Automatic bootstrap when ``bootstrap_variables=None``: the first best variable is selected automatically as the one that gives the lowest cost.
- User-defined bootstrap when ``bootstrap_variables`` is set to a user-defined list of the bootstrap variables.
At each iteration, the algorithm adds the variable that yields the lowest cost.
The original variables in a data set get ordered according to their effect
on the manifold topology. Assuming that the original data set is composed of :math:`Q` variables,
the first output is a list of indices of the ordered
original variables, :math:`\\mathbf{X} = [X_1, X_2, \\dots, X_Q]`. The second output is a list of indices of the selected
subset of the original variables, :math:`\\mathbf{X}_S = [X_1, X_2, \\dots, X_n]`, that correspond to the minimum cost, :math:`\\mathcal{L}`.
.. note::
The algorithm can be very expensive (for large data sets) due to multiple computations of the normalized variance derivative.
Try running it on multiple cores or on a sampled data set.
If the algorithm fails because it cannot determine a peak
location, try increasing the range of the ``bandwidth_values`` parameter.
**Example:**
.. code:: python
from PCAfold import manifold_informed_feature_selection
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
X_source = np.random.rand(100,10)
# Define original variables to add to the optimization:
target_variables = X[:,0:3]
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Specify the bandwidth values to compute the optimization on:
bandwidth_values = np.logspace(-4, 2, 50)
# Run the subset selection algorithm:
(ordered, selected, costs) = manifold_informed_feature_selection(X,
X_source,
variable_names,
scaling='auto',
bandwidth_values=bandwidth_values,
target_variables=target_variables,
add_transformed_source=True,
target_manifold_dimensionality=2,
bootstrap_variables=None,
penalty_function='peak',
norm='max',
integrate_to_peak=True,
verbose=True)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param X_source:
``numpy.ndarray`` specifying the source terms, :math:`\\mathbf{S_X}`, corresponding to the state-space
variables in :math:`\\mathbf{X}`. This parameter is applicable to data sets
representing reactive flows. More information can be found in :cite:`Sutherland2009`. It should be of size ``(n_observations,n_variables)``.
:param variable_names:
``list`` of ``str`` specifying variables names.
:param scaling: (optional)
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param bandwidth_values:
``numpy.ndarray`` specifying the bandwidth values, :math:`\\sigma`, for :math:`\\hat{\\mathcal{D}}(\\sigma)` computation.
:param target_variables: (optional)
``numpy.ndarray`` specifying the dependent variables that should be used in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation. It should be of size ``(n_observations,n_target_variables)``.
:param add_transformed_source: (optional)
``bool`` specifying if the PCA-transformed source terms of the state-space variables should be added in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation, alongside the user-defined dependent variables.
:param target_manifold_dimensionality: (optional)
``int`` specifying the target dimensionality of the PCA manifold.
:param bootstrap_variables: (optional)
``list`` specifying the user-selected variables to bootstrap the algorithm with. If set to ``None``, automatic bootstrapping is performed.
:param penalty_function: (optional)
``str`` specifying the weighting applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the rightmost peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up to the rightmost peak location.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **ordered_variables** - ``list`` specifying the indices of the ordered variables.
- **selected_variables** - ``list`` specifying the indices of the selected variables that correspond to the minimum cost :math:`\\mathcal{L}`.
- **costs** - ``list`` specifying the costs, :math:`\\mathcal{L}`, from each iteration.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have shape `(n_observations,n_variables)`.")
if not isinstance(X_source, np.ndarray):
raise ValueError("Parameter `X_source` has to be of type `numpy.ndarray`.")
try:
(n_observations_source, n_variables_source) = np.shape(X_source)
except:
raise ValueError("Parameter `X_source` has to have shape `(n_observations,n_variables)`.")
if n_variables_source != n_variables:
raise ValueError("Parameter `X_source` has different number of variables than `X`.")
if n_observations_source != n_observations:
raise ValueError("Parameter `X_source` has different number of observations than `X`.")
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
if len(variable_names) != n_variables:
raise ValueError("Parameter `variable_names` has different number of variables than `X`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be of type `str`.")
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("Parameter `bandwidth_values` has to be of type `numpy.ndarray`.")
if target_variables is not None:
if not isinstance(target_variables, np.ndarray):
raise ValueError("Parameter `target_variables` has to be of type `numpy.ndarray`.")
try:
(n_d_hat_observations, n_target_variables) = np.shape(target_variables)
target_variables_names = ['X' + str(i) for i in range(0,n_target_variables)]
except:
raise ValueError("Parameter `target_variables` has to have shape `(n_observations,n_target_variables)`.")
if n_d_hat_observations != n_observations_source:
raise ValueError("Parameter `target_variables` has different number of observations than `X_source`.")
if not isinstance(add_transformed_source, bool):
raise ValueError("Parameter `add_transformed_source` has to be of type `bool`.")
if target_variables is None:
if not add_transformed_source:
raise ValueError("Either `target_variables` has to be specified or `add_transformed_source` has to be set to True.")
if not isinstance(target_manifold_dimensionality, int):
raise ValueError("Parameter `target_manifold_dimensionality` has to be of type `int`.")
if bootstrap_variables is not None:
if not isinstance(bootstrap_variables, list):
raise ValueError("Parameter `bootstrap_variables` has to be of type `list`.")
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
variables_indices = [i for i in range(0,n_variables)]
costs = []
# Automatic bootstrapping: -------------------------------------------------
if bootstrap_variables is None:
if verbose: print('Automatic bootstrapping...\n')
bootstrap_cost_function = []
bootstrap_tic = time.perf_counter()
for i_variable in variables_indices:
if verbose: print('\tCurrently checking variable:\t' + variable_names[i_variable])
PCs = X[:,[i_variable]]
PC_sources = X_source[:,[i_variable]]
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ1']
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ1'] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
bootstrap_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
bootstrap_area = cost_function_normalized_variance_derivative(bootstrap_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % bootstrap_area)
bootstrap_cost_function.append(bootstrap_area)
# Find a single best variable to bootstrap with:
(best_bootstrap_variable_index, ) = np.where(np.array(bootstrap_cost_function)==np.min(bootstrap_cost_function))
# If multiple variables tie for the minimum cost, take the first one:
best_bootstrap_variable_index = int(best_bootstrap_variable_index[0])
costs.append(np.min(bootstrap_cost_function))
bootstrap_variables = [best_bootstrap_variable_index]
if verbose: print('\n\tVariable ' + variable_names[best_bootstrap_variable_index] + ' will be used as bootstrap.\n\tCost:\t%.4f' % np.min(bootstrap_cost_function) + '\n')
bootstrap_toc = time.perf_counter()
if verbose: print(f'Bootstrapping time: {(bootstrap_toc - bootstrap_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Use user-defined bootstrapping: -----------------------------------------
else:
# Manifold dimensionality needs a fix here!
if verbose: print('User-defined bootstrapping...\n')
bootstrap_cost_function = []
bootstrap_tic = time.perf_counter()
if len(bootstrap_variables) < target_manifold_dimensionality:
n_components = len(bootstrap_variables)
else:
n_components = cp.deepcopy(target_manifold_dimensionality)
if verbose: print('\tUser-defined bootstrapping will be performed for a ' + str(n_components) + '-dimensional manifold.')
bootstrap_pca = reduction.PCA(X[:,bootstrap_variables], scaling=scaling, n_components=n_components)
PCs = bootstrap_pca.transform(X[:,bootstrap_variables])
PC_sources = bootstrap_pca.transform(X_source[:,bootstrap_variables], nocenter=True)
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,n_components)]
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,n_components)] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
bootstrap_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
bootstrap_area = cost_function_normalized_variance_derivative(bootstrap_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
bootstrap_cost_function.append(bootstrap_area)
costs.append(bootstrap_area)
if verbose: print('\n\tVariable(s) ' + ', '.join([variable_names[i] for i in bootstrap_variables]) + ' will be used as bootstrap\n\tCost:\t%.4f' % np.min(bootstrap_area) + '\n')
bootstrap_toc = time.perf_counter()
if verbose: print(f'Bootstrapping time: {(bootstrap_toc - bootstrap_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Iterate the algorithm starting from the bootstrap selection: -------------
if verbose: print('Optimizing...\n')
total_tic = time.perf_counter()
ordered_variables = [i for i in bootstrap_variables]
remaining_variables_list = [i for i in range(0,n_variables) if i not in bootstrap_variables]
previous_area = np.min(bootstrap_cost_function)
loop_counter = 0
while len(remaining_variables_list) > 0:
iteration_tic = time.perf_counter()
loop_counter += 1
if verbose:
print('Iteration No.' + str(loop_counter))
print('Currently adding variables from the following list: ')
print([variable_names[i] for i in remaining_variables_list])
current_cost_function = []
for i_variable in remaining_variables_list:
if len(ordered_variables) < target_manifold_dimensionality:
n_components = len(ordered_variables) + 1
else:
n_components = cp.deepcopy(target_manifold_dimensionality)
if verbose: print('\tCurrently added variable: ' + variable_names[i_variable])
current_variables_list = ordered_variables + [i_variable]
pca = reduction.PCA(X[:,current_variables_list], scaling=scaling, n_components=n_components)
PCs = pca.transform(X[:,current_variables_list])
PC_sources = pca.transform(X_source[:,current_variables_list], nocenter=True)
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,n_components)]
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,n_components)] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
current_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
current_derivative, current_sigma, _ = normalized_variance_derivative(current_variance_data)
current_area = cost_function_normalized_variance_derivative(current_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % current_area)
current_cost_function.append(current_area)
if current_area <= previous_area:
if verbose: print(colored('\tSAME OR BETTER', 'green'))
else:
if verbose: print(colored('\tWORSE', 'red'))
min_area = np.min(current_cost_function)
(best_variable_index, ) = np.where(np.array(current_cost_function)==min_area)
try:
best_variable_index = int(best_variable_index)
except:
best_variable_index = int(best_variable_index[0])
if verbose: print('\n\tVariable ' + variable_names[remaining_variables_list[best_variable_index]] + ' is added.\n\tCost:\t%.4f' % min_area + '\n')
ordered_variables.append(remaining_variables_list[best_variable_index])
remaining_variables_list = [i for i in range(0,n_variables) if i not in ordered_variables]
if min_area <= previous_area:
previous_area = min_area
costs.append(min_area)
iteration_toc = time.perf_counter()
if verbose: print(f'\tIteration time: {(iteration_toc - iteration_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Compute the optimal subset where the cost is minimized: ------------------
(min_cost_function_index, ) = np.where(costs==np.min(costs))
try:
min_cost_function_index = int(min_cost_function_index)
except:
min_cost_function_index = int(min_cost_function_index[0])
if min_cost_function_index+1 < target_manifold_dimensionality:
selected_variables = list(np.array(ordered_variables)[0:target_manifold_dimensionality])
else:
selected_variables = list(np.array(ordered_variables)[0:min_cost_function_index+1])
if verbose:
print('Ordered variables:')
print(', '.join([variable_names[i] for i in ordered_variables]))
print(ordered_variables)
print('Final cost: %.4f' % min_area)
print('\nSelected variables:')
print(', '.join([variable_names[i] for i in selected_variables]))
print(selected_variables)
print('Lowest cost: %.4f' % previous_area)
total_toc = time.perf_counter()
if verbose: print(f'\nOptimization time: {(total_toc - total_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
return ordered_variables, selected_variables, costs
# ------------------------------------------------------------------------------
def manifold_informed_backward_elimination(X, X_source, variable_names, scaling, bandwidth_values, target_variables=None, add_transformed_source=True, source_space=None, target_manifold_dimensionality=3, penalty_function=None, norm='max', integrate_to_peak=False, verbose=False):
"""
Manifold-informed feature selection algorithm based on backward elimination. The goal of the algorithm is to
select a meaningful subset of the original variables such that
undesired behaviors on a PCA-derived manifold of a given dimensionality are minimized.
The algorithm uses the cost function, :math:`\\mathcal{L}`, based on minimizing the area under the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`,
for the selected :math:`n_{dep}` dependent variables (as per ``cost_function_normalized_variance_derivative`` function).
At each iteration, the algorithm removes the variable whose elimination decreases the cost the most.
The original variables in a data set get ordered according to their effect
on the manifold topology. Assuming that the original data set is composed of :math:`Q` variables,
the first output is a list of indices of the ordered
original variables, :math:`\\mathbf{X} = [X_1, X_2, \\dots, X_Q]`. The second output is a list of indices of the selected
subset of the original variables, :math:`\\mathbf{X}_S = [X_1, X_2, \\dots, X_n]`, that correspond to the minimum cost, :math:`\\mathcal{L}`.
.. note::
The algorithm can be very expensive (for large data sets) due to multiple computations of the normalized variance derivative.
Try running it on multiple cores or on a sampled data set.
If the algorithm fails because it cannot determine a peak
location, try increasing the range of the ``bandwidth_values`` parameter.
**Example:**
.. code:: python
from PCAfold import manifold_informed_backward_elimination
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
X_source = np.random.rand(100,10)
# Define original variables to add to the optimization:
target_variables = X[:,0:3]
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Specify the bandwidth values to compute the optimization on:
bandwidth_values = np.logspace(-4, 2, 50)
# Run the subset selection algorithm:
(ordered, selected, costs) = manifold_informed_backward_elimination(X,
X_source,
variable_names,
scaling='auto',
bandwidth_values=bandwidth_values,
target_variables=target_variables,
add_transformed_source=True,
target_manifold_dimensionality=2,
penalty_function='peak',
norm='max',
integrate_to_peak=True,
verbose=True)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param X_source:
``numpy.ndarray`` specifying the source terms, :math:`\\mathbf{S_X}`, corresponding to the state-space
variables in :math:`\\mathbf{X}`. This parameter is applicable to data sets
representing reactive flows. More information can be found in :cite:`Sutherland2009`. It should be of size ``(n_observations,n_variables)``.
:param variable_names:
``list`` of ``str`` specifying variables names. Order of names in the ``variable_names`` list should match the order of variables (columns) in ``X``.
:param scaling: (optional)
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param bandwidth_values:
``numpy.ndarray`` specifying the bandwidth values, :math:`\\sigma`, for :math:`\\hat{\\mathcal{D}}(\\sigma)` computation.
:param target_variables: (optional)
``numpy.ndarray`` specifying the dependent variables that should be used in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation. It should be of size ``(n_observations,n_target_variables)``.
:param add_transformed_source: (optional)
``bool`` specifying if the PCA-transformed source terms of the state-space variables should be added in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation, alongside the user-defined dependent variables.
:param source_space: (optional)
``str`` specifying the space to which the PC source terms should be transformed before computing the cost. It can be one of the following: ``symlog``, ``continuous-symlog``, ``original-and-symlog``, ``original-and-continuous-symlog``. If set to ``None``, PC source terms are kept in their original PCA-space.
:param target_manifold_dimensionality: (optional)
``int`` specifying the target dimensionality of the PCA manifold.
:param penalty_function: (optional)
``str`` specifying the weighting applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the rightmost peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up to the rightmost peak location.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **ordered_variables** - ``list`` specifying the indices of the ordered variables.
- **selected_variables** - ``list`` specifying the indices of the selected variables that correspond to the minimum cost :math:`\\mathcal{L}`.
- **optimized_cost** - ``float`` specifying the cost corresponding to the optimized subset.
- **costs** - ``list`` specifying the costs, :math:`\\mathcal{L}`, from each iteration.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
__source_spaces = ['symlog', 'continuous-symlog', 'original-and-symlog', 'original-and-continuous-symlog']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have shape `(n_observations,n_variables)`.")
if not isinstance(X_source, np.ndarray):
raise ValueError("Parameter `X_source` has to be of type `numpy.ndarray`.")
try:
(n_observations_source, n_variables_source) = np.shape(X_source)
except:
raise ValueError("Parameter `X_source` has to have shape `(n_observations,n_variables)`.")
if n_variables_source != n_variables:
raise ValueError("Parameter `X_source` has different number of variables than `X`.")
# TODO: In the future, we might want to allow different number of observations, there is no reason why they should be equal.
if n_observations_source != n_observations:
raise ValueError("Parameter `X_source` has different number of observations than `X`.")
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
if len(variable_names) != n_variables:
raise ValueError("Parameter `variable_names` has different number of variables than `X`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be of type `str`.")
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("Parameter `bandwidth_values` has to be of type `numpy.ndarray`.")
if target_variables is not None:
if not isinstance(target_variables, np.ndarray):
raise ValueError("Parameter `target_variables` has to be of type `numpy.ndarray`.")
try:
(n_d_hat_observations, n_target_variables) = np.shape(target_variables)
target_variables_names = ['X' + str(i) for i in range(0,n_target_variables)]
except:
raise ValueError("Parameter `target_variables` has to have shape `(n_observations,n_target_variables)`.")
if n_d_hat_observations != n_observations_source:
raise ValueError("Parameter `target_variables` has different number of observations than `X_source`.")
if not isinstance(add_transformed_source, bool):
raise ValueError("Parameter `add_transformed_source` has to be of type `bool`.")
if target_variables is None:
if not add_transformed_source:
raise ValueError("Either `target_variables` has to be specified or `add_transformed_source` has to be set to True.")
if source_space is not None:
if not isinstance(source_space, str):
raise ValueError("Parameter `source_space` has to be of type `str`.")
if source_space.lower() not in __source_spaces:
raise ValueError("Parameter `source_space` has to be one of the following: symlog`, `continuous-symlog`.")
if not isinstance(target_manifold_dimensionality, int):
raise ValueError("Parameter `target_manifold_dimensionality` has to be of type `int`.")
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
costs = []
if verbose: print('Optimizing...\n')
if verbose and source_space is not None:
print('PC source terms will be assessed in the ' + source_space + ' space.\n')
total_tic = time.perf_counter()
remaining_variables_list = [i for i in range(0,n_variables)]
ordered_variables = []
loop_counter = 0
# Iterate the algorithm: ---------------------------------------------------
while len(remaining_variables_list) > target_manifold_dimensionality:
iteration_tic = time.perf_counter()
loop_counter += 1
if verbose:
print('Iteration No.' + str(loop_counter))
print('Currently eliminating variable from the following list: ')
print([variable_names[i] for i in remaining_variables_list])
current_cost_function = []
for i_variable in remaining_variables_list:
if verbose: print('\tCurrently eliminated variable: ' + variable_names[i_variable])
# Consider a subset with all variables but the currently eliminated one:
current_variables_list = [i for i in remaining_variables_list if i != i_variable]
if verbose:
print('\tRunning PCA for a subset:')
print('\t' + ', '.join([variable_names[i] for i in current_variables_list]))
pca = reduction.PCA(X[:,current_variables_list], scaling=scaling, n_components=target_manifold_dimensionality)
PCs = pca.transform(X[:,current_variables_list])
(PCs, _, _) = preprocess.center_scale(PCs, '-1to1')
if add_transformed_source:
PC_sources = pca.transform(X_source[:,current_variables_list], nocenter=True)
if source_space is not None:
if source_space == 'original-and-symlog':
transformed_PC_sources = preprocess.log_transform(PC_sources, method='symlog', threshold=1.e-4)
elif source_space == 'original-and-continuous-symlog':
transformed_PC_sources = preprocess.log_transform(PC_sources, method='continuous-symlog', threshold=1.e-4)
else:
transformed_PC_sources = preprocess.log_transform(PC_sources, method=source_space, threshold=1.e-4)
if target_variables is None:
if source_space == 'original-and-symlog' or source_space == 'original-and-continuous-symlog':
depvars = np.hstack((PC_sources, transformed_PC_sources))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
elif source_space == 'symlog' or source_space == 'continuous-symlog':
depvars = cp.deepcopy(transformed_PC_sources)
depvar_names = ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
else:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
else:
if add_transformed_source:
if source_space == 'original-and-symlog' or source_space == 'original-and-continuous-symlog':
depvars = np.hstack((PC_sources, transformed_PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
elif source_space == 'symlog' or source_space == 'continuous-symlog':
depvars = np.hstack((transformed_PC_sources, target_variables))
depvar_names = ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
else:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
else:
depvars = cp.deepcopy(target_variables)
depvar_names = cp.deepcopy(target_variables_names)
current_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, scale_unit_box = False, bandwidth_values=bandwidth_values)
current_area = cost_function_normalized_variance_derivative(current_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % current_area)
current_cost_function.append(current_area)
# Starting from the second iteration, we can make a comparison with the previous iteration's results:
if loop_counter > 1:
if current_area <= previous_area:
if verbose: print(colored('\tSAME OR BETTER', 'green'))
else:
if verbose: print(colored('\tWORSE', 'red'))
min_area = np.min(current_cost_function)
costs.append(min_area)
# Search for the variable whose removal will decrease the cost the most:
(worst_variable_index, ) = np.where(np.array(current_cost_function)==min_area)
# This handles cases where there are multiple minima with the same minimum cost value:
try:
worst_variable_index = int(worst_variable_index)
except:
worst_variable_index = int(worst_variable_index[0])
if verbose: print('\n\tVariable ' + variable_names[remaining_variables_list[worst_variable_index]] + ' is removed.\n\tCost:\t%.4f' % min_area + '\n')
# Append the removed variable; this list is later flipped so that variables are ordered from most to least important:
ordered_variables.append(remaining_variables_list[worst_variable_index])
# Create a new list of variables to loop over at the next iteration:
remaining_variables_list = [i for i in range(0,n_variables) if i not in ordered_variables]
if loop_counter > 1:
if min_area <= previous_area:
previous_area = min_area
else:
previous_area = min_area
iteration_toc = time.perf_counter()
if verbose: print(f'\tIteration time: {(iteration_toc - iteration_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Compute the optimal subset where the overall cost from all iterations is minimized: ------------------
# One last time remove the worst variable:
del current_cost_function[worst_variable_index]
for i in remaining_variables_list:
ordered_variables.append(i)
for i in range(0,len(remaining_variables_list)):
costs.append(current_cost_function[i])
# Invert lists to have variables ordered from most to least important:
ordered_variables = ordered_variables[::-1]
costs = costs[::-1]
(min_cost_function_index, ) = np.where(costs==np.min(costs))
# This handles cases where there are multiple minima with the same minimum cost value:
try:
min_cost_function_index = int(min_cost_function_index)
except:
min_cost_function_index = int(min_cost_function_index[0])
selected_variables = list(np.array(ordered_variables)[0:min_cost_function_index])
optimized_cost = costs[min_cost_function_index]
if verbose:
print('Ordered variables:')
print(', '.join([variable_names[i] for i in ordered_variables]))
print(ordered_variables)
print('Final cost: %.4f' % min_area)
print('\nSelected variables:')
print(', '.join([variable_names[i] for i in selected_variables]))
print(selected_variables)
print('Lowest cost: %.4f' % optimized_cost)
total_toc = time.perf_counter()
if verbose: print(f'\nOptimization time: {(total_toc - total_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
return ordered_variables, selected_variables, optimized_cost, costs
################################################################################
#
# Regression assessment
#
################################################################################
class RegressionAssessment:
"""
Wrapper class for storing all regression assessment metrics for a given
regression solution given by the observed dependent variables, :math:`\\pmb{\\phi}_o`,
and the predicted dependent variables, :math:`\\pmb{\\phi}_p`.
**Example:**
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Instantiate RegressionAssessment class object:
regression_metrics = RegressionAssessment(X, X_rec)
# Access mean absolute error values:
MAE = regression_metrics.mean_absolute_error
In addition, all stratified regression metrics can be computed on a single variable:
.. code:: python
from PCAfold import variable_bins
# Generate bins:
(idx, bins_borders) = variable_bins(X[:,0], k=5, verbose=False)
# Instantiate RegressionAssessment class object:
stratified_regression_metrics = RegressionAssessment(X[:,0], X_rec[:,0], idx=idx)
# Access stratified mean absolute error values:
stratified_MAE = stratified_regression_metrics.stratified_mean_absolute_error
:param observed:
``numpy.ndarray`` specifying the observed values of dependent variables, :math:`\\pmb{\\phi}_o`. It should be of size ``(n_observations,)`` or ``(n_observations,n_variables)``.
:param predicted:
``numpy.ndarray`` specifying the predicted values of dependent variables, :math:`\\pmb{\\phi}_p`. It should be of size ``(n_observations,)`` or ``(n_observations,n_variables)``.
:param idx:
``numpy.ndarray`` of cluster classifications. It should be of size ``(n_observations,)`` or ``(n_observations,1)``.
:param variable_names: (optional)
``list`` of ``str`` specifying variable names.
:param use_global_mean: (optional)
``bool`` specifying if global mean of the observed variable should be used as a reference in :math:`R^2` calculation.
:param norm:
``str`` specifying the normalization, :math:`d_{norm}`, for NRMSE computation. It can be one of the following: ``std``, ``range``, ``root_square_mean``, ``root_square_range``, ``root_square_std``, ``abs_mean``.
:param use_global_norm: (optional)
``bool`` specifying if global norm of the observed variable should be used in NRMSE calculation.
:param tolerance:
``float`` specifying the tolerance for GDE computation.
**Attributes:**
- **coefficient_of_determination** - (read only) ``numpy.ndarray`` specifying the coefficient of determination, :math:`R^2`, values. It has size ``(1,n_variables)``.
- **mean_absolute_error** - (read only) ``numpy.ndarray`` specifying the mean absolute error (MAE) values. It has size ``(1,n_variables)``.
- **mean_squared_error** - (read only) ``numpy.ndarray`` specifying the mean squared error (MSE) values. It has size ``(1,n_variables)``.
- **root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the root mean squared error (RMSE) values. It has size ``(1,n_variables)``.
- **normalized_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the normalized root mean squared error (NRMSE) values. It has size ``(1,n_variables)``.
- **good_direction_estimate** - (read only) ``float`` specifying the good direction estimate (GDE) value, treating the entire :math:`\\pmb{\\phi}_o` and :math:`\\pmb{\\phi}_p` as vectors. Note that if a single dependent variable is passed, GDE cannot be computed and is set to ``NaN``.
If ``idx`` has been specified:
- **stratified_coefficient_of_determination** - (read only) ``numpy.ndarray`` specifying the coefficient of determination, :math:`R^2`, values. It has size ``(1,n_variables)``.
- **stratified_mean_absolute_error** - (read only) ``numpy.ndarray`` specifying the mean absolute error (MAE) values. It has size ``(1,n_variables)``.
- **stratified_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the mean squared error (MSE) values. It has size ``(1,n_variables)``.
- **stratified_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the root mean squared error (RMSE) values. It has size ``(1,n_variables)``.
- **stratified_normalized_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the normalized root mean squared error (NRMSE) values. It has size ``(1,n_variables)``.
"""
def __init__(self, observed, predicted, idx=None, variable_names=None, use_global_mean=False, norm='std', use_global_norm=False, tolerance=0.05):
if not isinstance(observed, np.ndarray):
raise ValueError("Parameter `observed` has to be of type `numpy.ndarray`.")
try:
(n_observed,) = np.shape(observed)
n_var_observed = 1
observed = observed[:,None]
except:
(n_observed, n_var_observed) = np.shape(observed)
if not isinstance(predicted, np.ndarray):
raise ValueError("Parameter `predicted` has to be of type `numpy.ndarray`.")
try:
(n_predicted,) = np.shape(predicted)
n_var_predicted = 1
predicted = predicted[:,None]
except:
(n_predicted, n_var_predicted) = np.shape(predicted)
if n_observed != n_predicted:
raise ValueError("Parameter `observed` has different number of elements than `predicted`.")
if n_var_observed != n_var_predicted:
raise ValueError("Parameter `observed` has different number of elements than `predicted`.")
self.__n_variables = n_var_observed
if idx is not None:
if isinstance(idx, np.ndarray):
if not all(isinstance(i, np.integer) for i in idx.ravel()):
raise ValueError("Parameter `idx` can only contain integers.")
else:
raise ValueError("Parameter `idx` has to be of type `numpy.ndarray`.")
try:
(n_observations_idx, ) = np.shape(idx)
n_idx = 1
except:
(n_observations_idx, n_idx) = np.shape(idx)
if n_idx != 1:
raise ValueError("Parameter `idx` has to have size `(n_observations,)` or `(n_observations,1)`.")
if n_observations_idx != n_observed:
raise ValueError('Vector of cluster classifications `idx` has different number of observations than the original data set `X`.')
if n_var_observed != 1:
raise ValueError('Stratified regression metrics can only be computed on a single vector.')
self.__n_clusters = len(np.unique(idx))
self.__cluster_populations = preprocess.get_populations(idx)
self.__cluster_min = []
self.__cluster_max = []
for i in range(0,self.__n_clusters):
(cluster_indices, ) = np.where(idx==i)
self.__cluster_min.append(np.min(observed[cluster_indices,:]))
self.__cluster_max.append(np.max(observed[cluster_indices,:]))
if not isinstance(use_global_mean, bool):
raise ValueError("Parameter `use_global_mean` has to be a boolean.")
if variable_names is not None:
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
else:
if self.__n_variables != len(variable_names):
raise ValueError("Parameter `variable_names` has different number of variables than `observed` and `predicted`.")
else:
variable_names = []
for i in range(0,self.__n_variables):
variable_names.append('X' + str(i+1))
self.__variable_names = variable_names
self.__coefficient_of_determination_matrix = np.ones((1,self.__n_variables))
self.__mean_absolute_error_matrix = np.ones((1,self.__n_variables))
self.__mean_squared_error_matrix = np.ones((1,self.__n_variables))
self.__root_mean_squared_error_matrix = np.ones((1,self.__n_variables))
self.__normalized_root_mean_squared_error_matrix = np.ones((1,self.__n_variables))
if n_var_observed > 1:
_, self.__good_direction_estimate_value = good_direction_estimate(observed, predicted, tolerance=tolerance)
self.__good_direction_estimate_matrix = self.__good_direction_estimate_value * np.ones((1,self.__n_variables))
else:
self.__good_direction_estimate_value = np.NAN
self.__good_direction_estimate_matrix = self.__good_direction_estimate_value * np.ones((1,self.__n_variables))
for i in range(0,self.__n_variables):
self.__coefficient_of_determination_matrix[0,i] = coefficient_of_determination(observed[:,i], predicted[:,i])
self.__mean_absolute_error_matrix[0,i] = mean_absolute_error(observed[:,i], predicted[:,i])
self.__mean_squared_error_matrix[0,i] = mean_squared_error(observed[:,i], predicted[:,i])
self.__root_mean_squared_error_matrix[0,i] = root_mean_squared_error(observed[:,i], predicted[:,i])
self.__normalized_root_mean_squared_error_matrix[0,i] = normalized_root_mean_squared_error(observed[:,i], predicted[:,i], norm=norm)
if idx is not None:
self.__stratified_coefficient_of_determination = stratified_coefficient_of_determination(observed, predicted, idx=idx, use_global_mean=use_global_mean)
self.__stratified_mean_absolute_error = stratified_mean_absolute_error(observed, predicted, idx=idx)
self.__stratified_mean_squared_error = stratified_mean_squared_error(observed, predicted, idx=idx)
self.__stratified_root_mean_squared_error = stratified_root_mean_squared_error(observed, predicted, idx=idx)
self.__stratified_normalized_root_mean_squared_error = stratified_normalized_root_mean_squared_error(observed, predicted, idx=idx, norm=norm, use_global_norm=use_global_norm)
else:
self.__stratified_coefficient_of_determination = None
self.__stratified_mean_absolute_error = None
self.__stratified_mean_squared_error = None
self.__stratified_root_mean_squared_error = None
self.__stratified_normalized_root_mean_squared_error = None
@property
def coefficient_of_determination(self):
return self.__coefficient_of_determination_matrix
@property
def mean_absolute_error(self):
return self.__mean_absolute_error_matrix
@property
def mean_squared_error(self):
return self.__mean_squared_error_matrix
@property
def root_mean_squared_error(self):
return self.__root_mean_squared_error_matrix
@property
def normalized_root_mean_squared_error(self):
return self.__normalized_root_mean_squared_error_matrix
@property
def good_direction_estimate(self):
return self.__good_direction_estimate_value
@property
def stratified_coefficient_of_determination(self):
return self.__stratified_coefficient_of_determination
@property
def stratified_mean_absolute_error(self):
return self.__stratified_mean_absolute_error
@property
def stratified_mean_squared_error(self):
return self.__stratified_mean_squared_error
@property
def stratified_root_mean_squared_error(self):
return self.__stratified_root_mean_squared_error
@property
def stratified_normalized_root_mean_squared_error(self):
return self.__stratified_normalized_root_mean_squared_error
# ------------------------------------------------------------------------------
def print_metrics(self, table_format=['raw'], float_format='.4f', metrics=None, comparison=None):
"""
Prints regression assessment metrics as raw text, in ``tex`` format and/or as ``pandas.DataFrame``.
**Example:**
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Instantiate RegressionAssessment class object:
regression_metrics = RegressionAssessment(X, X_rec)
# Print regression metrics:
regression_metrics.print_metrics(table_format=['raw', 'tex', 'pandas'],
float_format='.4f',
metrics=['R2', 'NRMSE', 'GDE'])
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
X1
R2: 0.9900
NRMSE: 0.0999
GDE: 70.0000
-------------------------
X2
R2: 0.6126
NRMSE: 0.6224
GDE: 70.0000
-------------------------
X3
R2: 0.6368
NRMSE: 0.6026
GDE: 70.0000
Adding ``'tex'`` to the ``table_format`` list will result in printing:
.. code-block:: text
\\begin{table}[h!]
\\begin{center}
\\begin{tabular}{llll} \\toprule
& \\textit{X1} & \\textit{X2} & \\textit{X3} \\\\ \\midrule
R2 & 0.9900 & 0.6126 & 0.6368 \\\\
NRMSE & 0.0999 & 0.6224 & 0.6026 \\\\
GDE & 70.0000 & 70.0000 & 70.0000 \\\\
\\end{tabular}
\\caption{}\\label{}
\\end{center}
\\end{table}
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table.png
:width: 300
:align: center
Additionally, the current object of ``RegressionAssessment`` class can be compared with another object:
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
Y = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
pca_Y = PCA(Y, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
Y_rec = pca_Y.reconstruct(pca_Y.transform(Y))
# Instantiate RegressionAssessment class object:
regression_metrics_X = RegressionAssessment(X, X_rec)
regression_metrics_Y = RegressionAssessment(Y, Y_rec)
# Print regression metrics:
regression_metrics_X.print_metrics(table_format=['raw', 'pandas'],
float_format='.4f',
metrics=['R2', 'NRMSE', 'GDE'],
comparison=regression_metrics_Y)
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
X1
R2: 0.9133 BETTER
NRMSE: 0.2944 BETTER
GDE: 67.0000 WORSE
-------------------------
X2
R2: 0.5969 WORSE
NRMSE: 0.6349 WORSE
GDE: 67.0000 WORSE
-------------------------
X3
R2: 0.6175 WORSE
NRMSE: 0.6185 WORSE
GDE: 67.0000 WORSE
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table-comparison.png
:width: 300
:align: center
:param table_format: (optional)
``list`` of ``str`` specifying the format(s) in which the table should be printed.
Strings can only be ``'raw'``, ``'tex'`` and/or ``'pandas'``.
:param float_format: (optional)
``str`` specifying the display format for the numerical entries inside the
table. By default it is set to ``'.4f'``.
:param metrics: (optional)
``list`` of ``str`` specifying which metrics should be printed. Strings can only be ``'R2'``, ``'MAE'``, ``'MSE'``, ``'RMSE'``, ``'NRMSE'``, ``'GDE'``.
If metrics is set to ``None``, all available metrics will be printed.
:param comparison: (optional)
object of ``RegressionAssessment`` class specifying the metrics that should be compared with the current regression metrics.
"""
__table_formats = ['raw', 'tex', 'pandas']
__metrics_names = ['R2', 'MAE', 'MSE', 'RMSE', 'NRMSE', 'GDE']
__metrics_dict = {'R2': self.__coefficient_of_determination_matrix,
'MAE': self.__mean_absolute_error_matrix,
'MSE': self.__mean_squared_error_matrix,
'RMSE': self.__root_mean_squared_error_matrix,
'NRMSE': self.__normalized_root_mean_squared_error_matrix,
'GDE': self.__good_direction_estimate_matrix}
if comparison is not None:
__comparison_metrics_dict = {'R2': comparison.coefficient_of_determination,
'MAE': comparison.mean_absolute_error,
'MSE': comparison.mean_squared_error,
'RMSE': comparison.root_mean_squared_error,
'NRMSE': comparison.normalized_root_mean_squared_error,
'GDE': comparison.good_direction_estimate * np.ones_like(comparison.coefficient_of_determination)}
if not isinstance(table_format, list):
raise ValueError("Parameter `table_format` has to be of type `list`.")
for item in table_format:
if item not in __table_formats:
raise ValueError("Parameter `table_format` can only contain 'raw', 'tex' and/or 'pandas'.")
if not isinstance(float_format, str):
raise ValueError("Parameter `float_format` has to be of type `str`.")
if metrics is not None:
if not isinstance(metrics, list):
raise ValueError("Parameter `metrics` has to be of type `list`.")
for item in metrics:
if item not in __metrics_names:
raise ValueError("Parameter `metrics` can only be: 'R2', 'MAE', 'MSE', 'RMSE', 'NRMSE', 'GDE'.")
else:
metrics = __metrics_names
if comparison is None:
for item in set(table_format):
if item=='raw':
for i in range(0,self.__n_variables):
print('-'*25 + '\n' + self.__variable_names[i])
metrics_to_print = []
for metric in metrics:
metrics_to_print.append(__metrics_dict[metric][0,i])
for j in range(0,len(metrics)):
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j])
if item=='tex':
import pandas as pd
metrics_to_print = np.zeros_like(self.__coefficient_of_determination_matrix)
for metric in metrics:
metrics_to_print = np.vstack((metrics_to_print, __metrics_dict[metric]))
metrics_to_print = metrics_to_print[1::,:]
metrics_table = pd.DataFrame(metrics_to_print, columns=self.__variable_names, index=metrics)
generate_tex_table(metrics_table, float_format=float_format)
if item=='pandas':
import pandas as pd
from IPython.display import display
pandas_format = '{:,' + float_format + '}'
metrics_to_print = np.zeros_like(self.__coefficient_of_determination_matrix.T)
for metric in metrics:
metrics_to_print = np.hstack((metrics_to_print, __metrics_dict[metric].T))
metrics_to_print = metrics_to_print[:,1::]
metrics_table = pd.DataFrame(metrics_to_print, columns=metrics, index=self.__variable_names)
formatted_table = metrics_table.style.format(pandas_format)
display(formatted_table)
else:
for item in set(table_format):
if item=='raw':
for i in range(0,self.__n_variables):
print('-'*25 + '\n' + self.__variable_names[i])
metrics_to_print = []
comparison_metrics_to_print = []
for metric in metrics:
metrics_to_print.append(__metrics_dict[metric][0,i])
comparison_metrics_to_print.append(__comparison_metrics_dict[metric][0,i])
for j, metric in enumerate(metrics):
if metric == 'R2' or metric == 'GDE':
if metrics_to_print[j] > comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + colored('\tBETTER', 'green'))
elif metrics_to_print[j] < comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + colored('\tWORSE', 'red'))
elif metrics_to_print[j] == comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + '\tSAME')
else:
if metrics_to_print[j] > comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + colored('\tWORSE', 'red'))
elif metrics_to_print[j] < comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + colored('\tBETTER', 'green'))
elif metrics_to_print[j] == comparison_metrics_to_print[j]:
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j] + '\tSAME')
if item=='pandas':
import pandas as pd
from IPython.display import display
pandas_format = '{:,' + float_format + '}'
metrics_to_print = np.zeros_like(self.__coefficient_of_determination_matrix.T)
comparison_metrics_to_print = np.zeros_like(comparison.coefficient_of_determination.T)
for metric in metrics:
metrics_to_print = np.hstack((metrics_to_print, __metrics_dict[metric].T))
comparison_metrics_to_print = np.hstack((comparison_metrics_to_print, __comparison_metrics_dict[metric].T))
metrics_to_print = metrics_to_print[:,1::]
comparison_metrics_to_print = comparison_metrics_to_print[:,1::]
def highlight_better(data, data_comparison, color='lightgreen'):
attr = 'background-color: {}'.format(color)
is_better = False * data
# Lower value is better (MAE, MSE, RMSE, NRMSE):
try:
is_better['MAE'] = data['MAE'].astype(float) < data_comparison['MAE']
except:
pass
try:
is_better['MSE'] = data['MSE'].astype(float) < data_comparison['MSE']
except:
pass
try:
is_better['RMSE'] = data['RMSE'].astype(float) < data_comparison['RMSE']
except:
pass
try:
is_better['NRMSE'] = data['NRMSE'].astype(float) < data_comparison['NRMSE']
except:
pass
# Higher value is better (R2 and GDE):
try:
is_better['R2'] = data['R2'].astype(float) > data_comparison['R2']
except:
pass
try:
is_better['GDE'] = data['GDE'].astype(float) > data_comparison['GDE']
except:
pass
formatting = pd.DataFrame(np.where(is_better, attr, ''), index=data.index, columns=data.columns)
return formatting
def highlight_worse(data, data_comparison, color='salmon'):
attr = 'background-color: {}'.format(color)
is_worse = False * data
# Higher value is worse (MAE, MSE, RMSE, NRMSE):
try:
is_worse['MAE'] = data['MAE'].astype(float) > data_comparison['MAE']
except:
pass
try:
is_worse['MSE'] = data['MSE'].astype(float) > data_comparison['MSE']
except:
pass
try:
is_worse['RMSE'] = data['RMSE'].astype(float) > data_comparison['RMSE']
except:
pass
try:
is_worse['NRMSE'] = data['NRMSE'].astype(float) > data_comparison['NRMSE']
except:
pass
# Lower value is worse (R2 and GDE):
try:
is_worse['R2'] = data['R2'].astype(float) < data_comparison['R2']
except:
pass
try:
is_worse['GDE'] = data['GDE'].astype(float) < data_comparison['GDE']
except:
pass
formatting = pd.DataFrame(np.where(is_worse, attr, ''), index=data.index, columns=data.columns)
return formatting
metrics_table = pd.DataFrame(metrics_to_print, columns=metrics, index=self.__variable_names)
comparison_metrics_table = pd.DataFrame(comparison_metrics_to_print, columns=metrics, index=self.__variable_names)
formatted_table = metrics_table.style.apply(highlight_better, data_comparison=comparison_metrics_table, axis=None)\
.apply(highlight_worse, data_comparison=comparison_metrics_table, axis=None)\
.format(pandas_format)
display(formatted_table)
# ------------------------------------------------------------------------------
def print_stratified_metrics(self, table_format=['raw'], float_format='.4f', metrics=None, comparison=None):
"""
Prints stratified regression assessment metrics as raw text, in ``tex`` format and/or as ``pandas.DataFrame``.
In each cluster, in addition to the regression metrics, the number of observations is printed,
along with the minimum and maximum values of the observed variable in that cluster.
**Example:**
.. code:: python
from PCAfold import PCA, variable_bins, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Generate bins:
(idx, bins_borders) = variable_bins(X[:,0], k=3, verbose=False)
# Instantiate RegressionAssessment class object:
stratified_regression_metrics = RegressionAssessment(X[:,0], X_rec[:,0], idx=idx)
# Print regression metrics:
stratified_regression_metrics.print_stratified_metrics(table_format=['raw', 'tex', 'pandas'],
float_format='.4f',
metrics=['R2', 'MAE', 'NRMSE'])
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
k1
Observations: 31
Min: 0.0120
Max: 0.3311
R2: -3.3271
MAE: 0.1774
NRMSE: 2.0802
-------------------------
k2
Observations: 38
Min: 0.3425
Max: 0.6665
R2: -1.4608
MAE: 0.1367
NRMSE: 1.5687
-------------------------
k3
Observations: 31
Min: 0.6853
Max: 0.9959
R2: -3.7319
MAE: 0.1743
NRMSE: 2.1753
Adding ``'tex'`` to the ``table_format`` list will result in printing:
.. code-block:: text
\\begin{table}[h!]
\\begin{center}
\\begin{tabular}{llll} \\toprule
& \\textit{k1} & \\textit{k2} & \\textit{k3} \\\\ \\midrule
Observations & 31.0000 & 38.0000 & 31.0000 \\\\
Min & 0.0120 & 0.3425 & 0.6853 \\\\
Max & 0.3311 & 0.6665 & 0.9959 \\\\
R2 & -3.3271 & -1.4608 & -3.7319 \\\\
MAE & 0.1774 & 0.1367 & 0.1743 \\\\
NRMSE & 2.0802 & 1.5687 & 2.1753 \\\\
\\end{tabular}
\\caption{}\\label{}
\\end{center}
\\end{table}
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table-stratified.png
:width: 500
:align: center
Additionally, the current object of ``RegressionAssessment`` class can be compared with another object:
.. code:: python
from PCAfold import PCA, variable_bins, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Generate bins:
(idx, bins_borders) = variable_bins(X[:,0], k=3, verbose=False)
# Instantiate RegressionAssessment class object:
stratified_regression_metrics_0 = RegressionAssessment(X[:,0], X_rec[:,0], idx=idx)
stratified_regression_metrics_1 = RegressionAssessment(X[:,1], X_rec[:,1], idx=idx)
# Print regression metrics:
stratified_regression_metrics_0.print_stratified_metrics(table_format=['raw', 'pandas'],
float_format='.4f',
metrics=['R2', 'MAE', 'NRMSE'],
comparison=stratified_regression_metrics_1)
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
k1
Observations: 39
Min: 0.0013
Max: 0.3097
R2: 0.9236 BETTER
MAE: 0.0185 BETTER
NRMSE: 0.2764 BETTER
-------------------------
k2
Observations: 29
Min: 0.3519
Max: 0.6630
R2: 0.9380 BETTER
MAE: 0.0179 BETTER
NRMSE: 0.2491 BETTER
-------------------------
k3
Observations: 32
Min: 0.6663
Max: 0.9943
R2: 0.9343 BETTER
MAE: 0.0194 BETTER
NRMSE: 0.2563 BETTER
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table-comparison-stratified.png
:width: 500
:align: center
:param table_format: (optional)
``list`` of ``str`` specifying the format(s) in which the table should be printed.
Strings can only be ``'raw'``, ``'tex'`` and/or ``'pandas'``.
:param float_format: (optional)
``str`` specifying the display format for the numerical entries inside the
table. By default it is set to ``'.4f'``.
:param metrics: (optional)
``list`` of ``str`` specifying which metrics should be printed. Strings can only be ``'R2'``, ``'MAE'``, ``'MSE'``, ``'RMSE'``, ``'NRMSE'``.
If metrics is set to ``None``, all available metrics will be printed.
:param comparison: (optional)
object of ``RegressionAssessment`` class specifying the metrics that should be compared with the current regression metrics.
"""
__table_formats = ['raw', 'tex', 'pandas']
__metrics_names = ['R2', 'MAE', 'MSE', 'RMSE', 'NRMSE']
__clusters_names = ['k' + str(i) for i in range(1,self.__n_clusters+1)]
__metrics_dict = {'R2': self.__stratified_coefficient_of_determination,
'MAE': self.__stratified_mean_absolute_error,
'MSE': self.__stratified_mean_squared_error,
'RMSE': self.__stratified_root_mean_squared_error,
'NRMSE': self.__stratified_normalized_root_mean_squared_error}
if comparison is not None:
__comparison_metrics_dict = {'R2': comparison.stratified_coefficient_of_determination,
'MAE': comparison.stratified_mean_absolute_error,
'MSE': comparison.stratified_mean_squared_error,
'RMSE': comparison.stratified_root_mean_squared_error,
'NRMSE': comparison.stratified_normalized_root_mean_squared_error}
if not isinstance(table_format, list):
raise ValueError("Parameter `table_format` has to be of type `str`.")
for item in table_format:
if item not in __table_formats:
raise ValueError("Parameter `table_format` can only contain 'raw', 'tex' and/or 'pandas'.")
if not isinstance(float_format, str):
raise ValueError("Parameter `float_format` has to be of type `str`.")
if metrics is not None:
if not isinstance(metrics, list):
raise ValueError("Parameter `metrics` has to be of type `list`.")
for item in metrics:
if item not in __metrics_names:
raise ValueError("Parameter `metrics` can only be: 'R2', 'MAE', 'MSE', 'RMSE', 'NRMSE'.")
else:
metrics = __metrics_names
if comparison is None:
for item in set(table_format):
if item=='raw':
for i in range(0,self.__n_clusters):
print('-'*25 + '\n' + __clusters_names[i])
metrics_to_print = [self.__cluster_populations[i], self.__cluster_min[i], self.__cluster_max[i]]
for metric in metrics:
metrics_to_print.append(__metrics_dict[metric][i])
print('Observations' + ':\t' + str(metrics_to_print[0]))
print('Min' + ':\t' + ('%' + float_format) % metrics_to_print[1])
print('Max' + ':\t' + ('%' + float_format) % metrics_to_print[2])
for j in range(0,len(metrics)):
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j+3])
if item=='tex':
import pandas as pd
metrics_to_print = np.vstack((self.__cluster_populations, self.__cluster_min, self.__cluster_max))
for metric in metrics:
metrics_to_print = np.vstack((metrics_to_print, __metrics_dict[metric]))
metrics_table = pd.DataFrame(metrics_to_print, columns=__clusters_names, index=['Observations', 'Min', 'Max'] + metrics)
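# Illustrative sketch (not part of the library, appended after the excerpt above):
# the BETTER/WORSE/SAME labelling used by the ``comparison`` branch of
# ``print_metrics`` above follows one rule -- higher is better for R2 and GDE,
# lower is better for the error metrics (MAE, MSE, RMSE, NRMSE). The helper name
# below is hypothetical.
def _compare_metric_sketch(metric_name, current_value, comparison_value):
    higher_is_better = metric_name in ('R2', 'GDE')
    if current_value == comparison_value:
        return 'SAME'
    if (current_value > comparison_value) == higher_is_better:
        return 'BETTER'
    return 'WORSE'

# For example, _compare_metric_sketch('NRMSE', 0.29, 0.35) returns 'BETTER',
# while _compare_metric_sketch('R2', 0.59, 0.61) returns 'WORSE'.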
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
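# Illustrative sketch (not part of the pandas test suite): any object exposing a
# ``panel`` attribute can use the decorator above; with a dense Panel the wrapped
# call runs unchanged, while with a SparsePanel it runs inside
# assert_produces_warning(FutureWarning). The class and method names below are
# hypothetical.
class _DecoratorUsageSketch(object):
    panel = tm.makePanel()

    @ignore_sparse_panel_future_warning
    def double_panel(self):
        return self.panel * 2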
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure changes propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(): just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# neg indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 11:40:40 2020
@author: hendrick
"""
# =============================================================================
# # # # import packages
# =============================================================================
import numpy as np
import pandas as pd
import matplotlib.tri as tri
import matplotlib.pyplot as plt
import bmi.wrapper
import os
from scipy.optimize import newton
from tqdm import tqdm
import datetime
from netCDF4 import Dataset
import faulthandler
faulthandler.enable()
# =============================================================================
# # # # specify directories of ddl- and input-files
# =============================================================================
model_folder = os.path.join('ModelReduction', 'mr004')
# Delft3D directories
D3D_HOME = os.path.join('p:\\11202744-008-vegetation-modelling', 'code_1709',
'windows', 'oss_artifacts_x64_63721', 'x64')
dflow_dir = os.path.join(D3D_HOME, 'dflowfm', 'bin', 'dflowfm.dll')
dimr_path = os.path.join(D3D_HOME, 'dimr', 'bin', 'dimr_dll.dll')
# work directory
workdir = os.path.join('p:\\11202744-008-vegetation-modelling', 'students',
'GijsHendrickx', 'models', model_folder)
inputdir = os.path.join(workdir, 'timeseries')
# input files (Delft3D)
config_file = os.path.join(workdir, 'dimr_config.xml')
mdufile = os.path.join(workdir, 'fm', 'FlowFM.mdu')
# print directories and input-files as check
print('Model : {0}\n'.format(workdir))
print('Delft3D home : {0}'.format(D3D_HOME))
print('DIMR-directory : {0}'.format(dimr_path))
print('Configuration file : {0}'.format(config_file))
# =============================================================================
# # # # prepare locations
# =============================================================================
# # print directories of input- and output-files
print('\nTime-series dir. : {0}'.format(inputdir))
# # intermediate figures
figfolder = os.path.join(workdir, 'figures')
# check existence and create if necessary
if not os.path.exists(figfolder):
os.mkdir(figfolder)
print('New folder created : {0}'.format(figfolder))
print('Figure directory : {0}'.format(figfolder))
# # output files
outputfolder = os.path.join(workdir, 'output')
# check existence and create if necessary
if not os.path.exists(outputfolder):
os.mkdir(outputfolder)
print('New folder created : {0}'.format(outputfolder))
print('Output directory : {0}'.format(outputfolder))
# =============================================================================
# # # # create correct environment
# =============================================================================
os.environ['PATH'] = (os.path.join(D3D_HOME, 'share', 'bin') + ';' +
os.path.join(D3D_HOME, 'dflowfm', 'bin') + ';' +
os.path.join(D3D_HOME, 'dimr', 'bin') + ';' +
os.path.join(D3D_HOME, 'dwaves', 'bin') + ';' +
os.path.join(D3D_HOME, 'esmf', 'scripts') + ';' +
os.path.join(D3D_HOME, 'swan', 'scripts'))
# print created environment as check
print('\nEnvironment : {0}\n'
.format(os.path.join(D3D_HOME, 'share', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dflowfm', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dimr', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'dwaves', 'bin')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'esmf', 'scripts')) +
' {0}\n'
.format(os.path.join(D3D_HOME, 'swan', 'scripts')))
# =============================================================================
# # # # define and initialize wrappers
# =============================================================================
# define DFM wrapper
modelFM = bmi.wrapper.BMIWrapper(engine=dflow_dir, configfile=mdufile)
# define DIMR wrapper
modelDIMR = bmi.wrapper.BMIWrapper(engine=dimr_path, configfile=config_file)
# initialise model
modelDIMR.initialize()
print('Model initialized.\n')
# =============================================================================
# # # # set the pointers to important model variables of FlowFM
# =============================================================================
# number of boxes, including boundary boxes
ndx = modelFM.get_var('ndx')
# number of non-boundary boxes, i.e. within-domain boxes
ndxi = modelFM.get_var('ndxi')
# x-coord. of the center of gravity of the boxes
xzw = modelFM.get_var('xzw')
# y-coord. of the center of gravity of the boxes
yzw = modelFM.get_var('yzw')
# total number of links between boxes
lnx = modelFM.get_var('lnx')
# number of links between within-domain boxes
lnxi = modelFM.get_var('lnxi')
# link matrix between adjacent boxes, shape [number of links, 2]
ln = modelFM.get_var('ln')
# distance between the centers of adjacent boxes
dx = modelFM.get_var('dx')
# width of the interface between adjacent boxes
wu = modelFM.get_var('wu')
# surface area of the boxes
ba = modelFM.get_var('ba')
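# Illustrative sanity check (sketch, not part of the original script): gather
# the centres and surface areas of the within-domain boxes in a DataFrame so
# the mesh administration retrieved above can be inspected before the run.
n_internal = int(ndxi)
mesh_overview = pd.DataFrame({
    'x_centre': xzw[:n_internal],
    'y_centre': yzw[:n_internal],
    'area': ba[:n_internal],
})
print('Mesh overview :\n{0}'.format(mesh_overview.describe()))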
# =============================================================================
# # # # set time parameters for coupled model
# =============================================================================
# # time-span
# start year
Ystart = 2000
# simulation time [yrs]
Y = 100
# year range
years = np.arange(Ystart, Ystart + Y)
# # model time per vegetation step [s]
mtpervt = 43200
# storm
mtpervt_storm = 86400
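# Illustrative sketch (assumption, not the original coupling loop, which comes
# later in the full script): the hydrodynamics are advanced in blocks of
# `mtpervt` seconds with the coral/vegetation update in between, and
# `mtpervt_storm` is used for the storm periods.
#
#     for yr in years:
#         # ... set environmental forcing (light, temperature, storms) for `yr` ...
#         modelDIMR.update(mtpervt)
#         # ... retrieve flow fields from modelFM and update the coral model ...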
# =============================================================================
# # # # define output
# =============================================================================
# # # map > full spatial extent
# # data to output file
# mean flow > { uc }
U2mfile = True
# mean coral temperature > { Tc, Tlo, Thi }
T2mfile = True
# mean photosynthesis > { PS }
PS2mfile = True
# population states > { P } > { PH, PR, PP, PB }
P2mfile = True
# calcification > { G }
G2mfile = True
# morphology > { Lc } > { dc, hc, bc, tc, ac }
M2mfile = True
# # map-file
# map-file directory
mapfile = 'CoralModel_map.nc'
mapfilef = os.path.join(outputfolder, mapfile)
# time-interval > annually
# # # history > time-series
# # data to output file
# flow > { uc }
U2hfile = True
# temperature > { Tc, Tlo, Thi }
T2hfile = True
# photosynthesis > { PS }
PS2hfile = True
# population states > { P } > { PH, PR, PP, PB }
P2hfile = True
# calcification > { G }
G2hfile = True
# morphology > { Lc } > { dc, hc, bc, tc, ac }
M2hfile = True
# # his-file
# location(s)
xynfilef = os.path.join(workdir, 'fm', 'FlowFm_obs.xyn')
xyn = pd.read_csv(xynfilef, header=None, delim_whitespace=True)
"""Module for common preprocessing tasks."""
import time
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# TODO: fix up the docstrings
# TODO: drop_by
# TODO: apply_custom_item_level (choose axis)
# TODO: add progress tracking
class Prep(object):
"""Preprocessing / preparing data.
Attributes:
data (pandas DataFrame): dataframe with all transformations
"""
def __init__(self, df: pd.DataFrame):
"""Create new object.
Args:
- df (DataFrame): a pandas dataframe to perform preprocessing tasks on.
All tasks are performed on a copy of this DataFrame
"""
self._data = df.copy()
self._le = {}
self._scaler = None
@property
def df(self):
"""Get the current version of the modified df."""
return self._data.copy()
@df.setter
def df(self, df):
"""Set a new dataframe to be modified."""
self._data = df.copy()
return self
def apply_custom(self, fn, args={}):
"""Apply a custom function to the dataframe.
Args:
- fn: custom function to apply. Should receive the dataframe and return the modified dataframe
Returns:
self
"""
self._data = fn(self._data, **args)
return self
def drop_nulls(self, cols: list = None):
"""Drop all rows with nulls.
Args:
- cols (list): list of columns or None to all dataframe
Returns:
self
"""
if cols is None:
self._data.dropna(inplace=True)
else:
cols = [c for c in cols if c in self._data.columns]
self._data.dropna(subset=cols, inplace=True)
return self
def drop_not_nulls(self, cols: list):
"""Drop all rows with not null values for each column in cols.
Args:
- cols (list): list of columns
Returns:
self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data = self._data[self._data[col].isnull()]
return self
def drop_null_cols(self):
"""Drop columns with all null values.
Returns:
self
"""
self._data.dropna(axis=1, how='all', inplace=True)
return self
def drop_cols(self, cols: list):
"""Drop all listed columns.
Args:
- cols (list): list of cols to drop
Returns:
self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data.drop(col, axis=1, inplace=True)
return self
def bool_to_int(self, cols: list):
"""Transform bool into 1 and 0.
Args:
- cols (list): list of cols to transform
Returns:
Self
"""
if cols is None:
self._data = self._data.applymap(lambda x: 1 if x else 0)
else:
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col] = self._data[col].apply(lambda x: 1 if x else 0)
return self
# TODO: save the fitted label encoders to a pickle file
def encode(self, cols: list):
"""Encode categorical vars into numeric ones.
Args:
- cols (list): list of columns to encode
Returns:
Self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col].fillna('N/A-ENC', inplace=True)
self._le[col] = LabelEncoder()
self._data[col] = self._le[col].fit_transform(self._data[col])
return self
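# A possible implementation of the "save the fitted label encoders to a
# pickle file" TODO above. This is a sketch, not part of the original class;
# the method names and the default path are assumptions.
def save_encoders(self, path: str = 'label_encoders.pkl'):
    """Persist the fitted LabelEncoder objects for later reuse."""
    import pickle
    with open(path, 'wb') as fh:
        pickle.dump(self._le, fh)
    return self

def load_encoders(self, path: str = 'label_encoders.pkl'):
    """Restore previously fitted LabelEncoder objects."""
    import pickle
    with open(path, 'rb') as fh:
        self._le = pickle.load(fh)
    return self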
def inverse_encode(self, cols: list):
"""Decode numerically encoded columns back into their original categories.
Args:
- cols (list): list of columns to encode
Returns:
Self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col] = self._le[col].inverse_transform(self._data[col])
return self
def fill_null_with(self, val, cols=None):
"""Fill all nulls with the same value.
Args:
- val: can be `mean` to replace null with the mean of the columns
or any value to put in place of nulls.
- cols (list): list of columns or None to all dataframe
Returns:
self
"""
if cols is None:
self._data.fillna(val, inplace=True)
else:
cols = [c for c in cols if c in self._data.columns]
if isinstance(val, str):
if val == 'mean':
for col in cols:
self._data[col].fillna((self._data[col].mean()),
inplace=True)
else:
for col in cols:
self._data[col].fillna(val, inplace=True)
else:
for col in cols:
self._data[col].fillna(val, inplace=True)
return self
def dummify(self, columns: list, drop_first: bool = True):
"""Create dummies for selected columns
Args:
columns (list): list of columns to dummify
drop_first (bool, optional): select if the first class will be dropped. Defaults to True
Returns:
pd.DataFrame
"""
for col in columns:
dummy = pd.get_dummies(self._data[col], drop_first=drop_first)
self._data = pd.concat([self._data, dummy], axis=1)
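# Usage sketch (illustrative, not part of the original module). The dataframe
# and column names below are made up; they only show how the fluent interface
# is meant to be chained.
if __name__ == '__main__':
    raw = pd.DataFrame({
        'city': ['a', 'b', None, 'a'],
        'age': [21, None, 35, 42],
        'active': [True, False, True, None],
    })
    clean = (Prep(raw)
             .fill_null_with('mean', cols=['age'])
             .encode(['city'])
             .bool_to_int(['active'])
             .df)
    print(clean)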
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works by a different rule; temporarily disabled
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
# ----------------------------------------------------------------------------
# Copyright (c) 2022, Bokulich Laboratories.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
from typing import List
import pandas as pd
from entrezpy.base.analyzer import EutilsAnalyzer
from entrezpy.base.result import EutilsResult
from q2_fondue.entrezpy_clients._utils import set_up_logger
class ESearchResult(EutilsResult):
"""Entrezpy client for ESearch utility used to search for or validate
provided accession IDs.
"""
def __init__(self, response, request, log_level):
super().__init__(request.eutil, request.query_id, request.db)
self.result_raw = None
self.result = None
self.query_key = None
self.webenv = None
self.logger = set_up_logger(log_level, self)
def size(self):
return self.result.shape[0]
def isEmpty(self):
return True if self.size() == 0 else False
def dump(self):
return {self: {'dump': {'result': self.result,
'query_id': self.query_id,
'db': self.db,
'eutil': self.function}}}
def get_link_parameter(self, reqnum=0):
"""Generates params required for an ELink query"""
return {
'db': self.db, 'queryid': self.query_id, 'WebEnv': self.webenv,
'query_key': self.query_key, 'cmd': 'neighbor_history'
}
def validate_result(self) -> dict:
"""Validates hit counts obtained for all the provided UIDs.
As the expected hit count for a valid SRA accession ID is 1, all the
IDs with that value will be considered valid. UIDs with count higher
than 1 will be considered 'ambiguous' as they could not be resolved
to a single result. Likewise, UIDs with a count of 0 will be considered
'invalid' as no result could be found for those.
Returns:
dict: Maps each problematic accession ID to an error message
('ID is ambiguous.' or 'ID is invalid.'); empty if all IDs are valid.
"""
# correct id should have count == 1
leftover_ids = self.result[self.result != 1]
if leftover_ids.shape[0] == 0:
return {}
ambiguous_ids = leftover_ids[leftover_ids > 0]
invalid_ids = leftover_ids[leftover_ids == 0]
error_msg = 'Some of the IDs are invalid or ambiguous:'
if ambiguous_ids.shape[0] > 0:
error_msg += f'\n Ambiguous IDs: {", ".join(ambiguous_ids.index)}'
if invalid_ids.shape[0] > 0:
error_msg += f'\n Invalid IDs: {", ".join(invalid_ids.index)}'
self.logger.warning(error_msg)
return {
**{_id: 'ID is ambiguous.' for _id in ambiguous_ids.index},
**{_id: 'ID is invalid.' for _id in invalid_ids.index}
}
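# Illustrative example (not part of the original module; the accession IDs
# are made up). With self.result equal to
#     pd.Series({'SRR0000001': 1, 'SRR0000002': 2, 'SRR0000003': 0})
# validate_result() logs a warning and returns
#     {'SRR0000002': 'ID is ambiguous.', 'SRR0000003': 'ID is invalid.'}
# while 'SRR0000001', with exactly one hit, is treated as valid and omitted.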
def parse_search_results(self, response, uids: List[str]):
"""Parses response received from Esearch as a pandas Series object.
Hit counts obtained in the response will be extracted and assigned to
their respective query IDs. IDs not found in the results but present
in the UIDs list will get a count of 0.
Args:
response (): Response received from Esearch.
uids (List[str]): List of original UIDs that were submitted
as a query.
"""
self.result_raw = response
self.webenv = self.result_raw['esearchresult'].get('webenv')
self.query_key = self.result_raw['esearchresult'].get('querykey')
translation_stack = self.result_raw[
'esearchresult'].get('translationstack')
if not translation_stack:
self.result = pd.Series({x: 0 for x in uids}, name='count')
return
# filter out only positive hits
found_terms = [x for x in translation_stack if isinstance(x, dict)]
found_terms = {
x['term'].replace('[All Fields]', ''): int(x['count'])
for x in found_terms
}
# find ids that are missing
missing_ids = [x for x in uids if x not in found_terms.keys()]
missing_ids = {x: 0 for x in missing_ids}
found_terms.update(missing_ids)
self.result = pd.Series(found_terms, name='count')
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/9 22:52
contact: <EMAIL>
desc: Jin10 Data Center - Economic Indicators - Central Bank Rates - major central bank interest rates
https://datacenter.jin10.com/economic
Federal Reserve interest rate decision report
European Central Bank decision report
Reserve Bank of New Zealand decision report
People's Bank of China decision report
Swiss National Bank decision report
Bank of England decision report
Reserve Bank of Australia decision report
Bank of Japan decision report
Bank of Russia decision report
Reserve Bank of India decision report
Central Bank of Brazil decision report
"""
import json
import time
import pandas as pd
import requests
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Federal Reserve interest rate decision report
def macro_bank_usa_interest_rate():
"""
Federal Reserve interest rate decision report; data available from 1982-09-27 onwards
https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
:return: current value (%) of the Fed interest rate decision report
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "usa_interest_rate"
return temp_df
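# Usage sketch (illustrative, not part of the original module):
#     fed_rate = macro_bank_usa_interest_rate()
#     print(fed_rate.tail())  # latest announced rates (%), indexed by release date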
# Jin10 Data Center - Economic Indicators - Central Bank Rates - European Central Bank decision report
def macro_bank_euro_interest_rate():
"""
European Central Bank decision report; data available from 1999-01-01 onwards
https://datacenter.jin10.com/reportType/dc_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v=1578581663
:return: current value (%) of the ECB interest rate decision report
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["欧元区利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
import os
from glob import glob
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from astropy.io import ascii as ap_ascii
from numpy import array as nparr
from astrobase.services.gaia import objectid_search
from mpl_toolkits.axes_grid1 import make_axes_locatable
from stringcheese import pipeline_utils as pu
def get_reference_data():
#
# Table 4 of Douglas, Curtis et al (2019) Praesepe rotation periods, teffs.
#
# Quoting Curtis+2019: Prot for 743 members were amassed from the literature
# and measured from K2 Campaign 5 light curves by Douglas et al. (2017).
# Douglas et al. (2019) crossmatched this list with DR2 and filtered out stars
# that failed membership, multiplicity, and data quality criteria, leaving us
# with 359 single star members.
#
# And following Douglas+2019 table 4 caption for the flags...
#
praesepe_tab = ap_ascii.read("../data/apjab2468t4_mrt.txt")
sel = (
(praesepe_tab['SFlag'] == 'YYYYY')
|
(praesepe_tab['SFlag'] == 'YYY-Y')
)
praesepe_tab = praesepe_tab[sel]
assert len(praesepe_tab) == 359
praesepe_df = praesepe_tab.to_pandas()
#
# Figure 4 of Curtis+2019. Has a "gold sample" of Pleiades members that
# involved some crossmatching, and removal of binaries. I used WebPlotDigitizer
# to measure the rotation periods from that figure (rather than reproduce the
# actual procedure Jason discusses in the text.)
#
pleiades_df = pd.read_csv('../data/pleaides_prot_vs_teff.csv')
return praesepe_df, pleiades_df
def get_my_data(groupid=113, groupname='nan', classifxndate=20190907,
is_field_star_comparison=False):
#
# for anything flagged manually as good (in other words, the rotation
# period found just from the LS peak was OK), get the rotation period and
# teff from the .results file.
#
if is_field_star_comparison:
fs_str = 'field_star_comparison_'
else:
fs_str = ''
classifixndir = (
'../results/manual_classification/'
'{}_{}group{}_name{}_classification/'.
format(classifxndate, fs_str, groupid, groupname)
)
all_paths = glob(os.path.join(classifixndir,'*.png'))
n_paths = len(all_paths)
gd_paths = glob(os.path.join(classifixndir,'*good*.png'))
gd_sourceids = [
np.int64(os.path.basename(p).split("_")[-1].replace('[good].png',''))
for p in gd_paths
]
if len(gd_sourceids)==0:
raise AssertionError('expected some good sourceids')
# now get the LS results
datadir = (
'../results/pkls_statuses_pages/{}group{}_name{}'.
format(fs_str, groupid, groupname)
)
prots, teffs = [], []
for sourceid in gd_sourceids:
status_file = os.path.join(datadir, str(sourceid),
'GLS_rotation_period.results')
if not os.path.exists(status_file):
raise AssertionError('expected {} to exist'.format(status_file))
d = pu.load_status(status_file)
teffs.append(d['lomb-scargle']['teff'])
prots.append(d['lomb-scargle']['ls_period'])
df = pd.DataFrame(
{'teff': teffs, 'prot': prots, 'source_id':gd_sourceids}
)
return df, n_paths
def plot_prot_vs_teff_singlegroup(classifxndate=20190907, groupid=113,
groupname='nan',
is_field_star_comparison=False,
remove_outliers=False):
praesepe_df, pleiades_df = get_reference_data()
group_df, n_paths = get_my_data(
groupid=groupid,
groupname=groupname,
classifxndate=classifxndate,
is_field_star_comparison=is_field_star_comparison
)
kc19_df = pd.read_csv('../data/string_table2.csv')
if remove_outliers:
# remove outliers manually selected from glue (RVs or HR diagram
# offset)
_hr = pd.read_csv(
'../data/kc19_group{}_table1_hr_diagram_weirdos.csv'.
format(groupid)
)
_rv = pd.read_csv(
'../data/kc19_group{}_table1_rv_weirdos.csv'.
format(groupid)
)
outlier_df = pd.concat((_hr, _rv))
common = group_df.merge(outlier_df, on='source_id', how='inner')
print('before pruning RV and HR diagram outliers, had {} Prots'.
format(len(group_df)))
group_df = group_df[~group_df.source_id.isin(common.source_id)]
print('after pruning RV and HR diagram outliers, had {} Prots'.
format(len(group_df)))
row = kc19_df[kc19_df['group_id'] == groupid]
age = 10**(float(row['age']))
age_gyr = age/(1e9)
age_myr = age_gyr*1e3
##########################################
plt.close('all')
f,ax = plt.subplots(figsize=(4,3))
ax.scatter(
nparr(praesepe_df['Teff']), nparr(praesepe_df['Prot']),
color='gray', edgecolors='k',
alpha=1, linewidths=0.4, zorder=2, s=6, marker='s',
label='Praesepe 670 Myr'
)
ax.scatter(
nparr(pleiades_df['teff']), nparr(pleiades_df['prot']),
color='whitesmoke', edgecolors='gray',
alpha=1, linewidths=0.4, zorder=1, s=6, marker='X',
label='Pleiades 120 Myr'
)
if is_field_star_comparison:
label = 'Group {} field neighbors'.format(groupid)
else:
label = 'Group {}'.format(groupid)
ax.scatter(
nparr(group_df['teff']).astype(float), nparr(group_df['prot']).astype(float),
color='darkorange', edgecolors='k',
alpha=1, linewidths=0.4, zorder=3, s=9, marker='o',
label=label
)
ax.legend(loc='best', fontsize='x-small')
ax.set_xlim((7000,3000))
ax.set_ylim((0,16.2))
ax.set_xlabel('Effective temperature [K]')
ax.set_ylabel('Rotation period [days]')
titlestr = (
'Name: {}. KC19 isochrone age: {:d} Myr.\n{}/{} ({:d}%) with Prot.'.
format(groupname, int(age_myr), len(group_df), n_paths,
int(100*len(group_df)/n_paths))
)
ax.set_title(titlestr, fontsize='x-small')
ax.get_yaxis().set_tick_params(which='both', direction='in',
labelsize='small', top=True, right=True)
ax.get_xaxis().set_tick_params(which='both', direction='in',
labelsize='small', top=True, right=True)
if is_field_star_comparison:
fs_str = '_field_star_comparison'
else:
fs_str = ''
outpath = (
'../results/prot_vs_teff/prot_vs_teff_group{}_name{}{}.png'.
format(groupid, groupname, fs_str)
)
if remove_outliers:
outpath = (
'../results/prot_vs_teff/prot_vs_teff_{}group{}_name{}_outliers_removed.png'.
format(fs_str, groupid, groupname)
)
f.savefig(outpath, dpi=300, bbox_inches='tight')
print('made {}'.format(outpath))
if groupid==113 and not is_field_star_comparison:
sel = (
(group_df['teff'].astype(float)>4000) &
(group_df['prot'].astype(float)>8)
)
print('group 113 prot vs teff outliers are...')
print(group_df[sel].source_id)
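# Editor-added usage sketch (not part of the original script): regenerates the
# group-113 rotation-period figure, assuming the ../data and ../results
# directory layout referenced above is already in place.
def _demo_plot_prot_vs_teff_singlegroup():
    plot_prot_vs_teff_singlegroup(
        classifxndate=20190907, groupid=113, groupname='nan',
        is_field_star_comparison=False, remove_outliers=True
    )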
def plot_prot_vs_teff_allgroups(classifxndate=20190907, groupids=None,
groupnames=None, colorisBpmRp=0, xisBpmRp=None,
xisTeff=None, xisAge=None):
assert isinstance(groupids, list)
assert isinstance(groupnames, list)
if xisBpmRp:
assert not xisTeff and not xisAge
if xisTeff:
assert not xisBpmRp and not xisAge
if xisAge:
assert not xisBpmRp and not xisTeff
praesepe_df, pleiades_df = get_reference_data()
outpath = '../results/prot_vs_teff/prot_vs_teff_allgroups.csv'
if not os.path.exists(outpath):
group_df_list = []
n_paths = []
for groupid, groupname in zip(groupids, groupnames):
i_group_df, i_n_paths = get_my_data(
groupid=groupid,
groupname=groupname,
classifxndate=classifxndate,
is_field_star_comparison=False
)
i_group_df['groupid'] = groupid
i_group_df['groupname'] = groupname
group_df_list.append(i_group_df)
n_paths.append(i_n_paths)
        group_df = pd.concat(group_df_list)
import os
import pandas as pd
import numpy as np
import scipy
import scipy.stats
import pypeliner
import remixt.seqdataio
import remixt.config
import remixt.segalg
def infer_snp_genotype(data, base_call_error=0.005, call_threshold=0.9):
""" Infer snp genotype based on binomial PMF
Args:
data (pandas.DataFrame): input snp data
KwArgs:
base_call_error (float): per base sequencing error
call_threshold (float): posterior threshold for calling a genotype
Input dataframe should have columns 'ref_count', 'alt_count'
The operation is in-place, and the input dataframe after the call will
have 'AA', 'AB', 'BB' columns, in addition to others.
"""
data['total_count'] = data['ref_count'] + data['alt_count']
data['likelihood_AA'] = scipy.stats.binom.pmf(data['alt_count'], data['total_count'], base_call_error)
data['likelihood_AB'] = scipy.stats.binom.pmf(data['alt_count'], data['total_count'], 0.5)
data['likelihood_BB'] = scipy.stats.binom.pmf(data['ref_count'], data['total_count'], base_call_error)
data['evidence'] = data['likelihood_AA'] + data['likelihood_AB'] + data['likelihood_BB']
data['posterior_AA'] = data['likelihood_AA'] / data['evidence']
data['posterior_AB'] = data['likelihood_AB'] / data['evidence']
data['posterior_BB'] = data['likelihood_BB'] / data['evidence']
data['AA'] = (data['posterior_AA'] >= call_threshold) * 1
data['AB'] = (data['posterior_AB'] >= call_threshold) * 1
data['BB'] = (data['posterior_BB'] >= call_threshold) * 1
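# Editor-added example (not part of the original module): run the binomial
# genotype caller above on a tiny in-memory table, using only the pandas/scipy
# modules already imported in this file.
def _demo_infer_snp_genotype():
    demo_df = pd.DataFrame({
        'ref_count': [40, 22, 1],
        'alt_count': [0, 18, 35],
    })
    infer_snp_genotype(demo_df)
    # Expected calls: row 0 -> AA (all reference), row 1 -> AB (balanced),
    # row 2 -> BB (almost all alternate).
    print(demo_df[['AA', 'AB', 'BB']])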
def read_snp_counts(seqdata_filename, chromosome, num_rows=1000000):
""" Count reads for each SNP from sequence data
Args:
seqdata_filename (str): sequence data filename
chromosome (str): chromosome for which to count reads
KwArgs:
num_rows (int): number of rows per chunk for streaming
Returns:
pandas.DataFrame: read counts per SNP
Returned dataframe has columns 'position', 'ref_count', 'alt_count'
"""
snp_counts = list()
for alleles_chunk in remixt.seqdataio.read_allele_data(seqdata_filename, chromosome, chunksize=num_rows):
if len(alleles_chunk.index) == 0:
snp_counts.append(pd.DataFrame(columns=['position', 'ref_count', 'alt_count'], dtype=int))
continue
snp_counts_chunk = (
alleles_chunk
.groupby(['position', 'is_alt'])
.size()
.unstack(fill_value=0)
.reindex(columns=[0, 1])
.fillna(0)
.astype(int)
.rename(columns=lambda a: {0:'ref_count', 1:'alt_count'}[a])
.reset_index()
)
snp_counts.append(snp_counts_chunk)
snp_counts = pd.concat(snp_counts, ignore_index=True)
if len(snp_counts.index) == 0:
return pd.DataFrame(columns=['position', 'ref_count', 'alt_count']).astype(int)
    # Consolidate positions split by chunking
snp_counts = snp_counts.groupby('position').sum().reset_index()
snp_counts.sort_values('position', inplace=True)
return snp_counts
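# Editor-added sketch on toy data (not original code): the groupby/unstack
# idiom used in read_snp_counts above, showing how 'is_alt' 0/1 rows pivot into
# 'ref_count'/'alt_count' columns per position.
def _demo_allele_pivot():
    toy = pd.DataFrame({'position': [100, 100, 100, 250], 'is_alt': [0, 0, 1, 1]})
    counts = (
        toy.groupby(['position', 'is_alt'])
        .size()
        .unstack(fill_value=0)
        .reindex(columns=[0, 1])
        .fillna(0)
        .astype(int)
        .rename(columns=lambda a: {0: 'ref_count', 1: 'alt_count'}[a])
        .reset_index()
    )
    print(counts)  # position 100 -> ref 2, alt 1; position 250 -> ref 0, alt 1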
def infer_snp_genotype_from_normal(snp_genotype_filename, seqdata_filename, chromosome, config):
""" Infer SNP genotype from normal sample.
Args:
snp_genotype_filename (str): output snp genotype file
seqdata_filename (str): input sequence data file
chromosome (str): id of chromosome for which haplotype blocks will be inferred
        config (dict): relevant shapeit parameters including thousand genomes paths
The output snp genotype file will contain the following columns:
'position': het snp position
'AA': binary indicator for homozygous reference
'AB': binary indicator for heterozygous
'BB': binary indicator for homozygous alternate
"""
sequencing_base_call_error = remixt.config.get_param(config, 'sequencing_base_call_error')
het_snp_call_threshold = remixt.config.get_param(config, 'het_snp_call_threshold')
# Call snps based on reference and alternate read counts from normal
snp_counts_df = read_snp_counts(seqdata_filename, chromosome)
infer_snp_genotype(snp_counts_df, sequencing_base_call_error, het_snp_call_threshold)
snp_counts_df.to_csv(snp_genotype_filename, sep='\t', columns=['position', 'AA', 'AB', 'BB'], index=False)
def infer_snp_genotype_from_tumour(snp_genotype_filename, seqdata_filenames, chromosome, config):
""" Infer SNP genotype from tumour samples.
Args:
snp_genotype_filename (str): output snp genotype file
seqdata_filenames (str): input tumour sequence data files
chromosome (str): id of chromosome for which haplotype blocks will be inferred
        config (dict): relevant shapeit parameters including thousand genomes paths
The output snp genotype file will contain the following columns:
'position': het snp position
'AA': binary indicator for homozygous reference
'AB': binary indicator for heterozygous
'BB': binary indicator for homozygous alternate
"""
sequencing_base_call_error = remixt.config.get_param(config, 'sequencing_base_call_error')
homozygous_p_value_threshold = remixt.config.get_param(config, 'homozygous_p_value_threshold')
# Calculate total reference alternate read counts in all tumours
snp_counts_df = pd.DataFrame(columns=['position', 'ref_count', 'alt_count']).astype(int)
for tumour_id, seqdata_filename in seqdata_filenames.items():
snp_counts_df = pd.concat([snp_counts_df, read_snp_counts(seqdata_filename, chromosome)], ignore_index=True)
snp_counts_df = snp_counts_df.groupby('position').sum().reset_index()
snp_counts_df['total_count'] = snp_counts_df['alt_count'] + snp_counts_df['ref_count']
snp_counts_df = snp_counts_df[snp_counts_df['total_count'] > 50]
binom_test_ref = lambda row: scipy.stats.binom_test(
row['ref_count'], row['total_count'],
p=sequencing_base_call_error, alternative='greater')
snp_counts_df['prob_no_A'] = snp_counts_df.apply(binom_test_ref, axis=1)
binom_test_alt = lambda row: scipy.stats.binom_test(
row['alt_count'], row['total_count'],
p=sequencing_base_call_error, alternative='greater')
snp_counts_df['prob_no_B'] = snp_counts_df.apply(binom_test_alt, axis=1)
snp_counts_df['has_A'] = snp_counts_df['prob_no_A'] < homozygous_p_value_threshold
snp_counts_df['has_B'] = snp_counts_df['prob_no_B'] < homozygous_p_value_threshold
snp_counts_df['AA'] = (snp_counts_df['has_A'] & ~snp_counts_df['has_B']) * 1
snp_counts_df['BB'] = (snp_counts_df['has_B'] & ~snp_counts_df['has_A']) * 1
snp_counts_df['AB'] = (snp_counts_df['has_A'] & snp_counts_df['has_B']) * 1
snp_counts_df.to_csv(snp_genotype_filename, sep='\t', columns=['position', 'AA', 'AB', 'BB'], index=False)
def infer_haps(haps_filename, snp_genotype_filename, chromosome, temp_directory, config, ref_data_dir):
""" Infer haplotype blocks for a chromosome using shapeit
Args:
haps_filename (str): output haplotype data file
snp_genotype_filename (str): input snp genotype file
chromosome (str): id of chromosome for which haplotype blocks will be inferred
temp_directory (str): directory in which shapeit temp files will be stored
        config (dict): relevant shapeit parameters including thousand genomes paths
ref_data_dir (str): reference dataset directory
The output haps file will contain haplotype blocks for each heterozygous SNP position. The
file will be TSV format with the following columns:
'chromosome': het snp chromosome
'position': het snp position
'allele': binary indicator for reference (0) vs alternate (1) allele
'hap_label': label of the haplotype block
'allele_id': binary indicator of the haplotype allele
"""
def write_null():
with open(haps_filename, 'w') as haps_file:
haps_file.write('chromosome\tposition\tallele\thap_label\tallele_id\n')
accepted_chromosomes = [str(a) for a in range(1, 23)] + ['X']
if str(chromosome) not in accepted_chromosomes:
write_null()
return
# Temporary directory for shapeit files
try:
os.makedirs(temp_directory)
except OSError:
pass
# If we are analyzing male data and this is chromosome X
# then there are no het snps and no haplotypes
if chromosome == 'X' and not remixt.config.get_param(config, 'is_female'):
write_null()
return
# Impute 2 files for thousand genomes data by chromosome
phased_chromosome = chromosome
if chromosome == 'X':
phased_chromosome = remixt.config.get_param(config, 'phased_chromosome_x')
genetic_map_filename = remixt.config.get_filename(config, ref_data_dir, 'genetic_map', chromosome=phased_chromosome)
hap_filename = remixt.config.get_filename(config, ref_data_dir, 'haplotypes', chromosome=phased_chromosome)
legend_filename = remixt.config.get_filename(config, ref_data_dir, 'legend', chromosome=phased_chromosome)
snp_genotype_df = pd.read_csv(snp_genotype_filename, sep='\t')
if len(snp_genotype_df) == 0:
write_null()
return
# Remove ambiguous positions
snp_genotype_df = snp_genotype_df[(snp_genotype_df['AA'] == 1) | (snp_genotype_df['AB'] == 1) | (snp_genotype_df['BB'] == 1)]
# Read snp positions from legend
snps_df = pd.read_csv(legend_filename, compression='gzip', sep=' ', usecols=['position', 'a0', 'a1'])
# Remove indels
snps_df = snps_df[(snps_df['a0'].isin(['A', 'C', 'T', 'G'])) & (snps_df['a1'].isin(['A', 'C', 'T', 'G']))]
# Merge data specific inferred genotype
snps_df = snps_df.merge(snp_genotype_df[['position', 'AA', 'AB', 'BB']], on='position', how='inner', sort=False)
# Create genotype file required by shapeit
snps_df['chr'] = chromosome
snps_df['chr_pos'] = snps_df['chr'].astype(str) + ':' + snps_df['position'].astype(str)
temp_gen_filename = os.path.join(temp_directory, 'snps.gen')
snps_df.to_csv(temp_gen_filename, sep=' ', columns=['chr', 'chr_pos', 'position', 'a0', 'a1', 'AA', 'AB', 'BB'], index=False, header=False)
# Create single sample file required by shapeit
temp_sample_filename = os.path.join(temp_directory, 'snps.sample')
with open(temp_sample_filename, 'w') as temp_sample_file:
temp_sample_file.write('ID_1 ID_2 missing sex\n0 0 0 0\nUNR1 UNR1 0 2\n')
# Run shapeit to create phased haplotype graph
hgraph_filename = os.path.join(temp_directory, 'phased.hgraph')
hgraph_logs_prefix = hgraph_filename + '.log'
chr_x_flag = ''
if chromosome == 'X':
chr_x_flag = '--chrX'
sample_filename = remixt.config.get_filename(config, ref_data_dir, 'sample')
pypeliner.commandline.execute('shapeit', '-M', genetic_map_filename, '-R', hap_filename, legend_filename, sample_filename,
'-G', temp_gen_filename, temp_sample_filename, '--output-graph', hgraph_filename, chr_x_flag,
'--no-mcmc', '-L', hgraph_logs_prefix, '--seed', '12345')
# Run shapeit to sample from phased haplotype graph
sample_template = os.path.join(temp_directory, 'sampled.{0}')
averaged_changepoints = None
shapeit_num_samples = remixt.config.get_param(config, 'shapeit_num_samples')
for s in range(shapeit_num_samples):
sample_prefix = sample_template.format(s)
sample_log_filename = sample_prefix + '.log'
sample_haps_filename = sample_prefix + '.haps'
sample_sample_filename = sample_prefix + '.sample'
# FIXUP: sampling often fails with a segfault, retry at least 3 times
success = False
for _ in range(3):
try:
pypeliner.commandline.execute(
'shapeit', '-convert', '--input-graph', hgraph_filename, '--output-sample',
sample_prefix, '--seed', str(s), '-L', sample_log_filename)
success = True
break
except pypeliner.commandline.CommandLineException:
print(f'failed sampling with seed {s}, retrying')
continue
if not success:
raise Exception(f'failed to sample three times with seed {s}')
sample_haps = pd.read_csv(sample_haps_filename, sep=' ', header=None,
names=['id', 'id2', 'position', 'ref', 'alt', 'allele1', 'allele2'],
usecols=['position', 'allele1', 'allele2'])
sample_haps = sample_haps[sample_haps['allele1'] != sample_haps['allele2']]
sample_haps['allele'] = sample_haps['allele1']
sample_haps = sample_haps.drop(['allele1', 'allele2'], axis=1)
sample_haps.set_index('position', inplace=True)
sample_changepoints = sample_haps['allele'].diff().abs().astype(float).fillna(0.0)
if averaged_changepoints is None:
averaged_changepoints = sample_changepoints
else:
averaged_changepoints += sample_changepoints
os.remove(sample_log_filename)
os.remove(sample_haps_filename)
os.remove(sample_sample_filename)
averaged_changepoints /= float(shapeit_num_samples)
last_sample_haps = sample_haps
# Identify changepoints recurrent across samples
changepoint_confidence = np.maximum(averaged_changepoints, 1.0 - averaged_changepoints)
# Create a list of labels for haplotypes between recurrent changepoints
current_hap_label = 0
hap_label = list()
shapeit_confidence_threshold = remixt.config.get_param(config, 'shapeit_confidence_threshold')
for x in changepoint_confidence:
if x < float(shapeit_confidence_threshold):
current_hap_label += 1
hap_label.append(current_hap_label)
# Create the list of haplotypes
haps = last_sample_haps
haps['changepoint_confidence'] = changepoint_confidence
haps['hap_label'] = hap_label
haps.reset_index(inplace=True)
haps['allele_id'] = 0
haps_allele2 = haps.copy()
haps_allele2['allele_id'] = 1
haps_allele2['allele'] = 1 - haps_allele2['allele']
haps = pd.concat([haps, haps_allele2], ignore_index=True)
haps.sort_values(['position', 'allele_id'], inplace=True)
haps['chromosome'] = chromosome
haps = haps[['chromosome', 'position', 'allele', 'hap_label', 'allele_id']]
haps.to_csv(haps_filename, sep='\t', index=False)
def count_allele_reads(seqdata_filename, haps, chromosome, segments, filter_duplicates=False, map_qual_threshold=1):
""" Count reads for each allele of haplotype blocks for a given chromosome
Args:
seqdata_filename (str): input sequence data file
haps (pandas.DataFrame): input haplotype data
chromosome (str): id of chromosome for which counts will be calculated
segments (pandas.DataFrame): input genomic segments
KwArgs:
filter_duplicates (bool): filter reads marked as duplicate
map_qual_threshold (int): filter reads with less than this mapping quality
Input haps should have the following columns:
'chromosome': het snp chromosome
'position': het snp position
'allele': binary indicator for reference (0) vs alternate (1) allele
'hap_label': label of the haplotype block
'allele_id': binary indicator of the haplotype allele
Input segments should have columns 'start', 'end'.
The output allele counts table will contain read counts for haplotype blocks within each segment.
'chromosome': chromosome of the segment
'start': start of the segment
'end': end of the segment
'hap_label': label of the haplotype block
'allele_id': binary indicator of the haplotype allele
'readcount': number of reads specific to haplotype block allele
"""
# Select haps for given chromosome
haps = haps[haps['chromosome'] == chromosome]
# Merge haplotype information into read alleles table
alleles = list()
for alleles_chunk in remixt.seqdataio.read_allele_data(seqdata_filename, chromosome, chunksize=1000000):
alleles_chunk = alleles_chunk.merge(haps, left_on=['position', 'is_alt'], right_on=['position', 'allele'], how='inner')
alleles.append(alleles_chunk)
alleles = pd.concat(alleles, ignore_index=True)
# Read fragment data with filtering
reads = remixt.seqdataio.read_fragment_data(
seqdata_filename, chromosome,
filter_duplicates=filter_duplicates,
map_qual_threshold=map_qual_threshold,
)
# Merge read start and end into read alleles table
# Note this merge will also remove filtered reads from the allele table
alleles = alleles.merge(reads, on='fragment_id')
# Arbitrarily assign a haplotype/allele label to each read
alleles.drop_duplicates('fragment_id', inplace=True)
# Sort in preparation for search, reindex to allow for subsequent merge
segments = segments.sort_values('start').reset_index(drop=True)
# Annotate segment for start and end of each read
alleles['segment_idx'] = remixt.segalg.find_contained_segments(
segments[['start', 'end']].values,
alleles[['start', 'end']].values,
)
# Remove reads not contained within any segment
alleles = alleles[alleles['segment_idx'] >= 0]
    # Drop unnecessary columns
alleles.drop(['start', 'end'], axis=1, inplace=True)
# Merge segment start end, key for each segment (for given chromosome)
alleles = alleles.merge(segments[['start', 'end']], left_on='segment_idx', right_index=True)
    # Workaround for groupby/size for pandas
if len(alleles.index) == 0:
        return pd.DataFrame(columns=['chromosome', 'start', 'end', 'hap_label', 'allele_id', 'readcount'])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: East Money - quotes homepage - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen/Beijing A shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
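# Editor-added usage sketch (not part of the original module): pulls the full
# A-share snapshot built above and sorts it by turnover. Requires network
# access to the East Money endpoint.
def _demo_stock_zh_a_spot_em():
    spot_df = stock_zh_a_spot_em()
    print(spot_df.sort_values("成交额", ascending=False).head(10))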
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    East Money - B shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    East Money - stock code to market identifier mapping
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping of stock code to market identifier
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
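# Editor-added usage sketch (not part of the original module): the mapping
# built above is keyed by stock code with the East Money market prefix as the
# value (1 for Shanghai, 0 for Shenzhen/Beijing); the history functions join it
# as f"{market}.{code}". Requires network access.
def _demo_code_id_map_em():
    code_id_dict = code_id_map_em()
    print(code_id_dict.get("600000"), code_id_dict.get("000001"))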
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    East Money - quotes homepage - Shanghai/Shenzhen/Beijing A shares - daily bars
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward-adjusted", "hfq": "back-adjusted", "": "unadjusted"}
    :type adjust: str
    :return: daily bars
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
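# Editor-added usage sketch (hypothetical call, not part of the original
# module): one year of forward-adjusted daily bars for symbol 000001. Requires
# network access to the East Money kline API.
def _demo_stock_zh_a_hist():
    hist_df = stock_zh_a_hist(
        symbol="000001",
        period="daily",
        start_date="20230101",
        end_date="20231231",
        adjust="qfq",
    )
    print(hist_df.head())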
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
    East Money - quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday bars
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_date: start datetime
    :type start_date: str
    :param end_date: end datetime
    :type end_date: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :return: intraday bars
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
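# Editor-added usage sketch (hypothetical call, not part of the original
# module): 5-minute bars for symbol 000001 over a short window; the start/end
# strings follow the 'YYYY-MM-DD HH:MM:SS' format used above. Requires network
# access.
def _demo_stock_zh_a_hist_min_em():
    min_df = stock_zh_a_hist_min_em(
        symbol="000001",
        period="5",
        start_date="2023-06-01 09:30:00",
        end_date="2023-06-02 15:00:00",
    )
    print(min_df.head())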
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
    East Money - quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday bars including pre-market data
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_time: start time
    :type start_time: str
    :param end_time: end time
    :type end_time: str
    :return: intraday bars including pre-market data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
def stock_hk_spot_em() -> pd.DataFrame:
"""
    East Money - Hong Kong stocks - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hk_stocks
    :return: Hong Kong stocks - real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
return temp_df
def stock_hk_hist(
symbol: str = "40224",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
    East Money - quotes - Hong Kong stocks - daily bars
    http://quote.eastmoney.com/hk/08367.html
    :param symbol: Hong Kong stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "1", "hfq": "2", "": "unadjusted"}
    :type adjust: str
    :return: daily bars
:rtype: pandas.DataFrame
"""
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://33.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"116.{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
if temp_df.empty:
return pd.DataFrame()
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
if temp_df.empty:
return pd.DataFrame()
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
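# Editor-added usage sketch (hypothetical call, not part of the original
# module): daily unadjusted bars for one Hong Kong symbol; codes come from the
# 代码 column of stock_hk_spot_em(). Requires network access.
def _demo_stock_hk_hist():
    hk_df = stock_hk_hist(
        symbol="00700",
        period="daily",
        start_date="20230101",
        end_date="20231231",
        adjust="",
    )
    print(hk_df.tail())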
def stock_hk_hist_min_em(
symbol: str = "01611",
period: str = "1",
adjust: str = "",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
    East Money - quotes - Hong Kong stocks - intraday bars
    http://quote.eastmoney.com/hk/00948.html
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: intraday bars
:rtype: pandas.DataFrame
"""
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"116.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"116.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_us_spot_em() -> pd.DataFrame:
"""
    East Money - US stocks - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#us_stocks
    :return: US stocks real-time quotes; delayed 15 min
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "20000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:105,m:106,m:107",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"成交量",
"成交额",
"振幅",
"换手率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
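# Editor-added usage sketch (not part of the original module): the 代码 column
# produced above (market prefix + ticker, e.g. "105.MSFT") is the symbol format
# expected by stock_us_hist() below. Requires network access.
def _demo_stock_us_spot_em():
    us_df = stock_us_spot_em()
    print(us_df[us_df["代码"] == "105.MSFT"])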
def stock_us_hist(
symbol: str = "105.MSFT",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
    East Money - quotes - US stocks - daily bars
    http://quote.eastmoney.com/us/ENTX.html#fullScreenChart
    :param symbol: stock code; obtain it from the `代码` field returned by ak.stock_us_spot_em()
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "1", "hfq": "2", "": "unadjusted"}
    :type adjust: str
    :return: daily bars
:rtype: pandas.DataFrame
"""
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
url = "http://63.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
        return pd.DataFrame()
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDatetimeIndex:
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
# indexing
result = df.iloc[1]
        tm.assert_series_equal(result, expected)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index= | lrange(3) | pandas.compat.lrange |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 03:18:02 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os
import utils
utils.start(__file__)
#==============================================================================
# setting
month_limit = 12 # max: 96
month_round = 1
PREF = 'pos_201'
KEY = 'SK_ID_CURR'
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
#
# =============================================================================
#pos = pd.read_csv('/Users/Kazuki/Home-Credit-Default-Risk/py/sample_POS.csv')
pos = utils.read_pickles('../data/POS_CASH_balance')
pos.drop('SK_ID_PREV', axis=1, inplace=True)
pos = pos[pos['MONTHS_BALANCE']>=-month_limit]
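# Bin MONTHS_BALANCE into month_round-sized buckets via floor division; with
# month_round = 1 this keeps the original monthly resolution.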
pos['month_round'] = (pos['MONTHS_BALANCE'] / month_round).map(np.floor)
pos.drop('MONTHS_BALANCE', axis=1, inplace=True)
# groupby other credit cards
gr = pos.groupby(['SK_ID_CURR', 'month_round'])
pos_ = gr.size()
pos_.name = 'pos_size'
pos_ = pd.concat([pos_, gr.sum()], axis=1).reset_index() # TODO:NAME_CONTRACT_STATUS
pos_.sort_values(['SK_ID_CURR', 'month_round'], ascending=[True, False], inplace=True)
pos_['CNT_INSTALMENT_FUTURE-dby-CNT_INSTALMENT'] = pos_['CNT_INSTALMENT_FUTURE'] / pos_['CNT_INSTALMENT']
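# Sketch for the NAME_CONTRACT_STATUS TODO above (not part of the original pipeline):
# the categorical column is dropped by gr.sum(), so one option is to one-hot encode it
# before the groupby so the per-month status counts survive the aggregation, e.g.
# pos = pd.get_dummies(pos, columns=['NAME_CONTRACT_STATUS'])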
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
#pos_['-by-'] = pos_[''] / pos_['']
# TODO: pct_change & diff & rolling mean
#gr = pos_.groupby(['SK_ID_CURR'])
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
#pos_['AMT_BALANCE_pctchng-1'] = gr['AMT_BALANCE'].pct_change(-1)
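# Minimal sketch of the pct_change / diff / rolling-mean TODO above, kept commented out
# so the pivot below is unchanged; it mirrors the commented-out groupby and reuses
# CNT_INSTALMENT_FUTURE, which the ratio feature above already assumes is present.
# gr_cur = pos_.groupby('SK_ID_CURR')
# pos_['CNT_INSTALMENT_FUTURE_pctchng-1'] = gr_cur['CNT_INSTALMENT_FUTURE'].pct_change(-1)
# pos_['CNT_INSTALMENT_FUTURE_diff-1'] = gr_cur['CNT_INSTALMENT_FUTURE'].diff(-1)
# pos_['CNT_INSTALMENT_FUTURE_rollmean3'] = gr_cur['CNT_INSTALMENT_FUTURE'].transform(
#     lambda s: s.rolling(3, min_periods=1).mean())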
pt = pd.pivot_table(pos_, index='SK_ID_CURR', columns=['month_round'])
pt.columns = [f'{PREF}_{c[0]}_t{int(c[1])}' for c in pt.columns]
pt.reset_index(inplace=True)
# =============================================================================
# merge
# =============================================================================
train = utils.load_train([KEY])
test = utils.load_test([KEY])
train_ = pd.merge(train, pt, on=KEY, how='left').drop(KEY, axis=1)
utils.to_feature(train_, '../feature/train')
test_ = | pd.merge(test, pt, on=KEY, how='left') | pandas.merge |
import sys
import os
from tqdm import tqdm
import pmdarima as pm
from pmdarima.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
import pandas as pd
sys.path.insert(0, os.path.abspath('../../../covid_forecast'))
from covid_forecast.utils.data_io import get_data, download_the_data
from covid_forecast.utils.visualizations import plt_arima_forecast,plt_arima_forecast_outsample, render_pic_in_notebook
# where to save things
OUTPUT = '../outputs/survival_analysis'
# With 1085 people, 42 deaths
#DATA_LOCATTION = '../data/novel-corona-virus-2019-dataset/COVID19_line_list_data.csv'
# Below file contains more cases
#DATA_LOCATTION = '../data/novel-corona-virus-2019-dataset/COVID19_open_line_list.csv'
# Data from SK, https://www.kaggle.com/kimjihoo/coronavirusdataset#PatientInfo.csv
DATA_LOCATTION = '../data/coronavirusdataset/PatientInfo.csv'
os.makedirs(OUTPUT, exist_ok=True)
data = pd.read_csv(DATA_LOCATTION)
data = data[[i for i in data.columns if 'Unnamed' not in i]]
data.head().T
"""Check numbers death, recovered and sick
For file COVID19_line_list_data.csv
"""
try:
print('Size sample: {}'.format(data.shape[0]))
print('Number casualties {}'.format((data['death'] == '1').sum()))
print('Number recovered {}'.format((data['recovered'] == '1').sum()))
print('Sick People (non recovered, non death) {}'.format(((data['death'] != '1') & (data['recovered'] != '1')).sum()))
except Exception as e: print(e)
"""Check numbers death, recovered and sick
For file COVID19_line_list_data.csv
"""
try:
print('Size sample: {}'.format(data.shape[0]))
print('Number casualties {}'.format((data['death'] == '1').sum()))
print('Number recovered {}'.format((data['recovered'] == '1').sum()))
print('Sick People (non recovered, non death) {}'.format(((data['death'] != '1') & (data['recovered'] != '1')).sum()))
except Exception as e: print(e)
"""Check numbers death, recovered and sick
# Data from SK, https://www.kaggle.com/kimjihoo/coronavirusdataset#PatientInfo.csv
"""
try:
print('Size sample: {}'.format(data.shape[0]))
print('Number casualties {}'.format((data['state'] == 'deceased').sum()))
print('Number recovered {}'.format((data['state'] == 'released').sum()))
except Exception as e: print(e)
"""Features"""
data['confirmed_date'] = | pd.to_datetime(data['confirmed_date']) | pandas.to_datetime |
import os
import pandas as pd
from gym_brt.data.config.configuration import FREQUENCY
from matplotlib import pyplot as plt
def set_new_model_id(path):
model_id = 0
for (_, dirs, files) in os.walk(path):
for dir in dirs:
try:
if int(dir[:3]) >= model_id:
model_id = int(dir[:3]) + 1
except:
continue
path = os.path.join(path, str(model_id).zfill(3))
os.mkdir(path)
return model_id
def num_epochs(path, epoch_length=None, frequency=FREQUENCY):
number_of_epochs = 0
for root, dirs, files in os.walk(path):
for file in files:
if ".zip" in file:
number_of_epochs += 1
print("Number of epochs: %d" % number_of_epochs)
if epoch_length is not None:
steps = number_of_epochs * epoch_length
print("Steps: %d" % steps)
if frequency is not None:
time = steps / frequency / 60
print("Time (min): %.2f" % time)
def visualize_progress(path):
columns = ['approxkl', 'clipfrac', 'ep_len_mean', 'ep_reward_mean',
'explained_variance', 'fps', 'n_updates', 'policy_entropy',
'policy_loss', 'serial_timesteps', 'time_elapsed', 'total_timesteps',
'value_loss']
# try:
result_log = pd.read_csv(path + "/result_log.csv")
fig = plt.figure(figsize=(30, 10))
for i, column in enumerate(columns):
ax = fig.add_subplot(3, 5, i + 1)
ax.plot(result_log[column])
ax.set_title(column)
plt.show()
def save_progress(path):
progress_file = path + "/progress.csv"
columns = ['approxkl', 'clipfrac', 'ep_len_mean', 'ep_reward_mean',
'explained_variance', 'fps', 'n_updates', 'policy_entropy',
'policy_loss', 'serial_timesteps', 'time_elapsed', 'total_timesteps',
'value_loss']
if os.path.exists(progress_file):
try:
progress = pd.read_csv(progress_file)
if os.path.exists(path + "/result_log.csv"):
result_log = pd.read_csv(path + "/result_log.csv")
else:
result_log = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import pandas as pd
import pytest
from evalml.preprocessing import split_data
from evalml.problem_types import (
ProblemTypes,
is_binary,
is_multiclass,
is_regression,
is_time_series,
)
@pytest.mark.parametrize("problem_type", ProblemTypes.all_problem_types)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_split_data(
problem_type, data_type, X_y_binary, X_y_multi, X_y_regression, make_data_type
):
if is_binary(problem_type):
X, y = X_y_binary
if is_multiclass(problem_type):
X, y = X_y_multi
if is_regression(problem_type):
X, y = X_y_regression
problem_configuration = None
if is_time_series(problem_type):
problem_configuration = {"gap": 1, "max_delay": 7, "date_index": "ts_data"}
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
test_pct = 0.25
X_train, X_test, y_train, y_test = split_data(
X,
y,
test_size=test_pct,
problem_type=problem_type,
problem_configuration=problem_configuration,
)
test_size = len(X) * test_pct
train_size = len(X) - test_size
assert len(X_train) == train_size
assert len(X_test) == test_size
assert len(y_train) == train_size
assert len(y_test) == test_size
assert isinstance(X_train, pd.DataFrame)
assert isinstance(X_test, pd.DataFrame)
assert isinstance(y_train, pd.Series)
assert isinstance(y_test, pd.Series)
if is_time_series(problem_type):
if not isinstance(X, pd.DataFrame):
X = | pd.DataFrame(X) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from io import StringIO
import numpy as np
import time
#
timezones={}
#function = 'TIME_SERIES_INTRADAY'
apii = 'https://www.alphavantage.co/query?function={function}&symbol={symbol}&interval={interval}&outputsize=full&datatype=csv&apikey='
apid = 'https://www.alphavantage.co/query?function={function}&symbol={symbol}&outputsize=full&datatype=csv&apikey='
#https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=ASML&interval=1min&outputsize=compact&datatype=csv&time_period=0&apikey=
sector = 'https://www.alphavantage.co/query?function=SECTOR&datatype=csv&apikey='
s_type = ['close','high','low']#,'open']
ma_types = [0,1,2,3,4,5,6,7,8]
# Moving average type. By default, matype=0 (INT): 0 = SMA, 1 = EMA, 2 = Weighted Moving Average (WMA), 3 = Double Exponential Moving Average (DEMA), 4 = Triple Exponential Moving Average (TEMA), 5 = Triangular Moving Average (TRIMA), 6 = T3 Moving Average, 7 = Kaufman Adaptive Moving Average (KAMA), 8 = MESA Adaptive Moving Average (MAMA).
indicator_dict = {
'sma':'https://www.alphavantage.co/query?function=SMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'ema':'https://www.alphavantage.co/query?function=EMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'tema':'https://www.alphavantage.co/query?function=TEMA&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'macd':'https://www.alphavantage.co/query?function=MACD&symbol={symbol}&interval={interval}&series_type=close&fastperiod=12&slowperiod=26&signalperiod=9&datatype=csv&apikey=',
'macdext':'https://www.alphavantage.co/query?function=MACDEXT&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&signalperiod={signalperiod}&fastmatype={fastmatype}&slowmatype={slowmatype}&signalmatype={signalmatype}&datatype=csv&apikey=',
'stoch':'https://www.alphavantage.co/query?function=STOCH&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&slowkperiod={slowkperiod}&slowdperiod={slowdperiod}&slowkmatype={slowkmatype}&slowdmatype={slowdmatype}&datatype=csv&apikey=',
'stochf':'https://www.alphavantage.co/query?function=STOCHF&symbol={symbol}&interval={interval}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=',
'rsi':'https://www.alphavantage.co/query?function=RSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'stochrsi':'https://www.alphavantage.co/query?function=STOCHRSI&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&fastkperiod={fastkperiod}&fastdperiod={fastdperiod}&fastdmatype={fastdmatype}&datatype=csv&apikey=',
'willr':'https://www.alphavantage.co/query?function=WILLR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'adx':'https://www.alphavantage.co/query?function=ADX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'adxr':'https://www.alphavantage.co/query?function=ADXR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'apo':'https://www.alphavantage.co/query?function=APO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=',
'ppo':'https://www.alphavantage.co/query?function=PPO&symbol={symbol}&interval={interval}&series_type={series_type}&fastperiod={fastperiod}&slowperiod={slowperiod}&matype={matype}&datatype=csv&apikey=',
'mom':'https://www.alphavantage.co/query?function=MOM&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'bop':'https://www.alphavantage.co/query?function=BOP&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'cci':'https://www.alphavantage.co/query?function=CCI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'cmo':'https://www.alphavantage.co/query?function=CMO&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'roc':'https://www.alphavantage.co/query?function=ROC&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'rocr':'https://www.alphavantage.co/query?function=ROCR&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'aroon':'https://www.alphavantage.co/query?function=AROON&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'aroonosc':'https://www.alphavantage.co/query?function=AROONOSC&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'mfi':'https://www.alphavantage.co/query?function=MFI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'trix':'https://www.alphavantage.co/query?function=TRIX&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'ultosc':'https://www.alphavantage.co/query?function=ULTOSC&symbol={symbol}&interval={interval}&timeperiod1={timeperiod1}&timeperiod2={timeperiod2}&timeperiod3={timeperiod3}&datatype=csv&apikey=',
'dx':'https://www.alphavantage.co/query?function=DX&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'minus_di':'https://www.alphavantage.co/query?function=MINUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'plus_di':'https://www.alphavantage.co/query?function=PLUS_DI&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'minus_dm':'https://www.alphavantage.co/query?function=MINUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'plus_dm':'https://www.alphavantage.co/query?function=PLUS_DM&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'bbands':'https://www.alphavantage.co/query?function=BBANDS&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&nbdevup={nbdevup}&nbdevdn={nbdevdn}&matype={matype}&datatype=csv&apikey=',
'midpoint':'https://www.alphavantage.co/query?function=MIDPOINT&symbol={symbol}&interval={interval}&time_period={time_period}&series_type={series_type}&datatype=csv&apikey=',
'midprice':'https://www.alphavantage.co/query?function=MIDPRICE&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'sar':'https://www.alphavantage.co/query?function=SAR&symbol={symbol}&interval={interval}&acceleration={acceleration}&maximum={maximum}&datatype=csv&apikey=',
'trange':'https://www.alphavantage.co/query?function=TRANGE&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'atr':'https://www.alphavantage.co/query?function=ATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'natr':'https://www.alphavantage.co/query?function=NATR&symbol={symbol}&interval={interval}&time_period={time_period}&datatype=csv&apikey=',
'ad':'https://www.alphavantage.co/query?function=AD&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'adosc':'https://www.alphavantage.co/query?function=ADOSC&symbol={symbol}&interval={interval}&fastperiod={fastperiod}&slowperiod={slowperiod}&datatype=csv&apikey=',
'obv':'https://www.alphavantage.co/query?function=OBV&symbol={symbol}&interval={interval}&datatype=csv&apikey=',
'ht_trendline':'https://www.alphavantage.co/query?function=HT_TRENDLINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_sine':'https://www.alphavantage.co/query?function=HT_SINE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_trendmode':'https://www.alphavantage.co/query?function=HT_TRENDMODE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcperiod':'https://www.alphavantage.co/query?function=HT_DCPERIOD&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcphase':'https://www.alphavantage.co/query?function=HT_DCPHASE&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey=',
'ht_dcphasor':'https://www.alphavantage.co/query?function=HT_PHASOR&symbol={symbol}&interval={interval}&series_type={series_type}&datatype=csv&apikey='
}
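# Illustrative use of the templates above: the matype codes documented next to ma_types
# plug straight into the {matype} placeholder (here 1 = EMA); the symbol/interval values
# are examples only and the apikey still has to be appended by the caller.
# apo_url = indicator_dict['apo'].format(symbol='MSFT', interval='daily', series_type='close',
#                                        fastperiod=12, slowperiod=26, matype=1)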
def moving_a(ma,symbol,interval):
api = indicator_dict[ma]
ma_range = [5,10,15,20,35,50,65,100,125,200,250]
#125
out_df = | pd.DataFrame() | pandas.DataFrame |
import os
os.environ["OMP_NUM_THREADS"] = "1" # noqa E402
os.environ["OPENBLAS_NUM_THREADS"] = "1" # noqa E402
os.environ["MKL_NUM_THREADS"] = "1" # noqa E402
os.environ["VECLIB_MAXIMUM_THREADS"] = "1" # noqa E402
os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa E402
from tqdm import tqdm
from timeit import Timer
import pandas as pd
import cv2
import random
import numpy as np
cv2.setNumThreads(0) # noqa E402
cv2.ocl.setUseOpenCL(False) # noqa E402
from collections import defaultdict
from augbench import utils
from augbench import transforms
if __name__ == "__main__":
args = utils.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
package_versions = utils.get_package_versions()
if args.print_package_versions:
print(package_versions)
images_per_second = defaultdict(dict)
libraries = args.libraries
data_dir = args.data_dir
paths = list(sorted(os.listdir(data_dir)))
paths = paths[: args.images]
imgs_cv2 = [utils.read_img_cv2(os.path.join(data_dir, path), args.imsize) for path in paths]
imgs_pillow = [utils.read_img_pillow(os.path.join(data_dir, path), args.imsize) for path in paths]
benchmarks = [
transforms.HorizontalFlip(args.imsize),
transforms.VerticalFlip(args.imsize),
transforms.RotateAny(args.imsize),
transforms.Crop(224, args.imsize),
transforms.Crop(128, args.imsize),
transforms.Crop(64, args.imsize),
transforms.Crop(32, args.imsize),
transforms.Pad(300, args.imsize),
transforms.VHFlipRotateCrop(args.imsize),
transforms.HFlipCrop(args.imsize),
]
print(f"==> Setting deterministic to be {args.deterministic}")
for b in benchmarks:
b.set_deterministic(args.deterministic)
for library in libraries:
imgs = imgs_pillow if library in ("torchvision", "augmentor", "pillow") else imgs_cv2
pbar = tqdm(total=len(benchmarks))
for benchmark in benchmarks:
pbar.set_description("Current benchmark: {} | {}".format(library, benchmark))
benchmark_images_per_second = None
if benchmark.is_supported_by(library):
timer = Timer(lambda: benchmark.run(library, imgs))
run_times = timer.repeat(number=1, repeat=args.runs)
benchmark_images_per_second = [1 / (run_time / args.images) for run_time in run_times]
images_per_second[library][str(benchmark)] = benchmark_images_per_second
pbar.update(1)
pbar.close()
pd.set_option("display.width", 1000)
df = | pd.DataFrame.from_dict(images_per_second) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from pytz import timezone, utc
from scipy import stats
from time import gmtime, strftime, mktime
def data_sampler_renamer_parser(path='weather-data.txt'):
# Take columns that are useful, rename them, parse the timestamp string
data = pd.read_csv(path, delimiter=r"\s+")
data_useful = data[
['YR--MODAHRMN', 'DIR', 'SPD', 'CLG', 'SKC', 'VSB', 'MW', 'AW', 'AW.1', 'TEMP', 'DEWP', 'SLP', 'ALT', 'MAX',
'MIN', 'PCP01', 'PCP06', 'PCP24', 'PCPXX', 'SD']]
data_useful.rename(
columns={'YR--MODAHRMN': 'timestamp', 'DIR': 'wind_direction', 'SPD': 'wind_speed', 'CLG': 'cloud_ceiling',
'SKC': 'sky_cover', 'VSB': 'visibility_miles', 'MW': 'manual_weather', 'AW': 'auto_weather',
'AW.1': 'auto_weather1', 'TEMP': 'temprature', 'DEWP': 'dew_point', 'SLP': 'sea_level',
'ALT': 'altimeter', 'MAX': 'max_temp', 'MIN': 'min_temp', 'PCP01': '1hour_precip',
'PCP06': '6hour_precip', 'PCP24': '24hour_precip', 'PCPXX': '3hour_precip', 'SD': 'snow_depth'},
inplace=True)
data_useful.timestamp = data_useful.timestamp.astype(str)
data_useful['year'] = data_useful.timestamp.str[0:4]
data_useful['month'] = data_useful.timestamp.str[4:6]
data_useful['day'] = data_useful.timestamp.str[6:8]
data_useful['hour'] = data_useful.timestamp.str[8:10]
data_useful['minutes'] = data_useful.timestamp.str[10:12]
data_useful.minutes = data_useful.minutes.astype(int)
data_useful.year = data_useful.year.astype(int)
data_useful.month = data_useful.month.astype(int)
data_useful.day = data_useful.day.astype(int)
data_useful.hour = data_useful.hour.astype(int)
return data_useful
def days_fixer(dataframe):
    # Unify times to have observations at every hour. Fix all the dates/times based on this rule
df = dataframe
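    # rounding rule: minutes <= 30 snap down to the current hour,
    # minutes > 30 roll forward to the next hour (day/month/year carried below)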
df.loc[(df['minutes'].values < 31) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'hour'] = df[(df.minutes != 0) & (
df.minutes > 30)].hour + 1
df.loc[(df['minutes'].values > 30) & (df['minutes'].values != 0), 'minutes'] = 0
df.loc[(df['hour'].values == 24), 'day'] = df[df.hour == 24].day + 1
df.loc[(df['hour'].values == 24), 'hour'] = 0
df.loc[(df['day'].values == 32), 'month'] = df[df.day == 32].month + 1
df.loc[(df['day'].values == 32), 'day'] = 1
df.loc[(df['day'].values == 29) & (df['month'].values == 2), ['month', 'day']] = 3, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 4), ['month', 'day']] = 5, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 6), ['month', 'day']] = 7, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 9), ['month', 'day']] = 10, 1
df.loc[(df['day'].values == 31) & (df['month'].values == 11), ['month', 'day']] = 12, 1
df.loc[(df['day'].values == 1) & (df['month'].values == 13), ['month', 'day', 'year']] = 1, 1, 2016
df.hour = df.hour.map("{:02}".format)
df['datetime'] = pd.to_datetime(
df.year.astype(str) + ' ' + df.month.astype(str) + ' ' + df.day.astype(str) + ' ' + df.hour.astype(str),
format='%Y %m %d %H')
return df
def grouper(dataframe):
    # Take a subset of columns and group them by timestamp. Afterwards take the mean/mode of the values depending on datatype
sub_df = dataframe[
['wind_direction', 'wind_speed', 'cloud_ceiling', 'sky_cover', 'visibility_miles', 'temprature', 'dew_point',
'sea_level', 'altimeter', '1hour_precip', 'datetime']]
sub_df = sub_df.convert_objects(convert_numeric=True)
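    # NOTE: DataFrame.convert_objects no longer exists in modern pandas; on newer
    # versions, pd.to_numeric(..., errors='coerce') applied per numeric column is
    # the usual replacement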
f = {'wind_direction': ['mean'], 'wind_speed': ['mean'], 'cloud_ceiling': ['mean'], 'visibility_miles': ['mean'],
'temprature': ['mean'], 'dew_point': ['mean'], 'sea_level': ['mean'], 'altimeter': ['mean'],
'1hour_precip': ['mean']}
grouped = sub_df.groupby('datetime').agg(f)
grouped.columns = grouped.columns.droplevel(-1)
grouped2 = sub_df[['sky_cover', 'datetime']]
grouped2.loc[(grouped2['sky_cover'].values == '***'), 'sky_cover'] = np.nan
grouped3 = grouped2.groupby(['datetime']).agg(lambda x: stats.mode(x)[0][0])
grouped3.loc[(grouped3['sky_cover'].values == 0), 'sky_cover'] = np.nan
data_full = grouped.merge(grouped3, how='left', on=None, left_on=None, right_on=None, left_index=True,
right_index=True)
data_full.reset_index(inplace=True)
data_full['1hour_precip'].fillna(0, inplace=True)
data_full.loc[data_full[data_full['1hour_precip'] > 0.049].index, 'precip'] = 'high'
data_full.loc[data_full[data_full['1hour_precip'] <= 0.049].index, 'precip'] = 'low'
data_full.loc[data_full[data_full['1hour_precip'] == 0].index, 'precip'] = 'no'
data_full['precip_shift'] = data_full.precip.shift(-1)
data_full = pd.get_dummies(data_full, prefix=None, columns=['precip_shift'], sparse=False, drop_first=False)
data_full = data_full.fillna(method='bfill', axis=0, inplace=False, limit=None, downcast=None)
return data_full
def convert_gmt_to_easttime(string_date):
"""
:param string_date: GMT date
:return: Date converted to eastern time
"""
# Converts the string to datetime object
string_date = str(string_date)
try:
gtm = timezone('GMT')
eastern_tz = timezone('US/Eastern')
date_obj = datetime.strptime(string_date, '%Y-%m-%d %H:%M:%S')
date_obj = date_obj.replace(tzinfo=gtm)
date_eastern = date_obj.astimezone(eastern_tz)
date_str = date_eastern.strftime('%Y-%m-%d %H:%M:%S')
return date_str
    except (ValueError, IndexError):  # strptime raises ValueError on malformed input
return ''
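# example (EDT is UTC-4 in summer):
#   convert_gmt_to_easttime('2015-06-01 12:00:00') -> '2015-06-01 08:00:00'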
def add_easterntime_column(dataframe):
"""
:param dataframe: Weather dataframe
:return: dataframe with easter time column
"""
dataframe['est_datetime'] = dataframe['datetime'].apply(convert_gmt_to_easttime)
dataframe['est_datetime'] = | pd.to_datetime(dataframe['est_datetime']) | pandas.to_datetime |
import numpy as np
import pandas as pd
from rdt.transformers.pii import AnonymizedFaker
def test_anonymizedfaker():
"""End to end test with the default settings of the ``AnonymizedFaker``."""
data = pd.DataFrame({
'id': [1, 2, 3, 4, 5],
'username': ['a', 'b', 'c', 'd', 'e']
})
instance = AnonymizedFaker()
transformed = instance.fit_transform(data, 'username')
reverse_transform = instance.reverse_transform(transformed)
expected_transformed = pd.DataFrame({
'id': [1, 2, 3, 4, 5]
})
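    # the PII column is expected to be dropped on transform; reverse_transform
    # should repopulate it with newly faked values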
| pd.testing.assert_frame_equal(transformed, expected_transformed) | pandas.testing.assert_frame_equal |
"""
Code to manage results of many simulations together.
"""
import pandas as pd
from tctx.networks.turtle_data import DEFAULT_ACT_BINS
from tctx.util import sim
import os.path
import logging
from pathlib import Path
import json
from tqdm.auto import tqdm as pbar
import datetime
import h5py
import numpy as np
import re
BASE_FOLDER = Path('/gpfs/gjor/personal/riquelmej/dev/tctx/data/interim')
# TODO find a way to get rid of these
LIST_LIKE_COLS = [
r'.*forced_times.*',
r'.*input_targeted_times.*',
r'.*input_pulsepacket_pulse_times.*',
r'.*voltage_measure.*',
r'foll_gids',
]
def get_col_names(name):
"""
We store results for each simulation in indexed files.
Every simulation will indicate its results with a path and an idx property.
Returns the pair of column names for the given type of results.
Eg: "spikes" -> ("spikes_path", "spikes_idx")
"""
return f'{name}_path', f'{name}_idx'
def _get_multi_store_cols(store_names):
"""return a list of columns that represent the given store names"""
import itertools
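    # e.g. ['spikes', 'cells'] -> ['spikes_path', 'spikes_idx', 'cells_path', 'cells_idx']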
return list(itertools.chain(*[get_col_names(name) for name in store_names]))
def _hdf5_table_exists(path, key) -> bool:
"""
Check if table was saved and it's not empty
"""
with h5py.File(path, 'r') as f:
if key in f.keys():
# Empty DataFrames store an axis0 and axis1 of length 1, but no data blocks.
# This is very tied into pytables implementation, but it saves us having to load the dataframe.
return len(f[key].keys()) > 2
else:
return False
class CatMangler:
"""
because categories are nice to work with interactively but are a pain to save to HDF5
by default we save and load data as ints
this object makes it easy to convert between the two
"""
def __init__(self):
self.category_types = {
'layer': pd.CategoricalDtype(categories=['L1', 'L2', 'L3'], ordered=False),
'con_type': pd.CategoricalDtype(categories=['e2e', 'e2i', 'i2e', 'i2i'], ordered=False),
'syn_type': pd.CategoricalDtype(categories=['e2x', 'i2x'], ordered=False),
'ei_type': pd.CategoricalDtype(categories=['e', 'i'], ordered=False),
'spike_cat': pd.CategoricalDtype(categories=['baseline', 'effect'], ordered=False),
'foll_cat': pd.CategoricalDtype(categories=['bkg', 'foll', 'anti'], ordered=False),
'jump_foll_cat': pd.CategoricalDtype(
categories=['b2b', 'b2f', 'f2b', 'f2f', 'a2a', 'b2a', 'f2a', 'a2b', 'a2f'], ordered=False),
'w_cat': pd.CategoricalDtype(categories=['weak', 'mid', 'strong'], ordered=False),
'jump_dir': pd.CategoricalDtype(categories=['incoming', 'outgoing'], ordered=False),
'jump_dt': pd.CategoricalDtype(categories=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], ordered=False)
}
# renaming cols
self.mappings = {
'cells': {'ei_type': 'ei_type', 'frm_cat': 'foll_cat'},
'connections': {'con_type': 'con_type', 'syn_type': 'syn_type'},
'spikes': {'ei_type': 'ei_type', 'cat': 'spike_cat'},
'jumps': {
'con_type': 'con_type',
'target_cat': 'spike_cat',
'target_ei_type': 'ei_type',
'source_cat': 'spike_cat',
'source_ei_type': 'ei_type',
},
'default': {n: n for n in self.category_types.keys()},
}
# dropping cols
self.cleanup = {
'cells': ['layer', 'z'],
'connections': ['syn_type'],
'spikes': ['layer', 'z'],
}
self.bins = {
'jump_dt': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 100],
'w_cat': [0., 17.5, 52.5, 70.],
}
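        # bin edges (presumably for pd.cut) used elsewhere to discretise jump
        # latencies (ms) and synaptic weights into the categorical dtypes above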
def get_cat_code(self, type_name, value_name):
"""
use like: sb.CAT.get_cat_code('foll_cat', 'foll')
"""
return self.category_types[type_name].categories.get_loc(value_name)
def get_cat_name(self, type_name, value_code):
"""
use like: sb.CAT.get_cat_code('foll_cat', 'foll')
"""
return (
self.category_types[type_name].categories[value_code]
if np.issubdtype(type(value_code), np.number)
else value_code)
def _lookup_mapping(self, cat_mapping):
if isinstance(cat_mapping, str):
return self.mappings.get(cat_mapping, {})
else:
assert isinstance(cat_mapping, dict)
return cat_mapping
def _lookup_cleanup(self, cleanup):
if isinstance(cleanup, str):
return self.cleanup.get(cleanup, [])
else:
assert isinstance(cleanup, list)
return cleanup
def remove_cats(self, df, name):
"""this edits the DF inplace! """
cat_mapping: dict = self._lookup_mapping(name)
drop: list = self._lookup_cleanup(name)
self._remove_cats(df, drop, cat_mapping)
def _remove_cats(self, df, drop, cat_mapping):
"""this edits the DF inplace! """
for c in drop:
if c in df.columns:
df.drop(c, axis=1, inplace=True)
for col_name, cat_name in cat_mapping.items():
if col_name in df.columns:
if df.dtypes[col_name] == np.dtype('O'):
# string -> cat
df[col_name] = pd.Categorical(df[col_name], dtype=self.category_types[cat_name])
if isinstance(df.dtypes[col_name], pd.CategoricalDtype):
# cat -> string
assert df.dtypes[col_name] == self.category_types[cat_name]
df[col_name] = df[col_name].cat.codes
def add_cats(self, df, cat_mapping='default'):
"""this edits the DF inplace! """
cat_mapping: dict = self._lookup_mapping(cat_mapping)
for col_name, cat_name in cat_mapping.items():
try:
if col_name in df.columns:
dtype = df.dtypes[col_name]
if np.issubdtype(dtype, np.integer):
df[col_name] = pd.Categorical.from_codes(df[col_name], dtype=self.category_types[cat_name])
else:
# noinspection PyCallingNonCallable
df[col_name] = self.category_types[cat_name](df[col_name])
except TypeError:
pass
def remove_cats_cells(self, df):
self.remove_cats(df, 'cells')
def remove_cats_conns(self, df):
self.remove_cats(df, 'connections')
def remove_cats_spikes(self, df):
self.remove_cats(df, 'spikes')
def add_cats_cells(self, df):
self.add_cats(df=df, cat_mapping='cells')
def add_cats_conns(self, df):
self.add_cats(df=df, cat_mapping='connections')
def add_cats_spikes(self, df):
self.add_cats(df=df, cat_mapping='spikes')
def add_cats_jumps(self, df):
self.add_cats(df=df, cat_mapping='jumps')
CAT = CatMangler()
def abs_path(path: str) -> Path:
if not os.path.isabs(path):
path = BASE_FOLDER / path
else:
path = Path(path)
return path.resolve()
def today_stamp():
"""return today's date as a string to stamp simulations"""
return datetime.datetime.today().strftime('%Y.%m.%d')
def now_stamp():
"""return right nowe as a string to stamp simulations"""
return datetime.datetime.now().strftime('%Y.%m.%d.%H.%M.%S.%f')
class SplitStore:
"""
Processed data of multiple simulations stored in different paths.
We typically process sims in incremental batches, which means we
end up with the results spread over multiple output files.
The registry of a SimBatch will contain a {name}_path column
indicating the HDF5 file for each sim and a {name}_idx indicating
the key in the file.
We assume all data is stored as a DataFrame per simulation.
This class looks dict-like with pairs <sim_gid, DataFrame>.
"""
def __init__(self, reg, path_col, idx_col):
self.reg = reg
self.path_col = path_col
self.idx_col = idx_col
self.cache = {}
self.opened_stores = {}
def load(self, idx=None):
# TODO make subsection return a clean slice to drop idx arg
# The stores returned by subsection still point to the old reg
reg = self.reg.sort_values([self.path_col, self.idx_col])
if idx is not None:
reg = reg.loc[idx]
for sim_gid in pbar(reg.index, desc='sim'):
_ = self[sim_gid]
def _get_locs(self):
"""get the valid pairs of path-idx"""
return self.reg[[self.path_col, self.idx_col]].dropna()
def keys(self):
"""return the index of sim_gid available in this store according to the reg"""
return self._get_locs().index
def items(self, pbar=None):
"""to iterate over the contents of this store"""
if pbar is None:
pbar = lambda x, desc: x
for k in pbar(self.keys(), desc='sim'):
yield k, self[k]
def __len__(self):
"""return the number of valid sims in this store"""
return len(self._get_locs())
def __contains__(self, sim_gid) -> bool:
"""check if this store contains the given sim"""
return sim_gid in self._get_locs().index
def __getitem__(self, key):
locations = self._get_locs()
# this will raise KeyError if we are missing the data
path, idx = locations.loc[key]
pair = (path, idx)
# we implement cache at the level of path/idx rather than
# at the level of sim_gid because some sims share data
# (like the instance)
if pair not in self.cache:
if path not in self.opened_stores:
self.opened_stores[path] = pd.HDFStore(path, mode='r')
self.cache[pair] = self.opened_stores[path][idx]
return self.cache[pair]
def close(self):
self.cache = {}
names = list(self.opened_stores.keys())
for path in names:
self.opened_stores[path].close()
self.opened_stores.pop(path)
import gc
gc.collect()
def empty_cache(self):
self.cache = {}
import gc
gc.collect()
class StoreCollection:
"""
Handling all stores for a SimBatch.
This class looks dict-like with pairs <name, SplitStore>.
"""
def __init__(self, batch):
self._reg: pd.DataFrame = batch.reg
self._names: list = batch.identify_stores()
self._stores = {}
def open(self, desc):
assert desc not in self._stores
path_col, idx_col = get_col_names(desc)
if path_col not in self._reg.columns or idx_col not in self._reg.columns:
raise KeyError(f'No columns "{path_col}" and "{idx_col}" in reg')
self._stores[desc] = SplitStore(self._reg, path_col, idx_col)
return self._stores[desc]
def get(self, desc) -> SplitStore:
if desc not in self._stores:
return self.open(desc)
return self._stores[desc]
def __len__(self):
"""return the number of valid stores in this collection"""
return len(self._names)
def __contains__(self, desc) -> bool:
"""check if this collection contains the given store"""
return desc in self._names
def keys(self):
return self._names
def __getitem__(self, desc) -> SplitStore:
return self.get(desc)
class SimBatch:
"""A collection of simulation results, with processed data stored as hdf5"""
def __init__(self, reg):
# The registry is what defines a batch
# It contains one row per sim, one col per param
# Some cols will come in pairs and identify where other data can be found
# These will always be columns <X_path, X_idx>. For instance <instance_path, instance_idx>
# and identify processed data of this simulation into an HDF5 file
self.reg = reg.copy()
if not self.reg.index.is_unique:
logging.error('reg index is not unique!')
if not self.reg.columns.is_unique:
logging.error('reg columns is not unique!')
self.stores = StoreCollection(self)
# for name in 'cells_raw_path', 'spikes_raw_path':
# if name not in reg.columns:
# logging.warning(f'No {name} col in registry. Should register these!')
def copy(self):
"""make a copy of this object (deep-copies the registry, but doesn't modify stored data)"""
return self.__class__(self.reg)
@classmethod
def load(cls, reg_path, reg_idx='sweeps', patch_lists=True):
reg_path = abs_path(reg_path)
# noinspection PyTypeChecker
reg: pd.DataFrame = pd.read_hdf(reg_path, reg_idx)
if patch_lists:
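            # list-valued sweep parameters are stored JSON-encoded (see save_registry
            # with patch_lists=True); decode them back into Python lists/tuples here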
for col in reg.columns:
for pat in LIST_LIKE_COLS:
if re.match(pat, col) and isinstance(reg[col].iloc[0], str):
reg[col] = tuple(reg[col].map(json.loads))
return cls(reg=reg)
@classmethod
def load_multiple(cls, reg_path: list, only_success=True, ignore_index=False, **load_kwargs):
"""
load multiple registries as a single batch
The indices of all registries should not overlap.
"""
parts = [cls.load(path, **load_kwargs).reg for path in reg_path]
merged_reg = pd.concat(parts, axis=0, sort=True, ignore_index=ignore_index)
if only_success:
merged_reg = merged_reg[merged_reg['status'] == 'success'].copy()
assert not np.any(merged_reg[['full_path', 'sim_idx']].isna())
assert not merged_reg[['full_path', 'sim_idx']].duplicated().any()
# for failed sims, we might get some nans, which makes this column float
# since we are filtering for successful sims, we should be able to remain int
        merged_reg['sim_idx'] = merged_reg['sim_idx'].astype(int)  # the np.int alias was removed in newer NumPy
assert merged_reg.index.is_unique
return cls(merged_reg)
@classmethod
def recover(
cls,
input_path: str,
partial_paths: list,
key_cols=None,
):
"""
Given a set of sims we wanted to run (input_path) and several sets of sims that
have finished (partial_paths), figure out which ones are missing from the original
input and which ones are done.
:param input_path:
:param partial_paths:
:param key_cols:
:return: A SimBatch corresponding to the filled out input batch.
"""
batch_input = cls.load(input_path)
batch_results = cls.load_multiple(partial_paths, ignore_index=True)
print('results:')
batch_results.check_sim_results()
if key_cols is None:
key_cols = pd.Index([
'input_targeted_targets',
'input_targeted_times',
'input_targeted_weight',
'input_whitenoise_mean',
'input_whitenoise_std',
'forced_times',
'targeted_gid',
'instance_path',
'instance_idx',
])
key_cols = key_cols.intersection(batch_input.reg.columns)
key_cols = list(key_cols)
final = batch_input.fill_in_missing(batch_results, key_cols=key_cols)
assert len(batch_input) == len(final)
missing = final.are_missing_results()
print(f'{np.count_nonzero(missing):,g}/{len(final):,g} sims missing')
return final
def are_missing_results(self) -> pd.Series:
"""return boolean series indicating missing results"""
return self.reg['full_path'].isna()
def __str__(self):
"""short description of contents"""
txt = f'{len(self):,g} sim' + ('s' if len(self) != 1 else '')
counts = {
c[:-len('_idx')]: np.count_nonzero(self.reg[c].notna())
for c in self.identify_store_columns()
if c.endswith('_idx')
}
full = [name for name, count in counts.items() if count == len(self)]
if full:
txt += '\nall with: ' + ', '.join(full)
partial = [
f'{name} ({len(self) - count} missing)'
for name, count in counts.items() if 0 < count < len(self)
]
if partial:
txt += '\nsome with: ' + ', '.join(partial)
return txt
def __repr__(self):
return str(self)
def __len__(self):
"""return number of simulations"""
return len(self.reg)
def add_cols(self, df: pd.DataFrame, quiet_missing=True):
"""Add new cols to the registry. Returns copy"""
shared_sims = self.reg.index.intersection(df.index)
missing_sims = self.reg.index.difference(df.index)
unknown_sims = df.index.difference(self.reg.index)
if len(unknown_sims) > 0:
logging.warning(f'Data for {len(unknown_sims)} unknown sims')
override_cols = self.reg.columns.intersection(df.columns)
if len(override_cols) > 0:
exisiting_data = self.reg.loc[shared_sims][override_cols]
if np.any(exisiting_data.notna().values):
logging.warning(f'Overriding data for {len(shared_sims)} sims')
expected_data = self.reg.loc[missing_sims][override_cols]
if not quiet_missing and np.any(expected_data.isna().values):
logging.warning(f'Missing data for {len(shared_sims)} sims')
else:
if not quiet_missing and len(missing_sims) > 0:
logging.warning(f'Missing data for {len(missing_sims)} sims')
df = df.reindex(shared_sims)
reg = self.reg.copy()
for c, vals in df.items():
reg.loc[shared_sims, c] = vals
return SimBatch(reg)
def describe_reg(self, flush=None):
"""print a text summary of the registry"""
def short_desc(k):
d = f'{val_counts[k]} {k}'
if val_counts[k] <= 10:
enum = [f'{v}' for v in np.sort(self.reg[k].unique())]
d += ': ' + ', '.join(enum)
elif np.issubdtype(self.reg[k].dtype, np.number):
d += f' between {self.reg[k].min():g} and {self.reg[k].max():g}'
return d
def get_sweep_lengths(subreg):
"""
for each column, return number of tested values
:return: pd.Series
e_input_pulsepacket_sample_targets 5
e_proj_offset 3
input_whitenoise_mean 4
instance_idx 60
dtype: int64
"""
subreg = subreg.reset_index(drop=True)
non_object = subreg.dtypes != np.dtype('O')
is_string = subreg.applymap(type).eq(str).all()
return subreg.T[non_object | is_string].T.nunique()
interesting_columns = ~self.reg.columns.str.endswith('_idx') & ~self.reg.columns.str.endswith('_path')
val_counts = get_sweep_lengths(self.reg.loc[:, interesting_columns])
val_counts = val_counts[val_counts > 1]
desc = '\n'.join([' ' + short_desc(k) for k in val_counts.keys()])
desc = f'Found {len(self.reg)} simulations that sweep over:\n{desc}'
def describe_expected(cols, name):
txt = ''
for c in cols:
how_many = None
if c not in self.reg.columns:
how_many = 'All'
else:
missing = np.count_nonzero(self.reg[c].isnull())
if missing > 0:
how_many = f'{missing: >5g}/{len(self.reg)}'
if how_many is not None:
txt += f'\n{how_many} sims missing {name} {c}'
return txt
desc += '\n' + describe_expected(['instance_path', 'instance_idx'], 'required instance column').upper()
desc += describe_expected(['full_path', 'sim_idx'], 'expected results column')
derivative = self.reg.columns[
self.reg.columns.str.endswith('_idx') & (~self.reg.columns.isin(['sim_idx', 'instance_idx']))]
if len(derivative) > 0:
missing_count = pd.Series({c: np.count_nonzero(self.reg[c].isnull()) for c in derivative})
missing_count.sort_values(inplace=True)
total = self.reg.shape[0]
desc += '\n\n' + '\n'.join([
(f'{mc: >5g}/{total} sims missing {c}' if mc > 0 else f'all {total} sims have {c}')
for c, mc in missing_count.items()
])
bad_cols = self.reg.columns[
self.reg.columns.str.endswith('_x') |
self.reg.columns.str.endswith('_y')
]
if len(bad_cols):
desc += '\n\n' + 'bad merges: ' + ', '.join(bad_cols)
if 'status' in self.reg.columns:
desc += '\n\nstatus:' + ', '.join([
f'{count} {stat}' for stat, count in self.reg['status'].value_counts().items()])
print(desc, flush=flush)
def identify_stores(self, ignored=('instance',)):
"""return a list of store names estimated from this batch's columns
A store name is identified by the presence of *both* a X_path and a X_idx column
:return: a list like ['cells', 'ewins', 'frm_norm_null_cmf', 'jumps', ... ]
"""
present_cols = {}
suffixes = ['_path', '_idx']
for suffix in suffixes:
cols = self.reg.columns[self.reg.columns.str.endswith(suffix)].str.slice(None, -len(suffix))
for c in cols:
present_cols.setdefault(c, []).append(suffix)
present_cols = [name for name, found in present_cols.items() if len(found) == 2]
for c in ignored:
if c in present_cols:
present_cols.remove(c)
return present_cols
def identify_store_columns(self, ignored=('instance',)):
"""
see identify_stores
:return: a list like ['cells_path', 'cells_idx', 'ewins_path', 'ewins_idx', ... ]
"""
store_names = self.identify_stores(ignored=ignored)
store_cols = _get_multi_store_cols(store_names)
assert np.all([c in self.reg.columns for c in store_cols])
return store_cols
def copy_clean_reg(self, drop_cluster_params=True, drop_protocol=False, quiet=True):
"""
Sometimes we re-run old sims with slight modifications.
This returns a copy of our registry with only columns that are strictly new sim parameters.
All identifiable columns relative to the results of simulations will be dropped.
:param drop_protocol: should protocol-related columns be removed?
:param quiet: print output on which cols are dropped
:return: a new SimBatch with a copy of this reg with less columns
"""
drop_cols = []
# store columns
drop_cols.extend(list(self.identify_store_columns()))
if drop_cluster_params:
drop_cols.extend(['hostname'])
# analysis result columns
drop_cols.extend([
'full_path', 'sim_idx', 'status', 'date_added', 'not_forced',
'tag',
'low_act', 'high_act',
'low_act_compatible', 'high_act_compatible',
'cell_count_e', 'cell_count_i', 'cell_count_total',
'spike_count_e', 'spike_count_i', 'spike_count_total',
'spike_count_pre_e', 'spike_count_pre_i', 'spike_count_pre_total',
'spike_count_post_e', 'spike_count_post_i', 'spike_count_post_total',
'spike_count_induction_e', 'spike_count_induction_i', 'spike_count_induction_total',
'spike_count_baseline_e', 'spike_count_baseline_i', 'spike_count_baseline_total',
'spike_count_effect_e', 'spike_count_effect_i', 'spike_count_effect_total',
'cell_hz_e', 'cell_hz_i', 'cell_hz_total',
'cell_hz_pre_e', 'cell_hz_pre_i', 'cell_hz_pre_total',
'cell_hz_post_e', 'cell_hz_post_i', 'cell_hz_post_total',
'cell_hz_induction_e', 'cell_hz_induction_i', 'cell_hz_induction_total',
'cell_hz_baseline_e', 'cell_hz_baseline_i', 'cell_hz_baseline_total',
'cell_hz_effect_e', 'cell_hz_effect_i', 'cell_hz_effect_total',
'pop_hz_e', 'pop_hz_i', 'pop_hz_total',
'pop_hz_pre_e', 'pop_hz_pre_i', 'pop_hz_pre_total',
'pop_hz_post_e', 'pop_hz_post_i', 'pop_hz_post_total',
'pop_hz_induction_e', 'pop_hz_induction_i', 'pop_hz_induction_total',
'pop_hz_baseline_e', 'pop_hz_baseline_i', 'pop_hz_baseline_total',
'pop_hz_effect_e', 'pop_hz_effect_i', 'pop_hz_effect_total',
'bkg_count', 'bkg_count_e', 'bkg_count_i',
'foll_count', 'foll_count_e', 'foll_count_i',
'foll_gids_e', 'foll_gids_i', 'foll_gids',
'e_foll_gids', 'i_foll_gids',
'e_anti_count', 'e_bkg_count', 'e_foll_count',
'i_anti_count', 'i_bkg_count', 'i_foll_count',
'furthest_follower_distance_e',
'last_foll_activation_jitter_e', 'last_foll_activation_time_e', 'last_foll_distance_e',
'mean_foll_activation_jitter_e', 'mean_foll_jitter_e',
'furthest_follower_distance_i',
'last_foll_activation_jitter_i', 'last_foll_activation_time_i', 'last_foll_distance_i',
'mean_foll_activation_jitter_i', 'mean_foll_jitter_i',
'e_furthest_follower_distance',
'e_last_foll_activation_jitter', 'e_last_foll_activation_time', 'e_last_foll_distance',
'e_mean_foll_activation_jitter', 'e_mean_foll_jitter',
'furthest_follower_distance',
'last_foll_activation_jitter', 'last_foll_activation_time', 'last_foll_distance',
'mean_foll_activation_jitter', 'mean_foll_jitter',
'i_furthest_follower_distance',
'i_last_foll_activation_jitter', 'i_last_foll_activation_time', 'i_last_foll_distance',
'i_mean_foll_activation_jitter', 'i_mean_foll_jitter',
])
if drop_protocol:
drop_cols.extend([
'tstart', 'tend',
'tstart_pre', 'tstop_pre',
'tstart_post', 'tstop_post',
'tstart_induction', 'tstop_induction',
'duration_baseline', 'duration_effect',
'trial_count', 'trial_length_ms',
'forced_times', 'input_targeted_times',
])
cols_to_remove = self.reg.columns.intersection(drop_cols)
if not quiet:
print(f'Removing {len(cols_to_remove)} cols: {cols_to_remove}')
new_reg = self.reg.drop(cols_to_remove, axis=1, errors='ignore')
return SimBatch(new_reg)
def check_sim_results(self):
"""verify results are readable and the batch seems correct"""
errors = (
self._check_sim_results_unique() +
self._check_sim_results_readable()
)
if len(errors) == 0:
print('all good')
else:
print('\n'.join(errors))
def _check_sim_results_unique(self) -> list:
"""
Check that all sim results are unique.
"""
errors = []
count = np.count_nonzero(self.reg[['full_path', 'sim_idx']].isna().any(axis=1))
if count > 0:
errors.append(f'{count} simulations with missing results')
count = np.count_nonzero(self.reg[['full_path', 'sim_idx']].duplicated())
if count > 0:
errors.append(f'{count} sims duplicated in batch registry')
return errors
def _check_sim_results_readable(self) -> list:
"""
Try to open every results file to check that they exist and that they are good HDF5.
If the sim is suddenly killed, the HDF5 may become corrupted (truncated).
        This has happened when we ran out of storage space and HDF5 segfaulted as a consequence.
"""
failed = []
unique = self.reg['full_path'].unique()
if len(unique) > 50:
unique = pbar(unique, desc='check results files')
for path in unique:
try:
with h5py.File(path, 'r'):
pass
except OSError as e:
failed.append(f'Failed to open {path} {e}')
return failed
@staticmethod
def new_reg_path(desc, folder=None, filename='registry') -> Path:
"""create a new path to store this batch"""
if folder is None:
folder = f'batch_{today_stamp()}_{desc}'
path = abs_path(folder) / f'{filename}_{now_stamp()}.h5'
assert not path.exists(), f'Path {str(path)} already exists'
path.parent.mkdir(parents=True, exist_ok=True)
return path
def save_registry(self, path, key='sweeps', quiet=False, patch_lists=False, fmt='fixed'):
copy: pd.DataFrame = self.reg.copy()
if patch_lists:
for col in copy.columns:
for pat in LIST_LIKE_COLS:
if re.match(pat, col): # don't check type because some may be NaNs and others lists
copy[col] = copy[col].map(json.dumps)
full_path = str(abs_path(path))
import warnings
import tables
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
warnings.simplefilter(action='ignore', category=tables.PerformanceWarning)
copy.to_hdf(full_path, key, format=fmt)
if not quiet:
print(f'saved reg of {len(self):,g} sims to: {full_path}')
else:
return path
def _register_results(self, name, paths, idcs, expected_diff=False):
"""register results for a simulation that have been saved in an indexed"""
path_col, idx_col = get_col_names(name)
self.reg.loc[paths.index, path_col] = paths
self.reg.loc[idcs.index, idx_col] = idcs
# noinspection PyUnresolvedReferences
assert (self.reg[path_col].isna() == self.reg[idx_col].isna()).all(), 'inconsistent path/idcs'
if expected_diff:
repeated = np.count_nonzero(self.reg[[path_col, idx_col]].duplicated())
if repeated > 0:
logging.warning(f'Repeated "{name}" results for {repeated} simulations')
def register_raw(self):
"""
Find all common sim results for the simulations of this batch.
These are results that are not the result of a post-processing but rather
the raw simulation output, and the network instance.
"""
self.register_spikes_raw()
self.register_voltages_raw()
self.register_conns_raw()
self.register_cells_raw()
def register_cells_raw(self, name='cells_raw'):
"""
Add the path and index of the cells to each simulation so that they can
be loaded through a SplitStore.
        Cells are stored in 'instance' files (alongside the connectivity) under
        the key convention cells_{instance_idx}.
"""
self._register_results(
name,
self.reg['instance_path'],
self.reg['instance_idx'].apply(lambda x: f'cells_{x}' if isinstance(x, str) else f'cells_{x:06g}'),
expected_diff=False,
)
def register_voltages_raw(self, name='voltages_raw'):
"""
Add the path and index of the voltages to each simulation so that they can
be loaded through a SplitStore.
"""
voltage_idcs = self.reg['sim_idx'].map(sim.get_voltages_key)
voltage_paths = self.reg['full_path'].copy()
voltage_idcs[voltage_paths.isna()] = np.nan
sim_gids = voltage_paths.dropna().index
sim_gids = pbar(sim_gids, total=len(sim_gids), desc=f'register {name}')
for sim_gid in sim_gids:
path = self.reg.loc[sim_gid, 'full_path']
idx = voltage_idcs.loc[sim_gid]
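            # not every sim records voltages: drop entries whose results file
            # has no (non-empty) voltage table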
if not _hdf5_table_exists(path, idx):
voltage_idcs.drop(sim_gid, inplace=True)
voltage_paths.drop(sim_gid, inplace=True)
if len(voltage_idcs) > 0:
self._register_results(name, voltage_paths, voltage_idcs)
path_col, idx_col = get_col_names(name)
if 'voltage_measure' in self.reg.columns:
missing = np.count_nonzero(self.reg['voltage_measure'].notna() & self.reg[idx_col].isna())
if missing > 0:
logging.error(f'Expected voltage measurements missing for {missing} simulations')
unexpected = np.count_nonzero(self.reg['voltage_measure'].isna() & self.reg[idx_col].notna())
if unexpected > 0:
logging.warning(f'Unexpected voltage measurements for {unexpected} simulations')
def register_conns_raw(self, name='conns_raw'):
"""
Add the path and index of the connections to each simulation so that they can
be loaded through a SplitStore.
        Connections are stored in 'instance' files under the key convention
        connections_{instance_idx}.
"""
def get_conns_raw_idx(x):
if np.issubdtype(type(x), np.number):
return f'connections_{x:06d}'
else:
return f'connections_{x}'
self._register_results(
name,
self.reg['instance_path'],
self.reg['instance_idx'].apply(get_conns_raw_idx),
expected_diff=False,
)
def register_spikes_raw(self, name='spikes_raw'):
"""
The raw results of a simulation (spikes, sometimes voltages) are stored in HDF
but they are registered with a non-standard path column (full_path) and an implicit
        idx whose format depends on the sim_gid. That format has changed over the years.
        To try to standardise access to this data, we can build new path/idx columns, but we need
to go through each file to check which version of the data was used.
This needs to be done only once and then we can save the batch as is.
"""
spikes_raw_idx = {}
raw_loc = self.reg[['full_path', 'sim_idx']].dropna()
raw_loc = pbar(raw_loc.iterrows(), total=len(raw_loc), desc=f'register {name}')
for sim_id, (filename, spikes_idx) in raw_loc:
if isinstance(spikes_idx, str):
spikes_idx = float(spikes_idx)
try:
# noinspection PyProtectedMember
spikes_raw_idx[sim_id] = sim._identify_df_key(
filename, [sim.get_spikes_key(spikes_idx), f'spikes_{spikes_idx:g}'])
except KeyError:
logging.error(f'sim #{sim_id} (sim idx {spikes_idx}) has no spikes in {filename}')
# be consistent on the path-idx pairs for simulations where spikes are missing
spikes_raw_idx = | pd.Series(spikes_raw_idx) | pandas.Series |
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import os
import pandas as pd
import time
print('PID '+str(os.getpid()))
df = | pd.read_csv('stock-ticker.csv') | pandas.read_csv |
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": | pd.to_datetime("2022") | pandas.to_datetime |
import os
import click
import pandas as pd
import numpy as np
from datetime import timedelta
from codigo.desafio_iafront.data.dataframe_utils import read_csv
from utils import *
from codigo.desafio_iafront.jobs.constants import DEPARTAMENTOS
from bokeh.plotting import figure, output_file
from bokeh.io import output_file, save
@click.command()
@click.option('--pedidos', type=click.Path(exists=True))
@click.option('--visitas', type=click.Path(exists=True))
@click.option('--produtos', type=click.Path(exists=True))
@click.option('--data-inicial', type=click.DateTime(formats=["%d/%m/%Y"]))
@click.option('--data-final', type=click.DateTime(formats=["%d/%m/%Y"]))
def main(pedidos, visitas, produtos, data_inicial, data_final):
produtos_df = read_csv(produtos)
produtos_df["product_id"] = produtos_df["product_id"].astype(str)
delta: timedelta = (data_final - data_inicial)
date_partitions = [data_inicial.date() + timedelta(days=days) for days in range(delta.days)]
count=0
conversao_dia={d:[] for d in range(7)}
conversao_hora={h:[] for h in range(24)}
count_dia=0
for data in date_partitions:
hour_partitions = list(range(0, 24))
for hour in hour_partitions:
hour_snnipet = f"hora={hour}"
data_str = data.strftime('%Y-%m-%d')
date_partition = f"data={data_str}"
print(f"EDA: {date_partition} {hour}h")
visitas_df = create_visitas_df(date_partition, hour_snnipet, visitas)
pedidos_df = create_pedidos_df(date_partition, hour_snnipet, pedidos)
visita_com_produto_e_conversao_df = merge_visita_produto(data_str, hour, pedidos_df, produtos_df, visitas_df)
#checa missing vals
nan_values = pd.DataFrame(visita_com_produto_e_conversao_df.isna().mean()*100)
nan=pd.DataFrame()
nan['cols'] = nan_values.index
nan['values']=nan_values.values
nan['index'] = 1
nan=nan.pivot(index='index',columns='cols')
try:
missing_vals = | pd.concat((missing_vals,nan )) | pandas.concat |
from typing import Dict, List, Optional
from copy import deepcopy
import pytz
from datetime import datetime, timedelta
try:
import MetaTrader5 as Mt5
except:
pass
import pandas as pd
import yaml
from pathlib import Path
from termcolor import colored
from mt5_connector.account import Account
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def calc_lot_forex(
risk: float,
symbol: str,
sl: float,
balance: float,
account_currency_conversion: float,
):
"""
calc lot size for forex
"""
percentage_converter = 0.01
risk_percentage = risk * percentage_converter
currency_2 = symbol[3:6]
if currency_2 == "jpy":
jpy_pip_converter = 100
pip_value = (balance * risk_percentage) / (sl * jpy_pip_converter)
else:
pip_value = (balance * risk_percentage) / sl
one_lot_price = 100_000
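    # one standard forex lot is 100,000 units of the base currency; convert the
    # per-pip value into lots and rescale into the account currency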
calculate_lot = (pip_value / one_lot_price) * account_currency_conversion
lot_size = round(calculate_lot, 2)
return lot_size
def calc_account_currency_conversion(
account_currency: str, symbol: str, current_price_symbols: Dict
):
"""
calculate the price conversion between the traded symbol and your account currency
"""
currency_2 = symbol[3:6]
other_character = symbol[6:]
if account_currency == currency_2:
account_currency_conversion = 1
else:
symbol_to_convert = account_currency + currency_2 + other_character
symbol_is_real = check_symbol(symbol_to_convert)
if symbol_is_real:
account_currency_conversion = float(
current_price_symbols[symbol_to_convert]["close"]
)
else:
symbol_to_convert = currency_2 + account_currency + other_character
symbol_is_real = check_symbol(symbol_to_convert)
if symbol_is_real:
account_currency_conversion = 1 / float(
current_price_symbols[symbol_to_convert]["close"]
)
else:
print(
f"unable to find a lot for {currency_2 + account_currency + other_character}"
)
return None
return account_currency_conversion
def calc_position_size_forex(
symbol: str,
account_currency: str,
risk: float,
sl: float,
current_price_symbols: dict,
) -> Optional[float]:
"""
return lot size for forex
"""
account = Mt5.account_info()
balance = account.balance
account_currency_conversion = calc_account_currency_conversion(
account_currency, symbol, current_price_symbols
)
lot_size = calc_lot_forex(risk, symbol, sl, balance, account_currency_conversion)
return lot_size
def get_order_history(
date_from: datetime = datetime.now() - timedelta(hours=24),
date_to: datetime = datetime.now() + timedelta(hours=5),
):
"""
get history of trades from the connected account
"""
res = Mt5.history_deals_get(date_from, date_to)
if res is not None and res != ():
df = pd.DataFrame(list(res), columns=res[0]._asdict().keys())
df["time"] = pd.to_datetime(df["time"], unit="s")
return df
return pd.DataFrame()
def calc_daily_lost_trades():
"""
calculate the daily lost trades
"""
now = datetime.now().astimezone(pytz.timezone("Etc/GMT-3"))
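    # 'Etc/GMT-3' is UTC+3 under the POSIX sign convention, i.e. the offset
    # commonly used as MT5 broker/server time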
now = datetime(now.year, now.month, now.day, hour=now.hour, minute=now.minute)
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
res = get_order_history(midnight, now)
if res.empty:
return 0
else:
lost_trade_count = 0
for i, row in res.iterrows():
profit = float(row["profit"])
if profit < 0:
lost_trade_count = lost_trade_count + 1
return lost_trade_count
def get_daily_trade_data():
"""
    return the order/deal history for the last 24 hours
"""
now = datetime.now().astimezone(pytz.timezone("Etc/GMT-3"))
now = datetime(now.year, now.month, now.day, hour=now.hour, minute=now.minute)
yesterday = now - timedelta(hours=24)
res = get_order_history(yesterday, now)
return res
def check_max_drawdown(
initial_balance: float, current_balance: float, max_drawdown: float
) -> bool:
"""
check if the loss exceed the max given drawdown
"""
percentage = 0.01
max_drawdown_percentage = max_drawdown * percentage
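    # e.g. initial_balance=10_000 and max_drawdown=10 (%) flag a drawdown
    # once current_balance drops below 9_000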
is_in_drawdown = False
if current_balance < (initial_balance - initial_balance * max_drawdown_percentage):
is_in_drawdown = True
return is_in_drawdown
def positions_get(symbol=None) -> pd.DataFrame:
"""
    return all open (ongoing) positions
"""
if symbol is None:
res = Mt5.positions_get()
else:
res = Mt5.positions_get(symbol=symbol)
if res is not None and res != ():
df = pd.DataFrame(list(res), columns=res[0]._asdict().keys())
df["time"] = pd.to_datetime(df["time"], unit="s")
return df
return | pd.DataFrame() | pandas.DataFrame |
import csv
import json
import numpy as np
import pandas as pd
def read_delim(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns list.
:param filepath: (str) location of delimited file
:return: (list) list of records w/o header
"""
f = open(filepath, 'r')
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
has_header = csv.Sniffer().has_header(f.read(1024))
f.seek(0)
reader = csv.reader(f, dialect)
if has_header:
        next(reader)  # skip the header row (Python 3 iterator protocol)
ret = [line for line in reader]
return ret
def read_delim_pd(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns pandas DataFrame.
:param filepath: (str) location of delimited file
:return: (DataFrame)
"""
f = open(filepath)
has_header = None
if csv.Sniffer().has_header(f.read(1024)):
has_header = 0
f.seek(0)
return pd.read_csv(f, header=has_header, sep=None, engine='python')
def lookup(table, lookup_cols, lookup_vals, output_cols=None, output_recs=None):
"""
Looks up records where lookup_cols == lookup_vals.
Optionally returns only specified output_cols and/or specified output_recs.
:param table: (DataFrame) the pandas DataFrame to use as a lookup table
:param lookup_cols: (str | list)
:param lookup_vals: (val | list)
:param output_cols:
:param output_recs:
:return:
"""
if type(lookup_cols) == str:
lookup_cols = [lookup_cols]
lookup_vals = [lookup_vals]
temp_df = pd.DataFrame(data=lookup_vals, columns=lookup_cols, copy=False)
output = table.merge(temp_df, copy=False)
if output_cols is not None:
if type(output_cols) == str:
output_cols = [output_cols]
output = output[output_cols]
if output_recs is not None:
output = output.iloc[output_recs]
return output
def generate_position_table(num_rc, space_rc, offset=(0.0,0.0,0.0), to_clipboard=False):
"""
Generates a position table for a plate. Assumes that 'x' and 'c' are aligned and that
'y' and 'r' are aligned. These axes can be reflected by negating the corresponding 'space_rc';
translations can be applied via 'offset'. All entries are indexed by 'n' (newspaper order)
and 's' (serpentine order). Other columns may be added as needed, but Autosampler.goto()
requires 'x', 'y', and 'z' to function properly.
:param num_rc: (tup) number of rows and columns (num_rows, num_cols)
:param space_rc: (tup) spacing for rows and columns [mm] (spacing_rows, spacing_cols)
:param offset: (tup) 3-tuple of floats to be added to x,y,z [mm]
:param to_clipboard: (bool) whether to copy the position_table to the OS clipboard
:return: (DataFrame)
"""
# TODO: instead of offset, full affine option? can use negative space rc to reflect,
# but can't remap x -> y
temp = list()
headers = ['n', 's', 'r', 'c', 'name', 'x', 'y', 'z']
for r in range(num_rc[0]):
for c in range(num_rc[1]):
n = c + r * num_rc[1]
s = ((r + 1) % 2) * (c + r * num_rc[1]) + (r % 2) * ((r + 1) * num_rc[1] - (c + 1))
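            # 'n' is newspaper (row-major) order; 's' is serpentine order
            # (even rows left-to-right, odd rows right-to-left)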
name = chr(64 + r + 1) + '{:02d}'.format(c + 1)
x = float(c * space_rc[1] + offset[0])
y = float(r * space_rc[0] + offset[1])
z = float(offset[2])
temp.append([n, s, r, c, name, x, y, z])
position_table = | pd.DataFrame(temp, columns=headers) | pandas.DataFrame |
import nltk
import numpy as np
import pandas as pd
import os
from collections import Counter
import sklearn
from sklearn.preprocessing import LabelEncoder
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
import tensorflow as tf
class Pipeline:
def __init__(self, load_setting, n_classes):
self.load_setting = load_setting
self.n_classes = n_classes
pass
# label names getter
def get_label_names(self):
try: return self.enc.inverse_transform(list(range(self.n_classes)))
except AttributeError:
print("There is no attribute enc in Preprocessor. encoder is not used in binary classification")
# loading and preprocessing data.
def pipe(self):
data = self.read_data()
tweets, labels = self.transform_data(data)
tweets = self.preprocessing(tweets)
return tweets, labels
def preprocessing(self, tw):
tw = [nltk.re.sub(r"http\S+", "link", text) for text in tw] # replacing links: <LINK>
tw = [nltk.re.sub(r"@\S+", "tag", text) for text in tw] # replacing tags: <TAG>
tw = [nltk.re.sub(r'[0-9]+', " digits ", text) for text in tw] # replacing tags: digit
tw = [nltk.re.sub(r"[\'|\"]", " ", text) for text in tw] # removing ' and "
tw = [nltk.re.sub(r"\b\w\b", "", text) for text in tw] # remove single character words
text_to_list = tf.keras.preprocessing.text.text_to_word_sequence
tw = [text_to_list(sentence) for sentence in tw]
stopwords = set(nltk.corpus.stopwords.words('english'))
tw = [[word for word in words if word not in stopwords] for words in tw]
tw = [" ".join(tweet) for tweet in tw]
return np.asarray(tw)
def balancing(self, tweets, labels, strategy="under"):
tweets = tweets.reshape((-1, 1))
labels = np.argmax(labels, axis=1)
if strategy == "under":
undersample = RandomUnderSampler(sampling_strategy='majority')
tweets, labels = undersample.fit_resample(tweets, labels)
else:
oversample = RandomOverSampler(sampling_strategy='minority')
tweets, labels = oversample.fit_resample(tweets, labels)
tweets = tweets.reshape((-1,))
labels = tf.keras.utils.to_categorical(labels, num_classes=len(Counter(labels)), dtype='float32')
return tweets, labels
def splitting(self, tweets, labels):
split = sklearn.model_selection.StratifiedShuffleSplit(n_splits=1,
test_size=0.1,
random_state=20)
train_idx, test_idx = list(split.split(tweets, labels))[0]
xtrain = tweets[train_idx]
ytrain = labels[train_idx]
xtest = tweets[test_idx]
ytest = labels[test_idx]
return xtrain, ytrain, xtest, ytest
def transform_data(self, data):
tweets = data.iloc[:, 1].to_numpy() # tweets to nd array
if self.load_setting == "relatedness":
labels = data.iloc[:, 4].to_numpy() # informativeness is the label column.
elif self.load_setting == "info_source":
labels = data.iloc[:,2]
elif self.load_setting == "info_type":
labels = data.iloc[:,3]
if self.load_setting != "relatedness":
self.enc = LabelEncoder()
labels = self.enc.fit_transform(labels)
else:
# convert label values: not related --> 0 , related --> 1
for i, label in enumerate(labels):
if label == "Not applicable":
labels[i] = 0
elif label == "Not related":
labels[i] = 0
else:
labels[i] = 1
labels = tf.keras.utils.to_categorical(labels, num_classes=len(Counter(labels)), dtype='float32')
return tweets, labels
def read_data(self):
try:
data = pd.read_csv("data/data.csv")
except FileNotFoundError:
print("there is no dataset in ")
list_subfolders = sorted([f.name for f in os.scandir("data") if
f.is_dir()]) # scans the folder "data" to get a list of all subfolders
# data is the dataframe for all concatenated datasets , initialized with the first crisis data
data = | pd.read_csv("data/" + list_subfolders[0] + "/" + list_subfolders[0] + "-tweets_labeled.csv") | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include methods - 'time', 'index', 'nearest',
'values' as a parameterization
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero", downcast="infer")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic", downcast="infer")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_invalid_method_and_value(self):
# GH#36624
ser = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Cannot pass both fill_value and method"
with pytest.raises(ValueError, match=msg):
ser.interpolate(fill_value=3, method="pad")
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # these tests are for issue #16282: the default limit=None is unlimited
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
        # an invalid limit_area value raises an error.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"method, limit_direction, expected",
[
("pad", "backward", "forward"),
("ffill", "backward", "forward"),
("backfill", "forward", "backward"),
("bfill", "forward", "backward"),
("pad", "both", "forward"),
("ffill", "both", "forward"),
("backfill", "both", "backward"),
("bfill", "both", "backward"),
],
)
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
# https://github.com/pandas-dev/pandas/pull/34746
s = Series([1, 2, 3])
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series(
[1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]
)
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = | Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0]) | pandas.Series |
import discord
import os
import pandas as pd
client = discord.Client()
## Initiate IEX
import pyEX as p
iex = p.Client(api_token=iex_key, version='stable')
## Get Quote
## Get News
## Date
import datetime
def convert_date(x):
stamp = x
date = datetime.datetime.fromtimestamp(stamp / 1e3)
date = date.strftime("%Y-%m-%d")
return date
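# Illustrative sketch (not part of the bot): IEX news items carry millisecond
# epoch timestamps, which convert_date turns into a 'YYYY-MM-DD' string. The
# value below is a made-up example and the exact output depends on the local
# timezone of the machine running the bot:
#   convert_date(1589500800000)  # -> e.g. '2020-05-15'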
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
msg = message.content
if message.author == client.user:
return
if message.content.startswith('$hello'):
await message.channel.send(msg)
await message.channel.send('Hello!')
### Quote
if message.content.startswith('$Quote'):
mes = msg.split()
tkr = mes[1]
quote = iex.quote(symbol=tkr)
await message.channel.send(quote['iexRealtimePrice'])
### News
if message.content.startswith('$News'):
mes = msg.split()
tkr = mes[1]
news = iex.news(count = 1,symbol = tkr)
news = | pd.DataFrame(news) | pandas.DataFrame |
from opendatatools.common import RestAgent, md5
from progressbar import ProgressBar
import json
import pandas as pd
import io
import hashlib
import time
index_map = {
'Barclay_Hedge_Fund_Index' : 'ghsndx',
'Convertible_Arbitrage_Index' : 'ghsca',
'Distressed_Securities_Index' : 'ghsds',
'Emerging_Markets_Index' : 'ghsem',
'Equity_Long_Bias_Index' : 'ghselb',
'Equity_Long_Short_Index' : 'ghsels',
'Equity_Market_Neutral_Index' : 'ghsemn',
'European_Equities_Index' : 'ghsee',
'Event_Driven_Index' : 'ghsed',
'Fixed_Income_Arbitrage_Index' : 'ghsfia',
'Fund_of_Funds_Index' : 'ghsfof',
'Global_Macro_Index' : 'ghsmc',
'Healthcare_&_Biotechnology_Index': 'ghsbio',
'Merger_Arbitrage_Index' : 'ghsma',
'Multi_Strategy_Index' : 'ghsms',
'Pacific_Rim_Equities_Index' : 'ghspre',
'Technology_Index' : 'ghstec',
}
class SimuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.user_info = None
self.df_fundlist = None
self.cookies = None
def login(self, username, password):
url = 'https://passport.simuwang.com/index.php?m=Passport&c=auth&a=login&type=login&name=%s&pass=%s&reme=1&rn=1' % (username, password)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Login failed'
jsonobj = json.loads(response)
suc = jsonobj['suc']
msg = jsonobj['msg']
if suc != 1:
return None, msg
self.cookies = self.get_cookies()
self.user_info = jsonobj['data']
return self.user_info, msg
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def _get_rz_token(self, time):
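        # Builds the site's anti-scraping token: the millisecond timestamp is
        # multiplied by a fixed constant, double-md5 hashed, and joined back with
        # the original timestamp as "<hash>.<timestamp>".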
mk = time * 158995555893
mtoken = md5(md5(str(mk))) + '.' + str(time)
return mtoken
def _get_fund_list_page(self, page_no):
url = 'https://dc.simuwang.com/ranking/get?page=%s&condition=fund_type:1,6,4,3,8,2;ret:9;rating_year:1;istiered:0;company_type:1;sort_name:profit_col2;sort_asc:desc;keyword:' % page_no
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data', None
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000:
return None, msg, None
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def load_data(self):
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
process_bar = ProgressBar().start(max_value=page_count)
page_no = page_no + 1
while page_no <= page_count:
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
process_bar.update(page_no)
page_no = page_no + 1
self.df_fundlist = pd.concat(df_list)
return self.df_fundlist, ''
def get_fund_list(self):
if self.df_fundlist is None:
            return None, 'Please load the data first via load_data'
return self.df_fundlist, ''
def _get_sign(self, url, params):
str = url
for k,v in params.items():
str = str + k + params[k]
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
sign = sha1.hexdigest()
return sign
def _get_token(self, fund_id):
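        # Token flow: sign the getToken request, exchange it for a one-off salt,
        # then sha1(fund_id + salt) becomes the token attached to the NAV query.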
sign = self._get_sign('https://dc.simuwang.com/Api/getToken', {'id' : fund_id})
url = 'https://dc.simuwang.com/Api/getToken?id=%s&sign=%s' % (fund_id, sign)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
            return None, 'Failed to fetch data'
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['message']
if code != 1000 :
return code, msg
self.cookies.update(self.get_cookies())
salt = jsonobj['data']
muid = self.user_info['userid']
#str = 'id%smuid%spage%s%s' % (fund_id, muid, page_no, salt)
str = '%s%s' % (fund_id, salt)
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
token = sha1.hexdigest()
return token, ''
def _get_fund_nav_page(self, fund_id, page_no):
muid = self.user_info['userid']
token, msg = self._get_token(fund_id)
if token is None:
            return None, 'Failed to get token: ' + msg, ''
url = 'https://dc.simuwang.com/fund/getNavList.html'
self.add_headers({'Referer': 'https://dc.simuwang.com/product/%s.html' % fund_id})
data = {
'id' : fund_id,
'muid' : muid,
'page' : str(page_no),
'token': token,
}
response = self.do_request(url, param=data, cookies=self.cookies, encoding="utf8")
if response is None:
            return None, 'Failed to fetch data', ''
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000 :
return code, msg, ''
df = | pd.DataFrame(jsonobj['data']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 17:56:56 2020
@author: CatsAndProcurement
The purpose of this script is to extract search-specific data from the
Government Accountability Office (GAO) Recommendations Database.
GAO is the primary legislative branch audit agency of the U.S. government.
This database contains recommendations made in GAO reports that have still
not been addressed by agencies.
This script uses Pandas (a Python math module) to categorize unaddressed
recommendations by month and year and create a bar chart of the time-series
data.
Sample web API call:
https://www.gao.gov/index.php?system_action=newRecommendations_results_as_csv
&rec_type=all_open
&field=rectext_t
&q=acquisition
"""
# Pandas lets us do fancy calculations
import pandas as pd
# Datetime lets us convert date strings into integers and print today's date
import datetime as dt
from datetime import date
# Calendar lets us convert some date info to plain English text
import calendar
# Describes for the user what the code is supposed to do
print("\n"
"Hi! This Python script will extract data from the Government"+
" Accountability Office (GAO) Recommendations Database. GAO is the"+
" primary legislative branch audit agency of the U.S. government."+
" This database contains recommendations made in GAO reports that"+
" have still not been addressed by agencies."
"\n")
# Asks the user for a search term
callTerm = input("Please input a search term (or just hit Enter and "+
"the script will automatically search for acquisition-"+
"related recommendations): ")
print("\n")
# If no search term entered, searches the word 'acquisition' by default
if callTerm == "":
callTerm = "acquisition"
else:
callTerm = callTerm
# Builds the web API call using the user input or the word 'acquisition'
callURL = ("https://www.gao.gov/index.php?system_action=newRecommendations_results_as_csv"+
"&rec_type=all_open"+
"&field=rectext_t"+
"&q="+callTerm)
# Lets the user know where their data is coming from
print("\nAccessing data from: \n" + callURL)
# Pulls specified data from GAO.gov into a Pandas dataframe
# We need to skip 5 rows because for some reason GAO's data starts on row 6
dfGAO = | pd.read_csv(callURL,skiprows=5) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import pandas as pd
import numpy as np
import glob,os
from glob import iglob
#import scanpy as sc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import RocCurveDisplay
from sklearn.datasets import load_wine
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import joblib
import time
import random
import matplotlib as mpl
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# # RA PBMC data for machine learning
# In[6]:
### training data import
ra=pd.read_csv('../RNA_seq_for_autoimmune_disease/RA_bulk/GSE90081/GSE90081_ra_part.csv',index_col=0)
hd=pd.read_csv('../RNA_seq_for_autoimmune_disease/RA_bulk/GSE90081/GSE90081_hd_part.csv',index_col=0)
hd1=pd.read_csv('../RNA_seq_for_autoimmune_disease/health_bulk/GSE183204_HC_fpkm.csv',sep=',',index_col=0)
# In[7]:
### feature import
features=pd.read_csv('../script4paper2/combined_gene_for_machine_learning.csv',index_col=1).index.values
features=np.append(features,'patient')
features=[i for i in features if i in ra.index.values]
features=[i for i in features if i in hd1.index.values ]
# # remove unwanted gene
# In[8]:
### remove unwanted gene from validation data
hd1=hd1.loc[features,:].T
ra_part=ra.loc[features,:].T
hd_part=hd.loc[features,:].T
# # label data
# In[9]:
### label training data
ra_part['patient']=1
hd_part['patient']=0
hd1['patient']=0
# # machine learning data training
# In[39]:
### merge training data
df=pd.concat([ra_part,hd_part,hd1],axis=0)
### get data labels
label=df.patient.values
### split data with ratio 30% for test and 70% for training
Xtrain, Xtest, Ytrain, Ytest = train_test_split(df.drop(columns=['patient']),label,test_size=0.3)
### rf model initialization
rfc = RandomForestClassifier(random_state=43,class_weight='balanced',oob_score=True)
rfc = rfc.fit(Xtrain,Ytrain)
### document model score
score_r = rfc.score(Xtest,Ytest)
### save feature importance
ra_pbmc=pd.DataFrame(rfc.feature_importances_)
ra_pbmc['feature_importance']=features
ra_pbmc.to_csv('./model/ra_pbmc_feature_importance_bulk.csv')
### print mean accuracy and out-of-bag score
print("Random Forest:{}".format(score_r))
print("OOB score:",rfc.oob_score_)
# # Figure 7A
# In[40]:
### Generating ROC curve
fig = plt.figure(figsize=(8, 8))
ax = plt.gca()
rfc_disp = RocCurveDisplay.from_estimator(rfc, Xtest, Ytest, ax=ax, alpha=0.8)
plt.legend(loc=4,prop={'size': 10})
plt.xlabel('False Positive Rate', fontsize=18)
plt.ylabel('True Positive Rate', fontsize=16)
ax.plot([0, 1], [0, 1], ls="--", c=".3")
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
plt.savefig('./figure6_and_7/7a_ra_pbmc_bulk_auc.pdf',width=4,height=5)
# # save/load best performance model
# In[24]:
### save the best performance model
#joblib.dump(rfc, './model/ra_synovial_bulk_best.model')
### load model
#rfc=joblib.load('./model/sle_best.model')
# In[19]:
### 10-fold cross validation
print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).mean())
print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).var())
# # Figure 7D
# In[42]:
ra_feature= | pd.read_csv('./model/ra_pbmc_feature_importance_bulk.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pandas as pd
from pandas._testing import assert_series_equal
from wetterdienst.core.scalar.values import ScalarValuesCore
def test_coerce_strings():
series = ScalarValuesCore._coerce_strings(pd.Series(["foobar"]))
series_expected = pd.Series(["foobar"], dtype=pd.StringDtype())
| assert_series_equal(series, series_expected) | pandas._testing.assert_series_equal |
"""
General collection of functions for manipulating dataframes, generally to isolate proteins or peptides that fit the criteria of interest.
"""
import numpy as np
import pandas as pd
from scipy import stats
import os
import logging
from ProteomicsUtils.LoggerConfig import logger_config
logger = logger_config(__name__)
logger.info('Import ok')
def quantified_data(data_frame):
"""Collects only the data for which quantification was completed
Parameters
----------
data_frame : DataFrame
Contains raw input from proteomics results preprocessed with ProteomeDiscoverer and exported to excel.
Returns
-------
quant_data: DataFrame
Contains the filtered dataframe, with Average Abundance ratio column appended.
col_list: list
list of column headers containing the abundance ratio data
"""
#collects only data for which quantifiable info was gathered from PD
quant_data = data_frame[(data_frame['Quan Info'] == 'Unique')]
#add a new column which is the average Abundance ratio for multiple replicates
col_list = [col for col in quant_data.columns if 'Abundance Ratio: (' in col]
logger.debug('Replicate Columns: {}'.format(col_list))
AvAbun = []
for index, row in quant_data.iterrows():
abundance_ratios = row[col_list]
av_abun = abundance_ratios.mean()
AvAbun.append(av_abun)
quant_data['Abundance Ratio (Average)'] = AvAbun
logger.debug('Quant_data: {}'.format(quant_data.shape))
return quant_data, col_list
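# Minimal usage sketch (illustrative only; the column names mirror the docstring
# and a typical ProteomeDiscoverer export rather than a real dataset):
#   import pandas as pd
#   demo = pd.DataFrame({
#       'Quan Info': ['Unique', 'NoQuanValues', 'Unique'],
#       'Abundance Ratio: (F1, Sample)': [1.2, 0.8, 0.5],
#       'Abundance Ratio: (F2, Sample)': [1.0, 0.9, 0.7],
#   })
#   quant, ratio_cols = quantified_data(demo)
#   # quant keeps only the 'Unique' rows and gains an 'Abundance Ratio (Average)' column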
def Unique_Cys_sorter(quant_data):
""" Collects proteins for which two unique peptides were found, at least one of which contains a cysteine residue.
Parameters
----------
quant_data : DataFrame
Contains raw data from preprocessing of proteomics results, generally one includes quantified proteins
Returns
-------
two_unique_cys, cys_pep, non_cys_pep : DataFrames
seperate dataframes to collect (1) all proteins that fit the criteria, and individually those that contain (2) or do not contain (3) a cysteine residue.
"""
# create empty dataframes to store filtered data
two_unique = | pd.DataFrame() | pandas.DataFrame |
import calendar
import datetime as dt
from io import BytesIO, StringIO
import uuid
from fastapi import HTTPException
import numpy as np
import pandas as pd
import pytest
from rq import SimpleWorker
from solarperformanceinsight_api import models, storage, compute
from solarperformanceinsight_api.routers import jobs
pytestmark = pytest.mark.usefixtures("add_example_db_data")
def test_list_jobs(client, stored_job):
response = client.get("/jobs")
jobs = [models.StoredJob(**j) for j in response.json()]
assert response.status_code == 200
assert len(jobs) == 5
assert jobs[0] == stored_job
def test_get_job(client, stored_job, job_id):
response = client.get(f"/jobs/{job_id}")
assert response.status_code == 200
assert models.StoredJob(**response.json()) == stored_job
@pytest.mark.parametrize(
"fnc,endpoint",
(
("GET", "/jobs/{other}"),
("GET", "/jobs/{other}/status"),
("GET", "/jobs/{other}/data/{data_id}"),
("GET", "/jobs/{jobid}/data/{baddataid}"),
("DELETE", "/jobs/{other}"),
("POST", "/jobs/{other}/compute"),
("GET", "/jobs/{other}/results"),
("GET", "/jobs/{jobid}/results/{baddataid}"),
),
)
def test_job_404s(client, other_job_id, job_data_ids, fnc, endpoint, job_id):
response = client.request(
fnc,
endpoint.format(
other=other_job_id,
data_id=job_data_ids[0],
jobid=job_id,
baddataid=str(uuid.uuid1()),
),
)
assert response.status_code == 404
def test_get_job_noauth(noauthclient, job_id):
response = noauthclient.get(f"/jobs/{job_id}")
assert response.status_code == 403
def test_get_job_status(client, job_id, job_status):
response = client.get(f"/jobs/{job_id}/status")
assert response.status_code == 200
assert models.JobStatus(**response.json()) == job_status
def test_get_job_status_running(client, job_id, mocker):
running = models.JobStatus(
status="running",
last_change=dt.datetime(2021, 12, 11, 20, tzinfo=dt.timezone.utc),
)
mocker.patch(
"solarperformanceinsight_api.storage.StorageInterface.get_job_status",
return_value=models.JobStatus(status="queued", last_change=running.last_change),
)
mocker.patch(
"solarperformanceinsight_api.queuing.QueueManager.job_status",
return_value=running,
)
response = client.get(f"/jobs/{job_id}/status")
assert response.status_code == 200
assert models.JobStatus(**response.json()) == running
def test_get_job_status_queued(client, job_id, mocker, mock_redis):
queued = models.JobStatus(
status="queued",
last_change=dt.datetime(2021, 12, 11, 20, tzinfo=dt.timezone.utc),
)
mocker.patch(
"solarperformanceinsight_api.storage.StorageInterface.get_job_status",
return_value=queued,
)
response = client.get(f"/jobs/{job_id}/status")
assert response.status_code == 200
assert models.JobStatus(**response.json()) == queued
def test_delete_job(nocommit_transaction, client, job_id):
response = client.delete(f"/jobs/{job_id}")
assert response.status_code == 204
@pytest.mark.parametrize(
"inp,exp",
[
("text/csv", (jobs.CSVResponse, "text/csv")),
("text/*", (jobs.CSVResponse, "text/csv")),
("*/*", (jobs.CSVResponse, "text/csv")),
(
"application/vnd.apache.arrow.file",
(jobs.ArrowResponse, "application/vnd.apache.arrow.file"),
),
("application/*", (jobs.ArrowResponse, "application/vnd.apache.arrow.file")),
(None, (jobs.CSVResponse, "text/csv")),
pytest.param(
"application/json",
(),
marks=pytest.mark.xfail(strict=True, raises=HTTPException),
),
],
)
def test_get_return_type(inp, exp):
assert jobs._get_return_type(inp) == exp
def test_get_job_data(client, job_id, job_data_ids, job_data_meta):
response = client.get(f"/jobs/{job_id}/data/{job_data_ids[1]}")
assert response.status_code == 200
assert response.content == (
b"time,performance\n2020-01-01 00:00:00+00:00,0\n"
b"2020-01-01 01:00:00+00:00,1\n"
)
def test_get_job_data_not_there(client, job_id, job_data_ids, job_data_meta):
response = client.get(f"/jobs/{job_id}/data/{job_data_ids[0]}")
assert response.status_code == 204
def test_get_job_data_arrow(
client, job_id, job_data_ids, job_data_meta, arrow_job_data
):
response = client.get(
f"/jobs/{job_id}/data/{job_data_ids[1]}",
headers={"Accept": "application/vnd.apache.arrow.file"},
)
assert response.status_code == 200
assert response.content == arrow_job_data
def test_get_job_data_bad_type(client, job_id, job_data_ids, job_data_meta):
response = client.get(
f"/jobs/{job_id}/data/{job_data_ids[1]}", headers={"Accept": "application/json"}
)
assert response.status_code == 406
def test_convert_job_data():
out = jobs._convert_job_data(
b"thisiswrong",
"application/vnd.apache.arrow.file",
"application/vnd.apache.arrow.file",
lambda x: x,
)
assert out == b"thisiswrong"
def test_convert_job_data_invalid():
with pytest.raises(HTTPException) as err:
jobs._convert_job_data(
b"thisiswrong", "application/vnd.apache.arrow.file", "text/csv", lambda x: x
)
assert err.value.status_code == 500
def test_convert_job_data_bad_type():
with pytest.raises(HTTPException) as err:
jobs._convert_job_data(
b"thisiswrong",
"application/vnd.apache.arrow.file",
"text/html",
lambda x: x,
)
assert err.value.status_code == 400
@pytest.fixture()
def new_job(system_id):
return models.CalculatePerformanceJobParameters(
system_id=system_id,
calculate="expected performance",
time_parameters=models.JobTimeindex(
start="2020-01-01T00:00:00+00:00",
end="2020-12-31T23:59:59+00:00",
step="15:00",
timezone="UTC",
),
weather_granularity="system",
irradiance_type="poa",
temperature_type="module",
)
def test_create_job(client, nocommit_transaction, new_job):
response = client.post("/jobs/", data=new_job.json())
assert response.status_code == 201
response = client.get(response.headers["Location"])
assert response.status_code == 200
def test_create_job_inaccessible(
client, nocommit_transaction, other_system_id, new_job
):
new_job.system_id = other_system_id
response = client.post("/jobs/", data=new_job.json())
assert response.status_code == 404
def test_check_job(client, new_job):
response = client.post("/jobs/", data=new_job.json())
assert response.status_code == 201
def test_check_job_bad(client):
response = client.post("/jobs/", data="reasllybad")
assert response.status_code == 422
@pytest.fixture()
def performance_df(job_params):
return pd.DataFrame(
{
"time": job_params.time_parameters._time_range,
"performance": np.random.randn(len(job_params.time_parameters._time_range)),
}
)
@pytest.fixture()
def weather_df(job_params):
return pd.DataFrame(
{
"time": job_params.time_parameters._time_range,
**{
col: np.random.randn(len(job_params.time_parameters._time_range))
for col in (
"poa_global",
"poa_direct",
"poa_diffuse",
"module_temperature",
)
},
}
)
@pytest.fixture(params=["int", "float", "abbr", "full", "floatstr", "shortfloatstr"])
def monthly_weather_df(request):
out = pd.DataFrame(
{
"month": list(range(1, 13)),
**{
col: np.random.randn(12)
for col in ("total_poa_insolation", "average_daytime_cell_temperature")
},
}
)
if request.param == "int":
return out
elif request.param == "float":
return out.astype({"month": float})
elif request.param == "abbr":
out.loc[:, "month"] = calendar.month_abbr[1:]
return out
elif request.param == "full":
out.loc[:, "month"] = calendar.month_name[1:]
return out
elif request.param == "floatstr":
out.loc[:, "month"] = [f"{i}.0" for i in range(1, 13)]
return out
elif request.param == "shortfloatstr":
out.loc[:, "month"] = [f"{i}." for i in range(1, 13)]
return out
@pytest.fixture(params=[0, 1])
def either_df(weather_df, performance_df, request):
if request.param == 0:
return weather_df, 0
else:
return performance_df, 1
def test_add_job_data_no_data(client, job_id, job_data_ids):
response = client.post(f"/jobs/{job_id}/data/{job_data_ids[0]}")
assert response.status_code == 422
def test_post_job_data_arrow(
client, nocommit_transaction, job_data_ids, job_id, either_df
):
df, ind = either_df
iob = BytesIO()
df.to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[ind]}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 200
rjson = response.json()
assert rjson["number_of_missing_rows"] == 0
assert rjson["missing_times"] == []
assert rjson["number_of_extra_rows"] == 0
assert rjson["extra_times"] == []
assert rjson["number_of_expected_rows"] == len(df)
assert rjson["number_of_missing_values"] == {
c: 0 for c in df.columns if c != "time"
}
job_resp = client.get(f"/jobs/{job_id}")
assert (
job_resp.json()["data_objects"][ind]["definition"]["filename"]
== "job_data.arrow"
)
assert (
job_resp.json()["data_objects"][ind]["definition"]["data_format"]
== "application/vnd.apache.arrow.file"
)
def test_post_job_data_csv(
client, nocommit_transaction, job_data_ids, job_id, either_df
):
df, ind = either_df
iob = StringIO()
df.to_csv(iob, index=False)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[ind]}",
files={
"file": (
"job_data.csv",
iob,
"text/csv",
)
},
)
assert response.status_code == 200
rjson = response.json()
assert rjson["number_of_missing_rows"] == 0
assert rjson["missing_times"] == []
assert rjson["number_of_extra_rows"] == 0
assert rjson["extra_times"] == []
assert rjson["number_of_expected_rows"] == len(df)
assert rjson["number_of_missing_values"] == {
c: 0 for c in df.columns if c != "time"
}
job_resp = client.get(f"/jobs/{job_id}")
assert (
job_resp.json()["data_objects"][ind]["definition"]["filename"] == "job_data.csv"
)
assert (
job_resp.json()["data_objects"][ind]["definition"]["data_format"]
== "application/vnd.apache.arrow.file"
)
def test_post_job_data_wrong_id(client, job_id, performance_df):
iob = BytesIO()
performance_df.to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_id}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 404
def test_post_job_data_wrong_job_id(client, other_job_id, job_data_ids, performance_df):
iob = BytesIO()
performance_df.to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{other_job_id}/data/{job_data_ids[1]}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 404
def test_post_job_data_bad_data_type(client, job_id, job_data_ids, performance_df):
iob = StringIO()
performance_df.to_csv(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[1]}",
files={"file": ("job_data.json", iob, "application/json")},
)
assert response.status_code == 415
def test_post_job_data_missing_col(client, job_id, job_data_ids, weather_df):
iob = BytesIO()
weather_df.drop(columns="poa_direct").to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[0]}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 400
def test_post_job_data_not_enough(client, job_id, job_data_ids, weather_df):
iob = BytesIO()
weather_df.iloc[:10].reset_index().to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[0]}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 400
def test_post_job_data_invalid_time_col(client, job_id, job_data_ids):
iob = BytesIO()
df = pd.DataFrame({"time": [0, -99.0, 88.0], "performance": [0, 1, 2.0]})
df.to_feather(iob)
iob.seek(0)
response = client.post(
f"/jobs/{job_id}/data/{job_data_ids[1]}",
files={
"file": (
"job_data.arrow",
iob,
"application/vnd.apache.arrow.file",
)
},
)
assert response.status_code == 400
assert "not be parsed as a timestamp" in response.json()["detail"]
def test_post_job_data_duplicate_points(client, job_id, job_data_ids, weather_df):
iob = BytesIO()
ndf = weather_df.copy()
| pd.concat([weather_df, ndf], ignore_index=True) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
from PIL import Image
def users_database():
files = os.listdir()
if 'shurlz_database.csv' in files:
pass
else:
users_database = pd.DataFrame(columns=['Name', 'Price', 'Date'])
users_database.to_csv('shurlz_database.csv', index=False)
    if 'user_info_database.csv' in files:
pass
else:
users_info = pd.DataFrame({'Budget': 0} ,index=[0])
users_info.to_csv('user_info_database.csv', index=False)
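# Illustrative usage (assumes the CSV stores live in the current working directory;
# add_new_spending is defined just below):
#   users_database()                            # create the CSV files on first run
#   add_new_spending('Coffee', 4, '2021-06-01')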
def add_new_spending(Name=None,Price=None,Date=None):
if type(Name) == str and type(Price) == int:
data = pd.read_csv('shurlz_database.csv')
new_data= {'Name':Name, 'Price': Price ,'Date': Date}
shurlz = data.append(new_data,ignore_index=True)
os.remove('shurlz_database.csv')
shurlz.to_csv('shurlz_database.csv', index=False)
return True
data = pd.read_csv('shurlz_database.csv')
data['Date'] = | pd.to_datetime(data.Date) | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import pydot
from sklearn import preprocessing, model_selection
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from treeinterpreter import treeinterpreter as ti
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Preparation of initial dataset
df_name1 = 'DatasetRF'
path = 'your path to dataset'
df_name2 = '.csv'
df_name3 = path + df_name1 + df_name2
datas = | pd.read_csv(df_name3) | pandas.read_csv |
import pandas as pd # type: ignore
from arkouda.pdarrayclass import pdarray
from arkouda.pdarraycreation import arange, ones
from arkouda.pdarraysetops import argsort, in1d, unique
from arkouda.sorting import coargsort
from arkouda.dtypes import int64, float64, bool
from arkouda.util import register, convert_if_categorical, concatenate, get_callback
from arkouda.groupbyclass import GroupBy
from arkouda.alignment import in1dmulti
class Index:
def __init__(self, index):
self.index = index
self.size = index.size
def __getitem__(self,key):
from arkouda.series import Series
if type(key) == Series:
key = key.values
return Index(self.index[key])
def __repr__(self):
return repr(self.index)
def __len__(self):
return len(self.index)
def __eq__(self,v):
return self.index == v
@staticmethod
def factory(index):
t = type(index)
if isinstance(index, Index):
return index
elif t != list and t != tuple:
return Index(index)
else:
return MultiIndex(index)
def to_pandas(self):
val = convert_if_categorical(self.index)
return val.to_ndarray()
def set_dtype(self, dtype):
"""Change the data type of the index
Currently only aku.ip_address and ak.array are supported.
"""
new_idx = dtype(self.index)
self.index = new_idx
return self
def register(self, label):
register(self.index, "{}_key".format(label))
return 1
def to_dict(self, label):
data = {}
if label is None:
label = "idx"
elif type(label) == list:
label = label[0]
data[label] = self.index
return data
def _check_types(self, other):
if type(self) != type(other):
raise TypeError("Index Types must match")
def _merge(self, other):
self._check_types(other)
callback = get_callback(self.index)
idx = concatenate([self.index, other.index], ordered=False)
return Index(callback(unique(idx)))
def _merge_all(self, array):
idx = self.index
callback = get_callback(idx)
for other in array:
self._check_types(other)
idx = concatenate([idx, other.index], ordered=False)
return Index(callback(unique(idx)))
def _check_aligned(self, other):
self._check_types(other)
l = len(self)
return len(other) == l and (self == other.index).sum() == l
def argsort(self, ascending=True):
if not ascending:
if isinstance(self.index, pdarray) and self.index.dtype in (int64, float64):
i = argsort(-self.index)
else:
i = argsort(self.index)[arange(self.index.size - 1, -1, -1)]
else:
i = argsort(self.index)
return i
def concat(self, other):
self._check_types(other)
idx = concatenate([self.index, other.index], ordered=True)
return Index(idx)
def lookup(self, key):
if not isinstance(key, pdarray):
raise TypeError("Lookup must be on an arkouda array")
return in1d(self.index, key)
class MultiIndex(Index):
def __init__(self,index):
if not(isinstance(index,list) or isinstance(index,tuple)):
raise TypeError("MultiIndex should be an iterable")
self.index = index
first = True
for col in self.index:
if first:
self.size = col.size
first = False
else:
if col.size != self.size:
raise ValueError("All columns in MultiIndex must have same length")
self.levels = len(self.index)
def __getitem__(self,key):
from arkouda.series import Series
if type(key) == Series:
key = key.values
return MultiIndex([ i[key] for i in self.index])
def __len__(self):
return len(self.index[0])
def __eq__(self,v):
if type(v) != list and type(v) != tuple:
raise TypeError("Cannot compare MultiIndex to a scalar")
retval = ones(len(self), dtype=bool)
for a,b in zip(self.index, v):
retval &= (a == b)
return retval
def to_pandas(self):
idx = [convert_if_categorical(i) for i in self.index]
mi = [i.to_ndarray() for i in idx]
return | pd.Series(index=mi, dtype='float64') | pandas.Series |
#Use to plot models on top of data
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from astropy.table import Table
import math
from matplotlib.colors import PowerNorm
import matplotlib.colors as colors
import pandas as pd
import sys
from scipy.interpolate import RectBivariateSpline, CubicSpline
sys.path.append('../scripts/')
from chemevo import *
hl = chem_evo_data('../output.hdf5')
fl = chem_evo_data('../comparison.hdf5')
#fl = chem_evo_data('../multioutput.hdf5')
gl = chem_evo_data('../comparison.hdf5')
#plt.plot(fl.t,fl.SFR)
#plt.show()
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits' #The astroNN VAC file
data_file_2 = '/data/ktfm2/apogee_data/allStar_r12_l33.fits' #The allStar file from APOGEE
hdu_list_1 = fits.open(data_file_1, memmap=True) #Open the fits file
apogee_data = Table(hdu_list_1[1].data) #Creates table from the fits file
#print(apogee_data.colnames) #Prints column names
#print(apogee_data['apogee_id','GALR','MG_H','FE_H','e','GALZ']) #Prints columns
#hdu_list_1.info() #Overview of file, 473307 rows
#print(hdu_list_1[1].columns) #Prints column details, including name and format
#hdu_list_2 = fits.open(data_file_2, memmap=True)
#hdu_list_2.info() #473307 rows - match!
#print(hdu_list_2[1].columns)
apogee_data.sort(['e'])
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
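# betw(x, l, u) keeps values strictly inside (l, u); outs(x, l, u) keeps values
# strictly outside that interval. Both are used below to build the APOGEE selection masks.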
#Create individual plot at solar radius
#fltr = (~pd.isna(apogee_data['GALR']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(apogee_data['LOGG']<3.5)&outs(apogee_data['GALZ'],-1.0,1.0)&(apogee_data['FE_H_ERR']<0.2)&betw(apogee_data['GALR'],8.0,8.2)
#Solar radius with guiding radius
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(apogee_data['LOGG']<3.5)&outs(apogee_data['GALZ'],-1.0,1.0)&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&betw(apogee_data['rl'],7.6,8.6)
fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(outs(apogee_data['GALZ'],-1.0,1.0))&(apogee_data['FE_H_ERR']<0.2)&betw(apogee_data['rl'],7.6,8.6)
#==========================================================================================================================
c=plt.scatter([0.,1.],[0.,1.],c=[0.,1.])
plt.clf()
#Set up to plot [alpha/Fe] against [Fe/H] from model at above radius
radius=8.1;
dat = fl.abund['Fe']
t = np.linspace(fl.t[0],fl.t[-1],100)
rbs = RectBivariateSpline(fl.R,fl.t,dat)
a = rbs(radius,t)
dat=fl.abund['Mg']-fl.abund['Fe']
rbs = RectBivariateSpline(fl.R,fl.t,dat)
b = rbs(radius,t)
#Paint on time stamps
#timestamp =[1.0];
#radiusstamp =[8.1];
timestamp = [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0];
radiusstamp= [8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1,8.1];
time_fe = fl.paint(radiusstamp,timestamp,['Fe'])
time_mg = fl.paint(radiusstamp,timestamp,['Mg'])
#plt.scatter(apogee_data['FE_H'][fltr],(apogee_data['MG_H'][fltr] - apogee_data['FE_H'][fltr]),c=plt.cm.viridis(apogee_data['e'][fltr]),s=2.0,zorder=-1);
#plt.plot(a.T,b.T,color='black',linewidth=3.0,zorder=0, label='2.7Gyr, 6.4x10^9')
plt.hist2d(apogee_data['FE_H'][fltr],
(apogee_data['MG_H']-apogee_data['FE_H'])[fltr],
bins=50,range=[[-1.5,0.6],[-0.1,0.4]],norm=colors.LogNorm(),cmap=plt.cm.Spectral_r);
plt.plot(a.T,b.T,color='black',linewidth=3.0, label='2.7Gyr, 6.4x10^9')
#plt.scatter(time_fe['Fe_H'],time_mg['Mg_H']-time_fe['Fe_H'],s=8.0,color='r',zorder=1)
plt.title('Model against data, radius = 8.1 kpc')
#plt.colorbar(c,label='Eccentricity')
plt.legend()
plt.xlim(-1.5,0.6) #Usually -2.5 but -1.5 to cut off initial rise
plt.ylim(-0.1,0.4)
plt.xlabel('[Fe/H]')
plt.ylabel('[Mg/Fe]')
plt.savefig('../../../../Project_Images/ForProject/GridSearchResult.pdf', bbox_inches='tight')
plt.show()
#plt.plot(gl.t,gl.SFR)
#plt.xlabel(r'$t/\,\mathrm{Gyr}$')
#plt.ylabel(r'Rate /$\,\mathrm{M}_\odot\,\mathrm{pc}^{-2}\,\mathrm{Gyr}^{-1}$')
#plt.ylim(0.,13.0)
#plt.title('SFR for gas dump scenario')
#plt.show()
#=========================================================================================================================
#Density Plots
ssM=[np.argmin(np.abs(fl.R-radii)) for radii in [6.1,8.1,10.1]]
ssG=[np.argmin(np.abs(gl.R-radii)) for radii in [6.1,8.1,10.1]]
ssC=[np.argmin(np.abs(hl.R-radii)) for radii in [6.1,8.1,10.1]]
rranges = [[5.6,6.6],[7.6,8.6],[9.6,10.6]]
#rranges = [[5,7],[7,9],[9,11]]
#Selection for GALR
NewRadii = [6.1,8.1,10.1]
#selectionR = [
# (apogee_data['FE_H_ERR']<0.2)&(apogee_data['LOGG']<3.5)&betw(apogee_data['GALR'],rd,ru)&(np.abs(apogee_data['GALZ'])>1) for rd,ru in rranges
#]
#Selection with guiding radius
#selectionR = [
# (apogee_data['FE_H_ERR']<0.2)&(apogee_data['LOGG']<3.5)&betw(apogee_data['rl'],rd,ru)&(np.abs(apogee_data['GALZ'])>1.0)&(np.abs(apogee_data['GALZ'])<5.0) for rd,ru in rranges
#]
selectionR = [
    (apogee_data['FE_H_ERR']<0.2)&(apogee_data['LOGG']<3.5)&betw(apogee_data['rl'],rd,ru)&(np.abs(apogee_data['GALZ'])>1.0)&(np.abs(apogee_data['GALZ'])<5.0) for rd,ru in rranges
]
f,a=plt.subplots(1,3,figsize=[15.,3.],sharex=True,sharey=True)
e='Mg'
plt.subplots_adjust(wspace=0.05)
for rr in range(3):
plt.sca(a[rr])
plt.plot(hl.abund['Fe'][ssC[rr]]-hl.abund['H'][ssC[rr]],
(hl.abund[e]-hl.abund['Fe'])[ssC[rr]],c='r',lw=3, label='2.7Gyr, 6.4x10^9')
# plt.plot(gl.abund['Fe'][ssG[rr]]-gl.abund['H'][ssG[rr]],
# (gl.abund[e]-gl.abund['Fe'])[ssG[rr]],c='k',lw=3, label='Fiducial model')
# plt.plot(fl.abund['Fe'][ssM[rr]]-fl.abund['H'][ssM[rr]],
# (fl.abund[e]-fl.abund['Fe'])[ssM[rr]],c='b',lw=3, label='10^10')
plt.hist2d(apogee_data['FE_H'][selectionR[rr]],
(apogee_data['%s_H'%e.upper()]-apogee_data['FE_H'])[selectionR[rr]],
bins=50,range=[[-1.5,0.6],[-0.1,0.4]],norm=colors.LogNorm(),cmap=plt.cm.Spectral_r);
plt.xlim(-1.5,0.5)
plt.ylim(-0.1,0.4)
plt.xlabel(r'$\mathrm{[Fe/H]}$', fontsize=12)
if rr==0:
plt.ylabel(r'$\mathrm{[%s/Fe]}$'%e, fontsize=12)
plt.title('$%.1f<R/\mathrm{kpc}<%.1f$'%(rranges[rr][0],rranges[rr][1]),fontsize=12)
# plt.title('Galactocentric Radius $R/\mathrm{kpc}=%.1f$'%(NewRadii[rr]),fontsize=12)
# plt.legend(loc='upper right')
# f.suptitle('Different models against data', fontsize=12)
f.figsize = [8.0,6.0]
plt.legend()
#f.savefig('../../../../Project_Images/ForProject/VaryMassDensity.pdf', bbox_inches='tight')
#f.savefig('../../../../Project_Images/ForProject/GasNoGas.pdf', bbox_inches='tight')
#f.savefig('../../../../Project_Images/ForProject/GridSearchResult.pdf', bbox_inches='tight')
plt.show()
#========================================================================================================================
radii = [6.1,8.1,10.1]; #Can switch to whatever
#lower_radii = [6.0,8.0,10.0];
#upper_radii = [6.2,8.2,10.2];
lower_radii = [5.6,7.6,9.6];
upper_radii = [6.6,8.6,10.6];
f,ax = plt.subplots(1,3,figsize=[15.,3.], sharex=True, sharey=True)
plt.subplots_adjust(wspace=0.05)
for x in range(3):
radius =radii[x]; #Radius for the models
lower_radius = lower_radii[x]; #Lower radius for data
upper_radius = upper_radii[x]; #Upper radius for data
#Filter with GALR
# fltr = (~pd.isna(apogee_data['GALR']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(apogee_data['LOGG']<3.5)&outs(apogee_data['GALZ'],-1.0,1.0)&(apogee_data['FE_H_ERR']<0.2)&betw(apogee_data['GALR'],lower_radius,upper_radius)
#Filter with rl
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(apogee_data['LOGG']<3.5)&outs(apogee_data['GALZ'],-1.0,1.0)&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&betw(apogee_data['rl'],lower_radius,upper_radius)
fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['GALZ']))&(~pd.isna(apogee_data['e']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~ | pd.isna(apogee_data['MG_H']) | pandas.isna |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
aisles_df= pd.read_csv("./input/aisles.csv")
departments_df= pd.read_csv("./input/departments.csv")
order_products_prior_df= pd.read_csv("./input/order_products_prior.csv")
order_products_train_df= pd.read_csv("./input/order_products_train.csv")
orders_df= pd.read_csv("./input/orders.csv")
products_df= | pd.read_csv("./input/products.csv") | pandas.read_csv |
###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.special import expit as logistic
germination_dat = pandas.read_csv('3-9-1-germination.csv')
print(germination_dat.head())
print(germination_dat.describe())
sns.scatterplot(
x='nutrition',
y='germination',
hue='solar',
data=germination_dat
)
plt.show()
germination_dat_d = | pandas.get_dummies(germination_dat) | pandas.get_dummies |
import pandas as pd
import numpy as np
import os
import datetime
import git
from pathlib import Path
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
inputdir = f"{homedir}" + "/data/us/mobility/"
inputdir2 = f"{homedir}" + "/data/google_mobility/"
outputdir = f"{homedir}" + "/models/data/us/mobility/"
Path(outputdir).mkdir(parents=True, exist_ok=True)
outputdir += "county_"
def edit_column_date(frame,index):
#Edits the date format of columns of dataframes
#index: index of the first column of dates + 1
i = 0
for col in frame:
i += 1
if i >= index:
new_d = date_format(col)
frame = frame.rename(columns={col : new_d})
return frame
def sort_dates(frame,index):
#Sorts the columns by date of a frame with many nonconsecutive dates (several factors per date)
Beg = list(frame.columns[:index]) #First four entries
End = list(np.sort(np.array(frame.columns[index:]))) #Every Date Sorted
cols = list(Beg + End) #Ordered Columns
frame = frame[cols]
return frame
def date_format(date):
d = datetime.datetime.strptime(date, '%Y-%m-%d')
return datetime.date.strftime(d, "%m/%d/%y")
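# Worked example for the helpers above (deterministic, no data files needed):
#   date_format('2020-04-15')  # -> '04/15/20'
# edit_column_date applies this reformatting to the trailing date columns of a
# frame, and sort_dates reorders those date columns (via np.sort) while keeping
# the first `index` metadata columns in place.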
def main():
#Loading in mobility data
DL_us_m50 = pd.read_csv(inputdir+'DL-us-m50.csv', encoding='latin1')
DL_us_m50_index = pd.read_csv(inputdir+'DL-us-m50_index.csv', encoding='latin1')
DL_us_samples = pd.read_csv(inputdir+'DL-us-samples.csv')
#Cleaning the datasets
DL_us_m50 = edit_column_date(DL_us_m50,6)
DL_us_m50_index = edit_column_date(DL_us_m50_index,6)
DL_us_samples = edit_column_date(DL_us_samples,6)
DL_us_m50 = DL_us_m50.drop(columns=['country_code','admin_level','admin1','admin2'])
DL_us_m50_index = DL_us_m50_index.drop(columns=['country_code','admin_level','admin1','admin2'])
DL_us_samples = DL_us_samples.drop(columns=['country_code','admin_level','admin1','admin2'])
#Separating data into county info
DL_us_m50_County = DL_us_m50[DL_us_m50.fips >= 1000]
DL_us_m50_index_County = DL_us_m50_index[DL_us_m50_index.fips >= 1000]
DL_us_samples_County = DL_us_samples[DL_us_samples.fips >= 1000]
#merging the 3 datasets together
Mobility_County = pd.merge(DL_us_m50_County, DL_us_m50_index_County, left_on='fips', right_on='fips', suffixes=('_M_m50', ''), sort=True)
Mobility_County = pd.merge(Mobility_County, DL_us_samples_County, left_on='fips', right_on='fips', suffixes=('_M_idx', '_M_samples'), sort=True)
Mobility_County = Mobility_County[Mobility_County.fips >= -1]
Mobility_County.columns = Mobility_County.columns.str.replace('fips','FIPS')
#saving datasets with 3 values not consecutive and then consecutive
Mobility_County_Nonconsecutive = Mobility_County
Mobility_County_Consecutive = sort_dates(Mobility_County,1)
    #Making FIPS the main index
Mobility_County_Consecutive = Mobility_County_Consecutive.set_index('FIPS')
Mobility_County_Nonconsecutive = Mobility_County_Nonconsecutive.set_index('FIPS')
Mobility_County_Consecutive.to_csv(outputdir+'consecutive.csv')
Mobility_County_Nonconsecutive.to_csv(outputdir+'nonconsecutive.csv')
#New Google Mobility Data, must be processed
google_mobility = pd.read_csv(inputdir2+'mobility_report_US.csv', encoding='latin1')
#Taking only county data
google_mobility_county = google_mobility[google_mobility['Region'] != 'Total']
#Key to map counties to FIPS, and states to state abbreviations
Key = pd.read_csv('county_key.csv').sort_values(by=['FIPS'])
State_Abv = | pd.read_csv('State_Abbrev.csv') | pandas.read_csv |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import seaborn as sn
import matplotlib.pyplot as plt
# read the dataset from the Excel file
dataset = pd.read_excel('dataset.xlsx',index_col=0)
# define the dataset as a DataFrame with the help of the pandas library
df = | pd.DataFrame(dataset) | pandas.DataFrame |