"""
Data loader for telemetry log files
"""
from functools import reduce
import itertools
import math
from matplotlib import pyplot
import os
import pandas as pd
from scipy.optimize import curve_fit
import statistics
from typing import Iterable, List, Optional, Tuple, Union
from telemetrydisc.database import get_logs_table, get_raw_data
from telemetrydisc.util import *
ANGULAR_VELOCITY_WINDOW_SIZE = 150 # Size of the sliding window for throw detection (ms)
ANGULAR_VELOCITY_WINDOW_THRESHOLD = 50 # Abs value mean to threshold
ANGULAR_ACCELERATION_WINDOW_SIZE = 50 # Size of the sliding window for flight detection (ms)
ANGULAR_ACCELERATION_WINDOW_THRESHOLD = 2 # Abs value mean to threshold
def process_all():
logs = get_logs_table()
for crc in logs.index:
process_log(crc)
class sliding_window:
def __init__(self, collection: Iterable, window: int, post_window: Optional[int] = None):
# if len(collection) < (window * 2 + 1):
# raise ValueError("sliding_window collection must be at least (window * 2 + 1) in size")
self._iterator = iter(collection)
self._pre_window = window
self._post_window = window if post_window is None else post_window
self._pre = None
self._now = None
self._post = None
def __iter__(self):
return self
def __next__(self):
if self._pre is None:
self._pre = list(itertools.islice(self._iterator, self._pre_window))
else:
self._pre.pop(0)
self._pre.append(self._now)
if self._now is None:
self._now = self._iterator.__next__()
else:
self._now = self._post[0]
if self._post is None:
self._post = list(itertools.islice(self._iterator, self._post_window))
else:
self._post.pop(0)
self._post.append(self._iterator.__next__())
return self._pre, self._now, self._post
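# A minimal usage sketch (not part of the original module): sliding_window yields
# a (pre, now, post) triple for every element that has a full window on both
# sides, e.g.
#
#   >>> list(sliding_window(range(7), 2))
#   [([0, 1], 2, [3, 4]), ([1, 2], 3, [4, 5]), ([2, 3], 4, [5, 6])]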
def smooth(data: pd.Series, *args, window: Optional[int] = 15, iterations: Optional[int] = None):
if iterations is not None:
smoothed = data.copy()
for i in range(iterations):
smoothed = smooth(smoothed, window=window)
return smoothed
smoothed = pd.Series()
for pre, now, post in sliding_window(data.iteritems(), window):
# Do Stuff
pre_mean = statistics.mean([item[1] for item in pre])
post_mean = statistics.mean([item[1] for item in post])
if pre_mean > now[1] and post_mean > now[1] or pre_mean < now[1] and post_mean < now[1]:
smoothed.set_value(now[0], statistics.mean([pre_mean, post_mean]))
else:
smoothed.set_value(now[0], now[1])
return smoothed
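# Usage sketch (illustrative only): smooth() replaces local extrema with the mean
# of the surrounding window means, and passing `iterations` re-applies the filter.
# Note it relies on the legacy pandas Series.iteritems/set_value API.
#
#   raw = get_raw_data(some_crc)["accelX"]        # some_crc is a placeholder
#   lightly_smoothed = smooth(raw)                # single pass, default window of 15
#   heavily_smoothed = smooth(raw, window=10, iterations=3)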
def find_releases(data: pd.DataFrame):
releases = [] # type: List[List[Tuple[int, int]]]
for pre, now, post in sliding_window(data["gyroZ"].iteritems(), 10):
if now[1] - statistics.mean([item[1] for item in pre]) >= 500 and\
now[1] - statistics.mean([item[1] for item in post]) <= 250:
if len(releases) and len(releases[-1]) and pre[-1][0] == releases[-1][-1][0]:
releases[-1].append(now)
else:
releases.append([now])
return releases
def find_ends(data: pd.DataFrame):
ends = [] # type: List[List[Tuple[int, int]]]
for pre, now, post in sliding_window(data["gyroZ"].iteritems(), 10):
if now[1] - statistics.mean([item[1] for item in pre]) <= 500 and\
now[1] - statistics.mean([item[1] for item in post]) >= 250:
if len(ends) and len(ends[-1]) and pre[-1][0] == ends[-1][-1][0]:
ends[-1].append(now)
else:
ends.append([now])
return ends
def process_log(log_crc: int):
log_data = get_raw_data(log_crc)
s_log_data = pd.DataFrame()
s_log_data["gyroZ"] = smooth(log_data["gyroZ"], window=10, iterations=3)
s_log_data["accelX"] = smooth(log_data["accelX"])
s_log_data["accelY"] = smooth(log_data["accelY"])
releases = [item[-1][0] for item in find_releases(s_log_data)]
flights = []
for n, release_range in enumerate(zip(releases, releases[1:] + [None])):
ends = [item[0][0] for item in find_ends(s_log_data.loc[release_range[0]:release_range[1]])]
print(f"Flight Candidate {n+1:>2}: {release_range[0]}-{ends[0]}")
# print(f"Release Candidate {n+1:>2}: {release_range[0]}")
# print(f" End Candidate {n+1:>2}: {ends[0]}")
flights.append((release_range[0], ends[0]))
# exit()
for flight in flights:
output_directory = os.path.join(LOCAL_DATA_PATH, f"{log_crc}")
if not os.path.exists(output_directory):
os.mkdir(output_directory)
fig, l_axis = pyplot.subplots()
r_axis = l_axis.twinx()
pyplot.suptitle("gyroZ")
l_axis.plot(s_log_data["gyroZ"].loc[flight[0]:flight[1]], linewidth=1)
l_axis.plot(s_log_data["gyroZ"].diff().loc[flight[0]:flight[1]], linewidth=1)
r_axis.plot(log_data["accelX"].loc[flight[0]:flight[1]], linewidth=1, color="g")
r_axis.plot(log_data["accelY"].loc[flight[0]:flight[1]], linewidth=1, color="brown")
fig.savefig(os.path.join(output_directory, f"gyroZ_{flight[0]}_{flight[1]}.png"), dpi=300, format="png")
pyplot.close(fig)
# pyplot.clf()
def isolate_flights(data: pd.DataFrame):
start = None
start_flight = None
end_flight = None
flight_candidates = []
throws = []
angular_velocity_window = []
angular_acceleration_window = []
for t in data.index:
angular_velocity_window.append(SeriesValue(t, data["gyroZ"][t]))
while angular_velocity_window[-1].t - angular_velocity_window[0].t > ANGULAR_VELOCITY_WINDOW_SIZE:
angular_velocity_window.pop(0)
angular_velocity_avg = statistics.mean([abs(sv.value) for sv in angular_velocity_window])
angular_acceleration_window.append(SeriesValue(t, data["d_gyroZ"][t]))
while angular_acceleration_window[-1].t - angular_acceleration_window[0].t > ANGULAR_ACCELERATION_WINDOW_SIZE:
angular_acceleration_window.pop(0)
angular_acceleration_avg = statistics.mean([abs(sv.value) for sv in angular_acceleration_window])
if start is None and angular_velocity_avg >= ANGULAR_VELOCITY_WINDOW_THRESHOLD:
start = angular_velocity_window[0].t
if start is not None:
if start_flight is None and angular_acceleration_avg <= ANGULAR_ACCELERATION_WINDOW_THRESHOLD:
start_flight = angular_acceleration_window[0].t
if start_flight is not None and angular_acceleration_avg > ANGULAR_ACCELERATION_WINDOW_THRESHOLD:
end_flight = angular_acceleration_window[-1].t
flight_candidates.append((start_flight, end_flight))
start_flight = None
if angular_velocity_avg < ANGULAR_VELOCITY_WINDOW_THRESHOLD:
end = angular_velocity_window[-1].t
throw_gyroZ = data["gyroZ"].iloc[data.index.get_loc(start): data.index.get_loc(end)]
max_gyroZ = max([abs(throw_gyroZ.max()), abs(throw_gyroZ.min())])
if max_gyroZ > 100:
if len(flight_candidates) != 0:
flight = reduce(lambda fca, fcb: fca if fca[1] - fca[0] > fcb[1] - fcb[0] else fcb,
flight_candidates)
throws.append(Throw(start, flight[0], flight[1], end))
start = None
start_flight = None
flight_candidates = []
return throws
def find_idle(data: pd.Series, window_size: int, threshold: Union[float, int]):
idles = [] # type: List[TimeSlice]
window = [] # type: List[SeriesValue]
start = None
for t in data.index:
window.append(SeriesValue(t, data[t]))
while window[-1].t - window[0].t > window_size:
window.pop(0)
window_avg = statistics.mean([abs(sv.value) for sv in window])
if start is None and window_avg < threshold:
start = window[-1].t
if start is not None and window_avg > threshold:
idles.append(TimeSlice(start, window[0].t))
start = None
return idles
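# Usage sketch (illustrative only): find_idle() slides a time window of
# `window_size` (same units as the index) over a Series and returns TimeSlice
# spans whose mean absolute value stays below `threshold`.
#
#   gyro_z = get_raw_data(some_crc)["gyroZ"]      # some_crc is a placeholder
#   idle_periods = find_idle(gyro_z, window_size=ANGULAR_VELOCITY_WINDOW_SIZE,
#                            threshold=ANGULAR_VELOCITY_WINDOW_THRESHOLD)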
def smooth_data(data: Union[pd.DataFrame, pd.Series], window_size: int):
if isinstance(data, pd.DataFrame):
smoothed_data = pd.DataFrame(index=data.index)
for column in data:
smoothed_data[column] = smooth_data(data[column], window_size)
return smoothed_data
else:
window = [] # type: List[SeriesValue]
        smoothed_data = pd.Series(index=data.index)
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
        tm.assert_series_equal(res, expected)
# Project: fuelmeter-tools
# Created by: # Created on: 5/7/2020
from pandas.tseries.offsets import MonthEnd
from puma.Report import Report
import pandas as pd
import numpy as np
import puma.plot as pplot
import puma.tex as ptex
import datetime
import os
class MultiMonthReport(Report):
def __init__(self,start,end,title,nc,houses,monthly_fuel_price):
super(MultiMonthReport, self).__init__(start,end,title,nc,houses,monthly_fuel_price)
def getAveCostPerDay(self):
        '''calculates the average cost of fuel per day. If the attribute gpd_hdd
        is available it is used to calculate costs; otherwise the attribute
        fuel_by_day is used.'''
if 'gpd_hdd' not in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
else:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
return self.cost_per_day.mean()
def getCostPerDay(self,fuel_by_day):
'''calculate cost for each day based on a fuel price for each day and fuel consumption for each day'''
self.fuel_price.name = 'fuel_price'
df = pd.concat([fuel_by_day, self.fuel_price.groupby(pd.Grouper(freq='D')).mean()], axis=1)
df.fuel_price = df.fuel_price.ffill() # filled for days that did not match
return df.fuel_consumption * df.fuel_price
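    # Usage sketch (illustrative only): the daily cost series is the element-wise
    # product of daily fuel consumption and the forward-filled daily mean fuel
    # price, so for an already-built report:
    #
    #   daily_cost = report.getCostPerDay(report.fuel_by_day)   # pd.Series by day
    #   total_cost = daily_cost.sum()
    #
    # `report` here stands for any MultiMonthReport instance from the caller.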
# def getEstimatedTotalGallons(self):
# '''calculates the total gallons used each month and sets the attribute gallons_by_month
# :return float total gallons for the entire report period'''
# self.estimated_gallons_by_month = self.calculateTotalGallonsByMonth()
# return self.gallons_by_month.sum()
def getCostPerMonth(self):
'''calculates the total cost of consumed fuel per month by summing cost per day for every day within a month'''
        if self.cost_per_day is None:
if 'gpd_hdd' in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
else:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
self.cost_per_month = self.cost_per_day.groupby(pd.Grouper(freq="M")).sum()
return
def getTotalCost(self):
'''uses hdd corrected estimate of fuel consumption to estimate cost per day and aggregate to the entire report period.'''
costPerDay = self.getCostPerDay(self.gpd_hdd)
return costPerDay.sum()
def calculateMeanDailyGallonsPerMonth(self):
'''Calculates the total gallons consumed by month based on an average daily consumption rate for each month'''
#actual measured total by day We use a count of 5 records as our cutoff for producing a legit average
groupedDaily = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq="D")).sum(min_count=5) #total gallons each day
#total days needing estimates
        self.meanDailyByMonth = groupedDaily.groupby(pd.Grouper(freq='M'))
#########################################################################
#########################################################################
# Classes for handling genome-wide association input and output files, ##
# analysis and qc programs, and post-hoc analyses ##
#########################################################################
#########################################################################
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import re
import random
import os
import subprocess
import rpy2.robjects as ro
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri as py2ri
# set matplotlib non-interactive backend to Agg to
# allow running on cluster
import collections
import sqlite3 as sql
from math import *
import scipy.stats as stats
class FileGroup(object):
'''
An object for holding, formatting and processing files for genome-wide
association analysis including compressed and binary files
File types supported:
* plink - .ped and .map files
* plink binary - .bim, .fam. and .bed files
* variant call format - .vcf and .bcf (including gzipped vcf)
* Oxford format - .gen or .bgen with matched sample text file (must
be .sample)
* GRM_binary - genetic relationship matrix calculated in an appropriate
program in binary format. File suffixes are *.grm.bin, *.grm.N.bin
and *.grmid
* GRM_gz - previously calcualted gzip compressed GRM, file suffixes
are *.grm.gz and *.grm.id
Phenotypes are assumed to be contained in the relevant files, if not
then an additional phenotypes files can be included using the
`phenotypes` argument. Covariate files (if different from the phenotypes
file) can also be included in the instantiation of a :FileGroup:
object using the `covarite_files` argument.
Only the `files` and `file_format` arguments are required.
Genotype data are assumed to be raw genotype calls. This can be modified
using the `genotype_format` argument upon instantiation. Values allowed
are:
* calls - standard bi-allelic genotype calls, i.e. AA, AB, BB
* imputed_call - discrete genotype calls from imputed data,
essentially treated the same as ``calls``
* genotype_prob - posterior probabilities for each genotype class,
i.e. 0.88 0.07 0.05 corresponding to homozygote
reference, heterozygote then homozygote rare allele.
'''
# Defaults for file formats
ped_file = None
map_file = None
bim_file = None
fam_file = None
bed_file = None
sample_file = None
gen_file = None
bgen_file = None
vcf_file = None
bcf_file = None
def __init__(self, files, file_format, phenotypes=None,
genotype_format="calls", covariate_files=None):
self.files = files
self.file_format = file_format
self.pheno_file = phenotypes
self.genotype_format = genotype_format
self.covariate_files = covariate_files
self.set_file_prefix(files)
def set_file_prefix(self, infiles):
'''Get file prefixes from input files. These are used across all
file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile.
Only use periods, '.' to denote file suffixes. use hyphens and
underscores for separating file names.
Set these to the appropriate attributes.
'''
file_prefixes = set()
for f in infiles:
# get all input file prefixes
if len(f.split("/")) > 1:
g = f.split("/")[-1]
fdir = f.split("/")[:-1]
fdir = "/".join(fdir)
ffile = fdir + "/" + g.split(".")[0]
file_prefixes.add(ffile)
else:
file_prefixes.add(f.split(".")[0])
# if only prefix then use this for all data files
if len(file_prefixes) == 1:
self.name = [xf for xf in file_prefixes][0]
else:
# if there are multiple prefixes then use separate
# flags for file inputs
self.name = None
# define file types by their suffix instead
if self.file_format == "plink":
self.ped_file = [pf for pf in infiles if re.search(".ped",
pf)][0]
self.map_file = [mf for mf in infiles if re.search(".map",
mf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.ped_file
except AssertionError:
raise ValueError(".ped file is missing, please "
"specify")
try:
assert self.map_file
except AssertionError:
raise ValueError(".map file is missing, please "
"specify")
elif self.file_format == "plink_binary":
self.fam_file = [ff for ff in infiles if re.search(".fam",
ff)][0]
self.bim_file = [fb for fb in infiles if re.search(".bim",
fb)][0]
self.bed_file = [bf for bf in infiles if re.search(".bed",
bf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.fam_file
except AssertionError:
raise ValueError(".fam file is missing, please "
"specify")
try:
assert self.bim_file
except AssertionError:
raise ValueError(".bim file is missing, please "
"specify")
try:
assert self.bed_file
except AssertionError:
raise ValueError(".bed file is missing, please "
"specify")
elif self.file_format == "oxford":
self.gen_file = [gf for gf in infiles if re.search(".gen",
gf)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.gen_file
except AssertionError:
raise ValueError(".gen file missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file missing, please "
"specify")
elif self.file_format == "oxford_binary":
self.bgen_file = [bg for bg in infiles if re.search(".bgen",
bg)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bgen_file
except AssertionError:
raise ValueError(".bgen file is missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file is missing, please "
"specify")
elif self.file_format == "vcf":
self.vcf_file = [vf for vf in infiles if re.search(".vcf",
vf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.vcf_file
except AssertionError:
raise ValueError(".vcf file is missing, please "
"specify")
elif self.file_format == "bcf":
self.bcf_file = [bv for bv in infiles if re.search(".bcf",
bv)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bcf_file
except AssertionError:
raise ValueError(".bcf file is missing, please "
"specify")
elif self.file_format == "GRM_binary":
self.id_file = [ig for ig in infiles if re.search(".grm.id",
ig)][0]
self.n_file = [gn for gn in infiles if re.search(".grm.N.bin",
gn)][0]
self.bin_file = [gb for gb in infiles if re.search(".grm.bin",
gb)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.n_file
except AssertionError:
raise ValueError("grm.N file is missing, please "
"specify")
try:
assert self.bin_file
except AssertionError:
                raise ValueError("GRM genotype file is missing, please "
"specify")
elif self.file_format == "GRM_plink":
self.id_file = [ig for ig in infiles if re.search(".rel.id",
ig)][0]
self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin",
gn)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.rel_file
except AssertionError:
raise ValueError("rel.N file is missing, please "
"specify")
def set_phenotype(self, pheno_file=None, pheno=1):
'''
Set the phenotype for a set of individuals
using an external phenotypes file.
Default is to use the (n+2)th column, designated
as pheno 1.
'''
if type(pheno) == int:
pheno = str(pheno)
elif type(pheno) == str:
pass
else:
raise AttributeError("Type of pheno unknown. "
"Must be str or int.")
self.pheno_file = pheno_file
self.pheno = pheno
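# Usage sketch (illustrative only, file names are placeholders): grouping a
# binary Plink fileset with an external phenotypes file.
#
#   plink_files = FileGroup(files=["cohort.bed", "cohort.bim", "cohort.fam"],
#                           file_format="plink_binary",
#                           phenotypes="cohort_phenotypes.txt")
#   plink_files.set_phenotype(pheno_file="cohort_phenotypes.txt", pheno=1)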
class GWASProgram(object):
'''
A base level object for programs designed to perform genome-wide
association analysis and operate on genome-wide genotyping data.
[INSERT PROPER DOCSTRING - see style guide]
'''
def __init__(self, executable=None, required_format=None):
self.executable = executable
self.require_format = required_format
def program_call(self, infiles, outfile):
'''build a statement to perform genome-wide
analysis using infiles
'''
return ""
def postprocess(self, infiles, outfile):
'''collect and process output files from
program - format for Result class objects'''
return ""
def build(self, infiles, outfile):
'''run analysis program'''
        cmd_program = self.program_call(infiles, outfile)
        cmd_postprocess = self.postprocess(infiles, outfile)
        if cmd_postprocess:
            assert cmd_postprocess.strip().endswith(";")
else:
pass
statement = " checkpoint; ".join((cmd_program,
cmd_postprocess))
return statement
class GCTA(GWASProgram):
'''
GCTA is designed for computing genetic relationship matrices, linear
mixed model analyses and phenotype estimation/prediction.
It can also perform SNP-wise GWAS.
Files MUST be in Plink binary format
'''
def __init__(self, files, options=None, settings=None,
design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "gcta64"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
'''build GCTA call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
raise AttributeError("Files must be in binary plink format "
"or as a GRM to use GCTA. Please "
"convert and try again.")
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
self.statement["program"] = " ".join(statement)
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_binary" or file_format == "GRM_plink":
statement = " --grm %s " % infiles.name
else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
"instantiating a FileGroup object")
return statement
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM, outputting the first n
        principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def apply_filters(self, filter_type, filter_value):
'''
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome_number - for non-human species, the number of chromosomes to
be considered autosomes
* exclude_snps - text file list of variant IDs to exclude from analysis
[file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
'''
if filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "autosome_number":
self._construct_filters(autosome_number=filter_value)
elif filter_type == "exclude_snps":
self._construct_filters(exclude_snps=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
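    # Usage sketch (illustrative only): filters accumulate on the instance and are
    # joined into self.statement["filters"], e.g. restricting to chromosome 22 and
    # a minimum MAF of 1%.
    #
    #   gcta = GCTA(files=plink_files)            # plink_files: a FileGroup
    #   gcta.apply_filters("chromosome", "22")
    #   gcta.apply_filters("min_allele_frequency", "0.01")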
def _construct_filters(self, **kwargs):
'''
Add filter to each GCTA run.
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
        * autosome - exclude all unplaced and non-autosomal variants.
[boolean]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
* update_gender - provide gender information in a separate text
file. [file]
* grm_threshold - remove one of a pair of individuals with
estimated relatedness greater than this value.
* ld_significance - p-value threshold for regression test
of LD significance
* genotype_call - GenCall score cut-off for calling raw
genotypes into Plink PED format
* meta_pval - p-value threshold cut-off for conditional
and joint genome-wide analysis
* cojo_window - distance in kb beyond wich SNPs this
distance apart are assumed to be in linkage equilibrium
* cojo_collinear - multiple regression R^2 on selected SNPs
value above which the testing SNP will not be selected.
* cojo_inflation - adjust COJO analysis test statistics
for genomic control. [boolean]
* reml_iterations - maximum number of iterations to use
during reml analysis. Default is 100. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"autosome": " --autosome ",
"autosome_number": " --autosome-num %s ",
"grm_threshold": " --grm-cutoff %s ",
"ld_significance": " --ls-sig %s ",
"genotype_call": " --gencall %s ",
"meta_pval": " --cojo-p %s ",
"cojo_window": " --cojo-wind %s ",
"cojo_collinear": " --cojo-collinear %s ",
"cojo_inflation": " --cojo-gc ",
"reml_iterations": " --reml-maxit %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
                if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def mixed_model(self, lmm_method, grm=None, qcovar=None,
dcovar=None):
'''
Run a linear mixed model with the GRM used to model
        random effects of an estimated genetic relationship
between individuals
'''
# add the mlm flag to the statement
self._run_tasks(lmm=lmm_method)
# construct the rest of mlm statement
statement = []
if qcovar:
statement.append(" --qcovar %s " % qcovar)
else:
pass
if dcovar:
statement.append(" --covar %s " % dcovar)
else:
pass
try:
statement.append(" --grm %s " % grm)
except ValueError:
E.warn("No GRM has been provided, the GRM ")
self.statement["mlm"] = " ".join(statement)
def reml_analysis(self, method, parameters, prevalence=None,
qcovariates=None, discrete_covar=None):
'''
Use REML to estimate the proportion of phenotypic variance
explained by the estimated genetic relationship between
individuals.
Arguments
---------
method: string
GCTA method to use for REML estimation of h2. Includes:
* snpBLUP - calculate the SNP BLUPs from the genotype
data and the estimated total genetic value/ breeding value
* fixed_cor -
* priors - provide initial priors for the variance components
estimation
* unconstrained - allow variance estimates to fall outside
of the normal parameter space, bounded [0, ).
* GxE - estimate the contribution of GxE with covariates
to the phenotype variance
* BLUP_EBV - output individual total genetic effect/breeding
values
'''
statement = []
try:
params = parameters.split(",")
if len(params) == 1:
params = params[0]
else:
pass
except AttributeError:
params = parameters
self._run_tasks(parameter=params,
greml=method)
if prevalence:
statement.append(" --prevalence %0.3f " % prevalence)
else:
pass
if qcovariates:
statement.append(" --qcovar %s " % qcovariates)
else:
pass
if discrete_covar:
statement.append(" --covar %s " % discrete_covar)
else:
pass
self.statement["reml"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
The principal functions of GCTA revolve around GRM estimation
and variance components analysis, such as REML estimation of
        heritability and variance components, BLUP and phenotype prediction.
It can also be used to do PCA and conditional and joint GWAS.
Tasks
-----
* pca - perform principal components analysis on a GRM
* greml - perform restricted maximum likelihood analysis
for estimation of variance components
* estimate_ld - estimate the linkage disequilibrium structure
over the genomic regions specified
* simulate_gwas - simulate genome-wide association data based
on observed genotype data
* cojo - conditional and joint genome-wide association
analysis across SNPs and covariates
* bivariate_reml - perform GREML on two traits, either both
binary, both quantitative or one of each
* lmm - perform a linear mixed model based association analysis
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
# put all of the other tasks as options in the calling function
task_map = {"pca": " --pca %s ",
"greml": {"standard": " --reml ",
"priors": " --reml --reml-priors %s ",
"reml_algorithm": " --reml --reml-alg %s ",
"unconstrained": " --reml --reml-no-constrain ",
"GxE": " --reml --gxe %s ",
"LRT": " --reml --reml-lrt %s ",
"BLUP_EBV": " --reml --reml-pred-rand ",
"snpBLUP": " --blup-snp %s "},
"estimate_ld": " --ld %s ",
"simulate_gwas": {"quantitative": " --simu-qt ",
"case_control": " --simu-cc %s %s "},
"cojo": {"stepwise": " --cojo-file %s --cojo-slct ",
"no_selection": " --cojo-file %s --cojo-joint ",
"snp_conditional": " --cojo-file %s --cojo-cond %s "},
"bivariate_reml": {"standard": " --reml-bivar %s ",
"no_residual": " --reml-bivar %s --reml-bivar-nocove ",
"fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "},
"lmm": {"standard": " --mlma ",
"loco": " --mlma-loco ",
"no_covar": " --mlma-no-adj-covar "},
"remove_relations": {"cutoff": " --grm-cutoff %s "}}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
# LD estimation is likewise not nested
elif task == "estimate_ld":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
raise IOError("no SNP file list detected")
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("% Task not recognised, see docs for details of "
"recognised tasks" % task)
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
self.statement["tasks"] = " ".join(statement)
def genetic_relationship_matrix(self, compression="binary", metric=None,
shape="square", options=None):
'''
Calculate the estimated genetic relationship matrix from
genotyping data
* estimate_grm - estimate the realized genetic relationship
matrix between individuals from genotyping data
'''
mapf = {"binary": " --make-grm-bin ",
"gzip": " --make-grm-gz ",
"no_compress": " --make-grm ",
"X_chr": " --make-grm-chr ",
"X_chr_gz": " --make-grm-gz ",
"inbreeding": " --ibc "}
if options == "X_chr":
if compression == "gz":
state = mapf["X_chr_gz"]
else:
state = mapf["X_chr"]
        elif options == "inbreeding":
state = mapf["inbreeding"]
else:
pass
# check compression is compatible
if compression == "gz":
state = mapf["gzip"]
elif compression == "bin":
state = mapf["binary"]
elif compression is None and not options:
state = mapf["no_compress"]
self.statement["matrix"] = state
def build_statement(self, infiles, outfile, threads=None,
memory=None, parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["mlm"])
except KeyError:
pass
try:
statement.append(self.statement["reml"])
except KeyError:
pass
if threads:
statement.append(" --thread-num %i " % threads)
else:
pass
# add output flag
statement.append(" --out %s " % outfile)
os.system(" ".join(statement))
class Plink2(GWASProgram):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
    Requires Plink v1.9 to be on the user's PATH as ``plink2`` to
distinguish it from Plink v1.07.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plink2"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
''' build Plink call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self. _build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
inputs = self._build_multiple_file_input(infiles,
infiles.file_format)
statement.append(inputs)
# check for the presence of an additional phenotypes file
try:
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
except AttributeError:
pass
self.statement["program"] = " ".join(statement)
def hamming_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using Hamming distance across all variants
'''
# check shape is compatible
if not shape:
shape = "triangle"
elif shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression)
self.statement["matrix"] = state
def ibs_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise similarity matrix between
individuals using proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genome_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using 1 - proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genetic_relationship_matrix(self, shape, compression, metric,
options=None):
'''
Calculate genomic pair-wise distance matrix between
individuals using proportion of IBS alleles
Requires the use of the Plink2 parallelisation to run with large
cohorts of patients
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if metric in ["cov", "ibc2", "ibc3"]:
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression, options=metric)
else:
E.info("%s metric not recognised. Running with default Fhat1" % metric)
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression)
self.statement["matrix"] = state
def apply_filters(self, filter_type, filter_value):
'''
        Apply a filter to the Plink2 run. The supported filter types are
        listed below.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* exclude_snp - exclude this single variant
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all unplaced and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
'''
if filter_type == "genotype_rate":
self._construct_filters(genotype_rate=filter_value)
elif filter_type == "hwe":
self._construct_filters(hwe=filter_value)
elif filter_type == "missingness":
self._construct_filters(missingness=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "exclude_snp":
self._construct_filters(exclude_snp=filter_value)
elif filter_type == "exclude":
self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "exclude_chromosome":
            self._construct_filters(exclude_chromosome=filter_value)
elif filter_type == "autosome":
self._construct_filters(autosome=filter_value)
elif filter_type == "pseudo_autosome":
self._construct_filters(pseudo_autosome=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
elif filter_type == "snp_bp_range":
self._construct_filters(snp_bp_range=filter_value)
elif filter_type == "conditional_snp":
self._construct_filters(conditional_snp=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
def _build_multiple_file_input(self, infiles, file_format):
'''
internal function only. Use it to construct
the appropriate file input flags
'''
statement = None
if file_format == "oxford":
statement = " --gen %s --sample %s " % (infiles.gen_file,
infiles.sample_file)
elif file_format == "oxford_binary":
statement = " --bgen %s --sample %s " % (infiles.bgen_file,
infiles.sample_file)
elif file_format == "plink":
statement = " --ped %s --map %s " % (infiles.ped_file,
                                                  infiles.map_file)
elif file_format == "plink_binary":
statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
infiles.bim_file,
infiles.fam_file)
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.vcf_file
elif file_format == "bcf":
statement = " --bcf %s " % infiles.vcf_file
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
else:
raise AttributeError("file format is not defined. Please "
"define the input file formats when "
"instantiating a FileGroup object")
return statement
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_plink":
statement = " --grm.bin %s " % infiles.name
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.name
else:
raise AttributeError("file format is not defined or recognised."
"Please define the input corectly when "
"instantiating a FileGroup object")
return statement
def _construct_filters(self, **kwargs):
'''
Add filter to each plink run. [data type]
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* missingness - exclude individuals with total genotype missingness
above this value. [float]
* hwe - p-value threshold for excluding SNPs deviating from
Hardy-Weinberg expectations. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* mendelian_error - filter out samples/trios exceeding the error
threshold. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* quality_score_file - vcf file with variants and quality scores. Use
`qual_score_column` and `var_id_col` to specify which columns
correspond to the quality score and variant ID columns.
[file] <int> <int>
* min_qual_score - alters the lower bound of the quality score
threshold; default is 0.[int]
* max_qual_score - sets an upper limit on the quality scores;
default is Inf. [int]
* allow_no_sex - prevents phenotypes set to missing if there is no
gender information. [boolean]
* enforce_sex - force phenotype missing when using --make-bed, --recode
or --write-covar. [boolean]
* subset_filter - filter on a particular subset. Choices are: cases,
controls, males, females, founders, nonfounders. [str]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all unplaced and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
* specific_snp - only load the variant specified. [str]
* exclude_snp - exclude this single variant
* window_size - alters behaviour of `specific_snp` and `exclude_snp`
to include/exclude SNPs within +/- half of this distance (kb) are
also included. [float]
* range_resolution - sets the resolution of the (from, to) range.
Either bp, kb or mb. If set it will take the values from
`snp_bp_range`. [str/int/float]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"genotype_rate": " --geno %s ",
"missingness": "--mind %s ",
"hwe": " --hwe %s ",
"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"mendelian_error": " --me %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"quality_score_file": " --qual-scores %s ",
"qual_score_column": " %s ",
"var_id_col": " %s ",
"min_qual_score": " --qual-threshold %s ",
"max_qual_score": " --qual-max-threshold %s ",
"allow_no_sex": " --allow-no-sex ",
"enforce_sex": " --must-have-sex ",
"subset_filter": " --filter-%s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"exclude_chromosome": " --not-chr %s ",
"autosome": " --autosome ",
"pseudo_autosome": " --autosome-xy ",
"ignore_indels": " --snps-only no-DI ",
"snp_id_range": " --from %s --to %s ",
"specific_snp": " --snp %s ",
"window_size": " --window %s ",
"exclude_snp": " --exclude-snp %s ",
"snp_bp_range": "--from-bp %s --to-bp %s ",
"covariates_file": " --filter %s ",
"covariate_filter": " %s ",
"covariate_column": " --mfilter %s ",
"missing_phenotype": " --prune ",
"conditional_snp": " --condition %s ",
"haplotype_size": " --blocks-max-kb %s ",
"haplotype_frequency": " --blocks-min-maf %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
# need to check for covariates and qual scores - these
# are more complex. Deal with these first and remove
# from dictionary once complete.
try:
assert filter_dict["quality_score_file"]
assert filter_dict["qual_score_column"]
assert filter_dict["var_id_col"]
quals = []
qual_file = filter_dict["quality_score_file"]
score_col = filter_dict["qual_score_column"]
id_col = filter_dict["var_id_col"]
quals.append(filter_map["quality_score_file"] % qual_file)
quals.append(filter_map["qual_score_column"] % score_col)
quals.append(filter_map["var_id_col"] % id_col)
# remove from dictionary
filter_dict.pop("qual_score_column", None)
filter_dict.pop("var_id_col", None)
filters.append(" ".join(quals))
except KeyError:
pass
try:
assert filter_dict["covariates_file"]
assert filter_dict["covariate_filter"]
covars = []
covar_file = filter_dict["covariates_file"]
covar_val = filter_dict["covariate_filter"]
covars.append(filter_map["covariates_file"] % covar_file)
covars.append(filter_map["covariate_filter"] % covar_val)
            # check to filter on specific column number, default is 3rd file
# column, i.e. (n+2)th column
try:
assert filter_dict["covariate_column"]
covar_col = filter_dict["covariate_column"]
covars.append(filter_map["covariate_column"] % covar_col)
filter_dict.pop("covariate_column", None)
except KeyError:
pass
# remove from dictionary
filter_dict.pop("covariates_file", None)
filter_dict.pop("covariate_filter", None)
filters.append(" ".join(covars))
except KeyError:
pass
# range_resolution and snp_bp_range are used together
try:
assert filter_dict["snp_bp_range"]
flags = filter_map["snp_bp_range"]
from_pos = filter_dict["snp_bp_range"].split(",")[0]
to_pos = filter_dict["snp_bp_range"].split(",")[1]
filters.append(flags % (from_pos, to_pos))
# remove so they are not duplicated - source of bugs
filter_dict.pop("snp_bp_range", None)
except KeyError:
pass
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
# handle multiple arguments in string format
elif len(filter_dict[each].split(",")) > 1:
vals = tuple(filter_dict[each].split(","))
filters.append(filter_map[each] % vals)
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def calc_ld(self, ld_statistic, ld_threshold,
ld_shape="table"):
'''
Calculate linkage disequilibrium between all SNP
pairs.
Arguments
---------
ld_statistic: string
The LD statistic to report, either correlation or squared correlation
of inter-variant allele counts
ld_threshold: float
minimum value to report for pair-wise LD
ld_window: int
max distance (in Kb) between SNPs for calculating LD
ld_shape: string
shape to use for reporting LD, either a table or a matrix. If a
            matrix then either square, square with diagonal (square0) or
triangular. Square matrices are symmetric.
'''
statement = []
ld_map = {"r": " --r %s dprime ",
"r2": "--r2 %s dprime "}
shape_map = {"table": "inter-chr gz",
"square": "square gz",
"square0": "square0 gz",
"triangle": "triangle gz"}
try:
statement.append(ld_map[ld_statistic] % shape_map[ld_shape])
except KeyError:
raise ValueError("%s LD statistic not recognised. Please "
"use eithr 'r' or 'r2'" % ld_statistic)
if type(ld_threshold) == float:
statement.append(" --ld-window-r2 %0.3f " % ld_threshold)
else:
E.warn("threshold type not recognised, setting to default "
"value of 0.2")
self.statement["tasks"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
Plink2 is capable of much more than just running basic association
analyses.
These include file processing, reformating, filtering, data summaries,
PCA, clustering, GRM calculation (slow and memory intense), etc.
multiple tasks can be added by separate calls to this function.
For instance, adding phenotype and gender information using the
update_samples task whilst change the file format.
Tasks
-----
* change_format - convert from input format to an alternative format
after applying filters.
* change_missing_values - alters the genotype or phenotype missing
value into the value supplied.
* update_variants - use this to fill in missing variant IDs, useful
for data from exome or whole-genome sequencing that have
non-standard IDs.
* update_samples - update phenotype and sample information
* flip_strands - flip the strand for alleles, swaps A for T and
C for G.
* flip_scan - use the LD-based scan to check SNPs have not had
incorrect strand assignment. Particularly useful if cases and
controls were genotyped separately, or the cohort was genotyped
in different batches.
* sort - sort files by individual and/or family IDs
* merge - merge new filesets with reference fileset.
* merge_mode - handling of missing values and overwriting values
* find_duplicates - find and output duplicate variants based on bp position,
or variant ID. Useful to output for the --exclude filtering flag.
* remove_relations - remove one of a pair of individuals with IBS >=
a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125).
* check_gender - check imputed gender from non-pseudoautosomal X
chromsome genotypes against self-reported gender
* estimate_haplotypes - assign SNPs to haplotype blocks and get
positional information
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
task_map = {'change_format': {"plink_binary": " --make-bed ",
"plink": " --recode ",
"oxford": " --recode oxford ",
"oxford_binary": " --recode oxford gen-gz ",
"raw": " --recode A tabx "},
"change_missing_values": {"genotype": " --missing-genotype %s ",
"phenotype": " --missing-phenotype %s "},
"update_variants": {"variant_ids": " --set-missing-var-ids %s ",
"missing_id": " --mising-var-code %s ",
"chromosome": " --update-chr %s ",
"centimorgan": " --update-cm %s ",
"name": " --update-name %s ",
"alleles": " --update-alleles %s ",
"map": " --update-map %s "},
"update_samples": {"sample_ids": " --update-ids %s ",
"parents": " --update-parents %s ",
"gender": " --update-sex %s %s "},
"flip_strands": {"all_samples": " --flip %s ",
"subset": " --flip-subset %s "},
"flip_scan": {"default": " --flip-scan verbose ",
"window": "--flip-scan --flip-scan-window %s ",
"kb": " --flip-scan --flip-scan-window-kb %s ",
"threshold": " --flip-scan --flip-scan-threshold %s "},
"sort": {"none": " --indiv-sort %s ",
"natural": " --indiv-sort %s ",
"ascii": " --indiv-sort %s ",
"file": " --indiv-sort %s "},
"merge": {"plink": " --merge %s ",
"binary_plink": " --bmerge %s "},
"merge_mode": {"default": " --merge-mode 1 ",
"orginal_missing": " --merge-mode 2 ",
"new_nonmissing": " --merge-mode 3 ",
"no_overwrite": " --merge-mode 4 ",
"force": " --merge-mode 5 ",
"report_all": " --merge-mode 6 ",
"report_nonmissing": " --merge-mode 7"},
"find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ",
"id_match": " --list-duplicate-vars ids-only ",
"suppress_first": " --list-duplicate-vars suppress-first"},
"remove_relations": {"cutoff": " --rel-cutoff %s "},
"check_gender": " --check-sex ",
"pca": " --pca %s ",
"estimate_haplotypes": " --blocks "}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
elif task == "check_gender":
statement.append(task_map[task])
elif task == "estimate_haplotypes":
statement.append(task_map[task])
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# gender has two string formats
if value == "gender":
gcol = 1
statement.append(sub_task[value] % (parameter,
gcol))
else:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("No sub task found, see docs for details of "
"recognised tasks")
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
# handle multiple tasks for a single run
try:
curr_tasks = self.statement["tasks"]
new_tasks = " ".join(statement)
self.statement["tasks"] = " ".join([curr_tasks, new_tasks])
except KeyError:
self.statement["tasks"] = " ".join(statement)
def _output_statistics(self, **kwargs):
'''
Summary statistics are written to specific files dictated by the
type of statistic
Statistics
----------
* allele_frequency - writes out MAF to `plink`.frq, this can be
modified with specific keywords.
* missing_data - generates a report of data missingness, can be subset
into within family and/or cluster reports
* hardy_weinberg - calculates all HWE p-values using exact test
statistics. For case/control studies reports are written for case,
controls and combined.
* mendel_errors - generates a Mendelian error report across all trios.
There are 10 different codes responding to different Mendelian error
scenarios.
* inbreeding - calculate observed and expected homozygosity across
individuals and F statistics. If the sample size is small then a
file of MAFs is required. Inbreeding coefficients can also be
reported on request using inbreeding_coef.
* gender_checker - checks gender assignment against X chromosome
genotypes. Gender values can also be imputed based on genotype
information using gender_impute.
* wrights_fst - calculate Wright's Fst statistic given a set of
subpopulations for each autosomal diploid variant. Used in
conjunction with the --within flag.
'''
stats_map = {"allele_frequency": " --freq %s ",
"missing_data": " --missing %s ",
"hardy_weinberg": " --hardy midp ",
"mendel_errors": " --mendel %s ",
"inbreeding": " --het %s ",
"inbreeding_coef": " --ibc ",
"gender_checker": " --check-sex ",
"gender_impute": " --impute-sex ",
"wrights_fst": " --fst --within %s ",
"case_control_fst": "--fst %s "}
statement = []
for key, value in kwargs.items():
if value:
try:
assert stats_map[key]
statement.append(stats_map[key] % value)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
else:
try:
assert stats_map[key]
flag = stats_map[key].rstrip("%s ")
statement.append(flag)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
self.statement["stats"] = " ".join(statement)
def run_association(self, association=None, model=None,
run_options=None,
permutation=False, n_perms=None,
random_seed=None, permutation_options=None,
covariates_file=None, covariates=None):
'''
Construct a statement for a plink2 association analysis.
QC filters are constructed from input during instantiation.
run options include redirecting logging output, using parallelisation,
defining number of threads to use, etc
The default association uses the --assoc flag. Plink will check
phenotype coding, if it is not case/control it assumes
it is a continuous trait and uses linear regression.
Alternative regression models that include covariates can be used,
i.e. logistic and linear regression.
key
***
{CC} - applies to case/control analysis only
{quant} - applies to quantitative trait only
{CC/quant} - applies to both
run_options
-----------
``--assoc``:
* `fisher | fisher-midp` - uses Fisher's exact test to calculate
association p-values or applies Lancaster's mid-p adjustment. {CC}
* `counts` - causes --assoc to report allele counts instead of
frequencies. {CC}
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `qt-means` - generates a .qassoc.means file reporting trait means
and standard deviations by genotype. {quant}
* `lin` - reports the Lin et al (2006) statistic to be reported. If
multiple testing adjustments and/or permutation is also used, they
will be based on this statistic. {quant}
``--model``:
* `fisher | fisher-midp | trend-only` - uses Fisher's exact test
to calculate association p-values or applies Lancaster's mid-p
adjustment. trend-only forces only a trend test to be performed.
{CC}
* `dom | rec | gen | trend` - use the specified test as the basis
for the model permutation. If none are defined the result with the
smallest p-value is reported. {CC}
* --cell - sets the minimum number of observations per cell in the
2x3 contingency table. The default is 0 with the Fisher and
Fisher-midp test, otherwise 5. {CC}
``--linear/logistic``:
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `hide-covar` - removes the covariate specific sections from the
results output. {CC/quant}
* `sex | no-x-sex` - `sex` adds sex as covariate to all models,
whilst `no-x-sex` does not include gender into X-chromosome SNP
models. {CC/quant}
* `interaction` - adds in genotype X covariate interaction terms
into the model. Can only be used with permutation if ``--tests``
is also specified. {CC/quant}
* `beta` - reports the beta coefficients instead of the OR in a
logistic model. {CC}
* `standard-beta` - standardizes the phenotype and all predictor
variables to zero mean and unit variance prior to regression
(separate for each variant analysed). {quant}
* `intercept` - includes the intercept in the output results.
{quant}
model
-----
* `recessive` - `recessive` specifies the model assuming the A1 allele
as recessive. {CC/quant}
* `dominant` - `dominant` specifies the model assuming the A1 allele is
dominant. {CC/quant}
* `genotype` - `genotype` adds an additive effect/dominance deviation
2df joint test with two genotype variables in the test (coded 0/1/2
and 0/1/0). {CC/quant}
* `trend` - forces a trend test to be performed. {CC/quant}
* `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype
coding. With permutation it will be based on the joint test instead
of just the additive effects. This can be overridden using the
`--tests` flag. {CC/quant}
* `no-snp` - `no-snp` defines a regression of phenotype on covariates
without reference to genotype data, except where `--condition{-list}`
is specified. If used with permutation, test results will be reported
for every covariate. {CC/quant}
permutation
-----------
If permutation is True, run an adaptive Monte Carlo permutation test.
If n_perms is set, this will run a max(T) permutation test with the n
replications. A random seed will need to be provided.
* `perm-count` - this alters the permutation output report to include
counts instead of frequencies
covariates
----------
These should be provided in a separate file. Specifying which
covariates to include can be done as either a comma-separated list
of covariate names or numbers. These numbers will correspond to the
(n+2)th covariate file column as per the plink documentation.
'''
# model map maps common option effects onto specific syntax
model_map = {"--logistic": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--linear": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--model": {"recessive": "rec",
"dominant": "dom",
"genotype": "gen"}}
statement = []
# construct analysis flags
# add model, i.e. additive, recessive, dominant, etc.
# see docstring for details. Make sure correct modifier is used
# with a mapping dictionary
if association == "logistic":
statement.append(" --logistic ")
m_map = model_map["--logistic"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "linear":
statement.append(" --linear ")
m_map = model_map["--linear"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "model":
statement.append(" --model ")
m_map = model_map["--model"]
statement.append(m_map[model])
else:
statement.append(" --assoc ")
# add in run options. These need to be in their correct
# format already
if run_options:
modifiers = " ".join(run_options)
statement.append(modifiers)
else:
pass
# permutation should have a random seed set by the user. Allow
# this to set it's own seed if one not provided, but report it in
# the log file
if permutation:
try:
assert random_seed
except AssertionError:
random_seed = random.randint(0, 100000000)
E.warn("No seed is provided for the permutation test. "
"Setting seed to %s. Record this for future "
"replicability" % random_seed)
if n_perms:
statement.append(" mperm=%i --seed %s " % (n_perms,
random_seed))
else:
statement.append(" perm --seed %s " % (random_seed))
else:
pass
# if using linear or logistic, covariates can be added into the model
# to adjust for their effects - assumes fixed effects of covariates
# mixed models are not yet implemented in Plink2.
if covariates:
covars = covariates.split(",")
if len(covars) > 1:
if type(covars[0]) == str:
m_covar = " --covar-name %s " % covariates
elif type(covars[0]) == int:
m_covar = " --covar-number %s " % covariates
else:
# if none are specified then don't adjust the model for any
# and log a warning
E.warn("Covariate header or numbers are not recognised."
"No covariates will be included in the model. Please"
"specifiy them exactly")
covariates = None
covariates_file = None
elif len(covars) == 1:
if type(covars) == str:
m_covar = " --covar-name %s " % covariates
elif type(covars) == int:
m_covar = " --covar-number %i " % covariates
else:
# if none are specified then don't adjust the model for any
# and log a warning
E.warn("Covariate header or numbers are not recognised."
"No covariates will be included in the model. Please"
"specifiy them exactly")
covariates = None
covariates_file = None
if covariates and covariates_file:
statement.append(" --covar %s %s " % (covariates_file,
m_covar))
elif covariates and not covariates_file:
E.warn("No covariate file specified. None included in model.")
elif covariates_file and not covariates:
E.warn("No covariates specified to include in the model."
"None included")
else:
pass
self.statement["assoc"] = " ".join(statement)
def PCA(self, n_pcs="20"):
'''
Perform PCA analysis on a previously generated GRM and output the
first n principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def _dimension_reduction(self, **kwargs):
'''
Use PCA to perform dimensionality reduction on
input samples. A PCA can be calculated using
a subset of samples which can then be projected on
to other samples.
'''
# FINISH ME!!!!
def _detect_interactions(self, method=None, modifier=None,
set_file=None, set_mode=None,
report_threshold=None,
sig_threshold=None,
covariates_file=None, covariates=None):
'''
Detect epistatic interactions between SNPs using either an inaccurate
scan (fast-epistasis) or a fully saturated linear model
Methods
-------
fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype
count tables to test for interactions. Can be modified to use a likelihood
ration test `boost` or a joint-effects test `joint-effects`. Default is
`joint-effects`.
epistasis - uses a linear model to test for interactions between additive
effects after main effects. Logistic regression for case/control and
linear regression for quantitative traits.
two_locus - tests a single interaction between two variants using joint genotype
counts and frequencies.
adjusted - allows adjustment for covariates in the interaction test, and also adjusts
for main effects from both the test and target SNP. Requires an R plugin script.
'''
interact_map = {"fast_epistasis": " --fast-epistasis %s ",
"epistasis": " --epistasis %s ",
"two_locus": " --twolocus %s ",
"adjusted": " --R %s "}
statement = []
if modifier:
statement.append(interact_map[method] % modifier)
else:
modifier = ""
statement.append(interact_map[method] % modifier)
if covariates_file:
statement.append("--covar %s --covar-name %s " % (covariates_file,
covariates))
else:
pass
if set_mode and set_file:
# does not work with two-locus test
if method == "two_locus" and set_mode:
E.warn("Two locus test cannot be used in conjunction "
"with a set-based test.")
elif set_mode:
statement.append(" %s --set %s " % (set_mode, set_file))
else:
pass
else:
pass
# alter reporting of significant interactions and significance
# level of interactions
if report_threshold:
statement.append(" --epi1 %0.3f " % float(report_threshold))
else:
pass
if sig_threshold:
statement.append(" --epi2 %0.3f " % float(sig_threshold))
else:
pass
self.statement["epistasis"] = " ".join(statement)
def _matrices(self, matrix_type, shape="triangle", compression=None, options=None):
'''
Calculate a number of different distance matrices:
realised genetic relationship matrix
relationship covariance matrix
identity by descent/state matrix
hamming distance matrix
* matrix_type - matrix to compute. Can be either IBS, 1 - IBS,
Hamming, GRM
'''
statement = []
if matrix_type == "hamming":
flag = " --distance "
elif matrix_type == "ibs":
flag = " --distance ibs "
elif matrix_type == "genomic":
flag = " --distance 1-ibs "
elif matrix_type == "grm":
flag = " --make-grm-bin "
# drop None values so the join does not fail when compression or
# options are not supplied
if options:
statement.append(" ".join([s for s in [flag, shape, compression, options] if s]))
elif matrix_type == "grm":
statement.append(flag)
else:
statement.append(" ".join([s for s in [flag, shape, compression] if s]))
return " ".join(statement)
def _qc_methods(self, parameter=None, **kwargs):
'''
Perform QC on genotyping data, SNP-wise and sample-wise.
All arguments are passed as key word arguments, except
cases detailed in `Parameters` where they are passed with
the ``parameter`` argument.
Methods
-------
* ld_prune - generate a list of SNPs in linkage equilibrium by
pruning SNPs on either an LD statistic threshold, i.e. r^2,
or use a variance inflation factor (VIF) threshold
* heterozygosity - calculate average heterozygosity from each
individual across a set of SNPs, threshold on individuals
with deviation from expected proportions
* ibd - calculate the genetic relationship of individuals to
infer relatedness between individuals, threshold on given
degree of relatedness, e.g. IBD > 0.03125, 3rd cousins
* genetic_gender - estimate the gender of an individual
from the X chromosome genotypes - correlate with reported
gender and output discrepancies
* ethnicity_pca - perform PCA using a subset of independent
SNPs to infer genetic ancestry. Compare and contrast this
to individuals reported ancestry. Report discrepancies
and individuals greater than a threshold distance away
from a reference population.
* homozygosity - identifies sets of runs of homozygosity
within individuals. These may be indicative of inbreeding,
systematic genotyping errors or regions under selection.
Parameters
----------
Method parameters can also be passed through this function
as keyword=value pairs.
* ld_prune:
`kb` - this modifier changes the window resolution to kb
rather than bp.
`r2` - the r^2 threshold above which SNPs are to be removed
`vif` - the VIF threshold over which SNPs will be removed
`window` - window size to calculate pair-wise LD over
`step` - step size to advance window by
'''
qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ",
"VIF": " --indep %s %s %s "},
"heterozygosity": {"gz": " --het gz",
"raw": " --het "},
"ibd": {"relatives": " --genome gz rel-check ",
"full": " --genome gz full ",
"norm": " --genome gz "},
"genetic_gender": "none",
"ethnicity_pca": "none",
"homozygosity": {"min_snp": " --homozyg-snp %s ",
"min_kb": " --homozyg-kb %s ",
"default": " --homozyg ",
"density": " --homozyg-density ",
"set_gap": " --homozyg-gap ",
"snp_window": " --homozyg-window-snp %s ",
"het_max": " --homozyg-het %s "}}
task_dict = {}
state = []
# put everything in an accessible dictionary first
for task, value in kwargs.items():
task_dict[task] = value
# LD pruning can be passed multiple parameters,
# handle this separately
try:
sub_task = task_dict["ld_prune"]
ld_prune_task = qc_dict["ld_prune"]
try:
step = task_dict["step"]
except KeyError:
raise AttributeError("No step size found, please "
"pass a step size to advance the "
"window by")
try:
window = task_dict["window"]
try:
task_dict["kb"]
window = "".join([window, "kb"])
task_dict.pop("kb", None)
except KeyError:
pass
except KeyError:
raise AttributeError("No window size found. Please input "
"a window size to prune over")
try:
threshold = task_dict["threshold"]
except KeyError:
raise AttributeError("No threshold value, please input "
"a value to LD prune SNPs on")
# add in the kb if it is passed as an argument
state.append(ld_prune_task[sub_task] % (window, step, threshold))
task_dict.pop("threshold", None)
task_dict.pop("ld_prune", None)
task_dict.pop("window", None)
task_dict.pop("step", None)
except KeyError:
pass
for task, value in task_dict.items():
try:
sub_task = qc_dict[task]
try:
state.append(sub_task[value] % parameter)
except TypeError:
state.append(sub_task[value])
except KeyError:
raise AttributeError("Task not found, please see "
"documentation for available features")
self.statement["QC"] = " ".join(state)
def build_statement(self, infiles, outfile, threads=None,
memory="60G", parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["QC"])
except KeyError:
pass
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["stats"])
except KeyError:
pass
try:
statement.append(self.statement["assoc"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["epistasis"])
except KeyError:
pass
if threads:
statement.append(" --threads %i " % threads)
else:
pass
if not memory:
pass
elif memory != "60G":
memory = int(memory.strip("G")) * 1000
statement.append(" --memory %i " % memory)
else:
statement.append(" --memory 60000 ")
# add output flag
# outfile needs to be complete path for Plink to save
# results properly - check if it starts with '/',
# if so is already a full path
if not parallel:
if os.path.isabs(outfile):
statement.append(" --out %s " % outfile)
else:
outpath = "/".join([os.getcwd(), outfile])
statement.append(" --out %s " % outpath)
os.system(" ".join(statement))
else:
# parallelisation only really applies to GRM calculation
# at the moment <- need to generalise
# if parallelisation is used, invoke temp files
# then agglomerate files
statements = []
if os.path.isabs(outfile):
outpath = outfile
else:
outpath = "/".join([os.getcwd(), outfile])
for i in range(1, parallel+1):
# copy list, assigning just makes a pointer
p_state = statement[:]
p_state.append(" --parallel %i %i " % (i, parallel))
p_state.append(" --out %s.%i " % (outpath, i))
statements.append(" ".join(p_state))
os.system(";".join(statements))
class PlinkDev(Plink2):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Requires Plink v1.9_devel to be in the user's PATH variable as ``plinkdev`` to
distinguish it from Plink v1.07 and v1.9.
Currently uses Nov 11 development build.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plinkdev"
self.statement = {}
self.filters = []
class GWASResults(object):
'''
A class for handling the results from a GWA, used for plotting
and post-analysis QC
'''
def __init__(self, assoc_file, **kwargs):
# if the assoc_file is a list of multiple files,
# then merge them into a single dataframe
if type(assoc_file) == list and len(assoc_file) > 1:
E.info("multiple results files detected")
self.infiles = assoc_file
self.infile = None
self.results = self.parse_genome_wide(assoc_file)
else:
E.info("single results file detected")
self.infile = assoc_file
self.infiles = None
# results is a pandas dataframe to operate on
self.results = self.get_results(assoc_file, **kwargs)
def parse_genome_wide(self, association_files):
'''
Accept a list of results files, merge them together
and output as a single dataframe
Will this take a lot of memory??
'''
file0 = association_files.pop(0)
df = self.get_results(file0)
for afile in association_files:
_df = self.get_results(afile)
df = df.append(_df)
df["CHR"] = df["CHR"].astype(np.int64)
df.sort_values(by=["CHR", "BP"], inplace=True)
return df
def get_results(self, association_file,
epistasis=False,
file_format="plink"):
'''
Parse a GWA or epistasis results file and return the table
'''
# use Pandas for now - try something different later
# SQLite DB maybe?
# inconsistent number of white spaces between
# fields means Pandas parsing breaks down
# fields need to be the correct data type,
# i.e. BP = int, P = float, SNP = str, etc
# if the file has already been parsed and processed
# just assign it instead
# epistasis results don't have a header
try:
peek = pd.read_table(association_file, nrows=5,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
peek = pd.read_table(association_file, nrows=5,
sep="\t", header=0,
index_col=None)
if epistasis:
try:
results_frame = pd.read_table(association_file,
sep="\s*", header=0,
index_col=None)
except StopIteration:
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None)
# results from fast epistasis are different to others
if file_format == "cassi_covar":
if results_frame.shape[1] == 12:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P"]
elif results_frame.shape[1] == 14:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ"]
elif results_frame.shape[1] == 16:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ",
"CASE_DPRIME" "CTRL_DPRIME"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
elif file_format == "cassi":
pass
elif file_format == "plink":
if results_frame.shape[1] == 7:
results_frame.columns = ["CHR1", "SNP1", "CHR",
"SNP", "OR", "STAT", "P"]
elif results_frame.shape[1] == 9:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "NMISS",
"OR", "SE", "STAT", "P"]
else:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR",
"SE", "STAT", "P"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"],
errors="coerce")
return results_frame
else:
try:
assert peek["log10P"].any()
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None,
dtype={"BP": np.int64,
"NMISS": np.int64})
return results_frame
except KeyError:
pass
l_count = 0
E.info("parsing file: %s" % association_file)
with open(association_file, "r") as ifile:
for line in ifile:
# check if spacing is whitespace or tab
if len(line.split(" ")) > 1:
parsed = line.split(" ")
elif len(line.split("\t")) > 1:
parsed = line.split("\t")
else:
raise IOError("file separator not recognised. "
"Must be whitespace or tab")
# remove multiple blank spaces
for i in range(parsed.count('')):
parsed.remove('')
# get rid of the newline
try:
parsed.remove('\n')
except ValueError:
parsed = [(px).rstrip("\n") for px in parsed]
if l_count == 0:
header = [iy.upper() for ix, iy in enumerate(parsed)]
head_idx = [ix for ix, iy in enumerate(parsed)]
map_dict = dict(zip(head_idx, header))
res_dict = dict(zip(header, [[] for each in header]))
l_count += 1
else:
col_idx = [lx for lx, ly in enumerate(parsed)]
col = [ly for lx, ly in enumerate(parsed)]
for i in col_idx:
res_dict[map_dict[i]].append(col[i])
l_count += 1
# subtract one from the index for the header column
df_idx = range(l_count-1)
results_frame = pd.DataFrame(res_dict, index=df_idx)
results_frame.fillna(value=1.0, inplace=True)
try:
results_frame = results_frame[results_frame["TEST"] == "ADD"]
except KeyError:
pass
# need to handle NA as strings
results_frame["P"][results_frame["P"] == "NA"] = 1.0
results_frame["BP"] = [int(bx) for bx in results_frame["BP"]]
results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]]
try:
results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0
results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]]
except KeyError:
try:
results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0
results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]]
except KeyError:
try:
results_frame["T"][results_frame["T"] == "NA"] = 1.0
results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]]
except KeyError:
pass
try:
results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0
results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]]
except KeyError:
pass
try:
results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0
results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]]
except KeyError:
pass
try:
results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0
results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]]
except KeyError:
pass
try:
results_frame["OR"][results_frame["OR"] == "NA"] = 1.0
results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]]
except KeyError:
try:
results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0
results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]]
except KeyError:
results_frame["B"][results_frame["B"] == "NA"] = 0.0
results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]]
return results_frame
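# Illustrative sketch (hypothetical file name): results can be loaded from a
# single Plink output file or a list of per-chromosome files, e.g.
#   gwas_res = GWASResults(assoc_file="cohort_gwas.assoc.logistic")
#   hits = gwas_res.results[gwas_res.results["P"] <= 5e-8]
# The parsed table exposes standard Plink columns such as CHR, SNP, BP and P.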
def plotManhattan(self, save_path, resolution="chromosome",
write_merged=True, sig_level=8):
'''
Generate a basic manhattan plot of the association results
Just deal with chromosome-by-chromosome for now.
'''
# use the python ggplot plotting package
# need to calculate -log10P values separately
self.results["log10P"] = np.log10(self.results["P"])
# or using rpy2
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
R('''sink(file="sink.text")''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
if resolution == "chromosome":
R('''assoc.df$CHR <- factor(assoc.df$CHR, '''
'''levels=levels(ordered(unique(assoc.df$CHR))),'''
'''labels=unique(paste0("chr", assoc.df$CHR)))''')
R('''nchrom <- length(unique(assoc.df$CHR))''')
R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''')
R('''names(myCols) <- sort(unique(assoc.df$CHR))''')
R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''')
R('''bp_indx <- seq_len(nrow(assoc.df))''')
R('''assoc.df$BPI <- bp_indx''')
R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + '''
'''geom_point(size=1) + colScale + '''
'''geom_hline(yintercept=6, linetype="dashed", colour="blue") + '''
'''theme_bw() + labs(x="Chromosome position (bp)", '''
'''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + '''
'''theme(axis.text.x = element_text(size=8))''')
R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path)
R('''print(p)''')
R('''dev.off()''')
elif resolution == "genome_wide":
R('''nchroms <- length(unique(assoc.df$CHR))''')
R('''png("%s", width=720, height=540)''' % save_path)
R('''p <- manhattan(assoc.df, main="Manhattan plot",'''
'''ylim=c(0, 50), cex=0.9, suggestiveline=T,'''
'''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), '''
'''col=c("#8B1A1A","#8470FF"))''')
R('''print(p)''')
R('''dev.off()''')
R('''sink(file=NULL)''')
if write_merged:
return self.results
else:
return False
def plotQQ(self, save_path, resolution="chromosome"):
'''
Generate a QQ-plot of expected vs. observed
test statistics
'''
self.results["log10P"] = np.log(self.results["P"])
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
R('''png("%s", width=720, height=540)''' % save_path)
R('''qq(assoc.df$P)''')
R('''dev.off()''')
def plotEpistasis(self, save_path, resolution="chromosome"):
'''
Generate both manhattan plot of the SNPs tested for
epistasis with their target SNP, and a QQplot
of the association test p-values
'''
# plot QQplot
qq_save = "_".join([save_path, "qqplot.png"])
self.plotQQ(qq_save)
manhattan_save = "_".join([save_path, "manhattan.png"])
self.plotManhattan(manhattan_save,
resolution=resolution,
sig_level=6,
write_merged=False)
def getHits(self, threshold=0.00000005):
'''
Pull out regions of association by selecting
all SNPs with association p-values less than
a certain threshold. Default is genome-wide
significance, p < 5x10-8.
Then select region +/- 1.5Mb of the index SNP.
'''
hits_df = self.results[self.results["P"] <= threshold]
# find the range of SNPs within 3Mb of each index SNP
contig_group = hits_df.groupby(["CHR"])
# there may be multiple independent hits on a given
# chromosome. Need to identify independent regions.
# Independent regions are defined by their statistical
# independence, not distance. Just take all SNPs
# in 3Mb of the lead SNP for each signal
# this will create overlaps of association signals
for contig, region in contig_group:
region.index = region["BP"]
chr_df = self.results[self.results["CHR"] == contig]
chr_df.index = chr_df["BP"]
# find independent regions and output consecutively
# if only a single SNP above threshold then there is
# only one independent region!!
if len(region) > 1:
independents = self.findIndependentRegions(region)
indi_group = independents.groupby("Group")
else:
region["Group"] = 1
indi_group = region.groupby("Group")
for group, locus in indi_group:
# if there is only a single variant should
# the region be kept? Likely a false
# positive
if min(locus["BP"]) == max(locus["BP"]):
pass
else:
try:
try:
locus.loc[:, "STAT"] = abs(locus["STAT"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.loc[:, "T"] = abs(locus["T"])
locus.sort_values(by="T", inplace=True)
except KeyError:
locus.sort_values(by="CHISQ", inplace=True)
index_bp = locus.iloc[0]["BP"]
E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"]))
left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"])
right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"])
range_df = chr_df.loc[left_end: right_end, :]
max_stat = max(abs(range_df["STAT"]))
yield contig, range_df
def extractSNPs(self, snp_ids):
'''
Extract a specific set of SNP results
Arguments
---------
snp_ids: list
a list of SNP IDs to extract from the
GWAS results
Returns
-------
snp_results: pandas.Core.DataFrame
'''
self.results.index = self.results["SNP"]
snp_results = self.results.loc[snp_ids]
return snp_results
def findIndependentRegions(self, dataframe):
'''
Find the number of independent regions on
a chromosome. Uses R distance and tree
cutting functions
'''
# move the dataframe into the R environment
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(dataframe)
R.assign("rdf", r_df)
R('''mat <- as.matrix(rdf$BP)''')
# get distances then cluster, chop tree at 1x10^6 bp
R('''dist.mat <- dist(mat, method="euclidean")''')
R('''clusts <- hclust(dist.mat, "average")''')
R('''cut <- cutree(clusts, h=1e6)''')
R('''out.df <- rdf''')
R('''out.df$Group <- cut''')
# need to handle changes in pandas2ri API
try:
regions_df = pd.DataFrame(py2ri.ri2py(R["out.df"]))
except NotImplementedError:
regions_df = pd.DataFrame(R["out.df"])
return regions_df
def mergeFrequencyResults(self, freq_dir, file_regex):
'''
Merge GWAS results with frequency information,
and format for GCTA joint analysis input
'''
# create a dummy regex to compare
# file_regex type against
test_re = re.compile("A")
if type(file_regex) == str:
file_regex = re.compile(file_regex)
elif type(file_regex) == type(test_re):
pass
else:
raise TypeError("Regex type not recognised. Must"
"be string or re.SRE_Pattern")
all_files = os.listdir(freq_dir)
freq_files = [fx for fx in all_files if re.search(file_regex, fx)]
gwas_df = self.results
df_container = []
for freq in freq_files:
freq_file = os.path.join(freq_dir, freq)
E.info("Adding information from {}".format(freq_file))
# files may or may not be tab-delimited
try:
_df = pd.read_table(freq_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
_df = pd.read_table(freq_file,
sep="\t", header=0,
index_col=None)
merge_df = pd.merge(self.results, _df,
left_on=["CHR", "SNP"],
right_on=["CHR", "SNP"],
how='left')
df_container.append(merge_df)
count = 0
for df in df_container:
if not count:
gwas_df = df
count += 1
else:
gwas_df = gwas_df.append(df)
E.info("Calculating Z scores and SEs")
z_scores = -0.862 + np.sqrt(0.743 - 2.404 *
np.log(gwas_df.loc[:, "P"]))
se = np.log(gwas_df.loc[:, "OR"])/z_scores
gwas_df.loc[:, "Z"] = z_scores
gwas_df.loc[:, "SE"] = se
gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"])
out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"]
out_df = gwas_df[out_cols]
# need to remove duplicates, especially those
# that contain NaN for A2 and MAF
out_df = out_df.loc[~np.isnan(out_df["MAF"])]
return out_df
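# Worked check of the Z-score approximation used above (the standard
# Altman & Bland formula): z = -0.862 + sqrt(0.743 - 2.404 * ln(P)).
# For P = 0.05, z = -0.862 + sqrt(0.743 + 7.202) = -0.862 + 2.819 ~= 1.96,
# recovering the familiar two-sided 5% critical value; SE is then log(OR)/z.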
##########################################################
# unbound methods that work on files and data structures #
##########################################################
def plotMapPhenotype(data, coords, coord_id_col, lat_col,
long_col, save_path, xvar, var_type,
xlabels=None, level=None):
'''
Generate a map of the UK, with phenotype data overlaid
'''
# merge co-ordinate data with phenotype data
merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col,
right_on=coord_id_col, how='inner')
# recode the pheno column and set the level of the categorical variable
if xlabels and var_type == "categorical":
# convert to string type as a categorical variable
# drop NA observations from the merged data frame
na_mask = pd.isnull(merged_df.loc[:, xvar])
merged_df = merged_df[~na_mask]
rvar = merged_df.loc[:, xvar].copy()
nvar = pd.Series(np.nan_to_num(rvar), dtype=str)
var = [v for v in set(nvar)]
var.sort()
# recode the variables according to the input labels
xlabs = xlabels.split(",")
lbls = [str(xlabs[ix]) for ix in range(len(var))]
for xv in range(len(var)):
nvar[nvar == var[xv]] = lbls[xv]
merged_df.loc[:, "cat_var"] = nvar
else:
pass
if level:
lvar = merged_df.loc[:, "cat_var"].copy()
mask = lvar.isin([level])
lvar[mask] = 1
lvar[~mask] = 0
lvar = lvar.fillna(0)
merged_df.loc[:, "dichot_var"] = lvar
else:
pass
# push the df into the R env
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(merged_df)
R.assign("pheno.df", r_df)
# setup the map and plot the points
R('''suppressPackageStartupMessages(library(maps))''')
R('''suppressPackageStartupMessages(library(mapdata))''')
R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",'''
'''"Ireland", "Isle of Man", "Wales:Anglesey"), '''
'''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''')
# colour by reference, or a colour for each discrete value
if level:
R('''red <- rep("#FF0000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''')
R('''black <- rep("#000000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''')
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points((-pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], '''
'''(-pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals())
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals())
R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),'''
'''fill=c("#000000", "#FF0000"))''' % locals())
R('''dev.off()''')
else:
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", '''
'''col=factor(pheno.df$cat_var))''' % locals())
R('''legend('topleft', legend=unique(pheno.df$cat_var),'''
'''fill=unique(pheno.df$cat_var))''' % locals())
R('''dev.off()''')
def plotPhenotype(data, plot_type, x, y=None, group=None,
save_path=None, labels=None, xlabels=None,
ylabels=None, glabels=None, var_type="continuous"):
'''
Generate plots of phenotypes using ggplot
'''
# change data format if necessary and convert nan/NA to missing
if not y and var_type == "categorical":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "integer":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "continuous":
var = data.loc[:, x].copy()
data.loc[:, x] = pd.Series(var, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "categorical":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=str)
data.loc[:, y] = pd.Series(yvar, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "integer":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=np.int64)
data.loc[:, y] = pd.Series(yvar, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "continuous":
# NAs and NaNs should be handled by ggplot
xvar = data.loc[:, x].copy()
yvar = data.loc[:, y].copy()
data.loc[:, x] = pd.Series(xvar, dtype=np.float64)
data.loc[:, y] = pd.Series(yvar, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
R('''suppressPackageStartupMessages(library(ggplot2))''')
# put the pandas dataframe in to R with rpy2
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(data)
R.assign("data_f", r_df)
# plotting parameters, including grouping variables and labels
# axis labels
try:
labs = labels.split(",")
except AttributeError:
labs = []
# if variable labels have been provided then assume they are
# categorical/factor variables.
# assume variable labels are input in the correct order
if xlabels:
try:
unique_obs = len(set(data.loc[:, x]))
xfact = len(xlabels.split(","))
if xfact == unique_obs:
R('''lvls <- unique(data_f[,"%(x)s"])''' % locals())
lbls = ro.StrVector([ri for ri in xlabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(x)s"] <- ordered(data_f[,"%(x)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
except AttributeError:
xlabels = None
else:
pass
if glabels:
unique_obs = len(set(data.loc[:, group]))
gfact = len(glabels.split(","))
if gfact == unique_obs:
R('''lvls <- unique(data_f[, "%(group)s"])''' % locals())
lbls = ro.StrVector([rg for rg in glabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(group)s"] <- ordered(data_f[,"%(group)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
# start constructing the plot
# if X and Y are specified, assume Y is a variable to colour
# observations by, unless group is also set.
# If Y and group then colour by group and split by Y
if y:
R('''p <- ggplot(aes(x=%s, y=%s), data=data_f)''' % (x, y))
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(colour=%(y)s))''' % locals())
elif plot_type == "barplot":
if group:
R('''p <- p + geom_bar(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(colour=%(y)s))''' % locals())
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_density(aes(colour=%(y)s))''' % locals())
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(group=%(group)s,'''
'''aes(x=factor(%(x)s), y=%(y)s, fill=%(group)s))''' % locals())
else:
R('''p <- p + geom_boxplot(aes(colour=%(x)s))''' % locals())
elif plot_type == "scatter":
if group:
R('''p <- p + geom_point(size=1, aes(colour=%(group)s))''' % locals())
else:
R('''p <- p + geom_point(size=1)''')
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
ylab = labs[1]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s")''' % locals())
elif len(labs) == 3:
xlab = labs[0]
ylab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 4:
xlab = labs[0]
ylab = labs[1]
glab = labs[2]
title = labs[3]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s",'''
'''title="%(title)s")''' % locals())
# need to add in guide/legend title
else:
R('''p <- ggplot(data=data_f)''')
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(%s))''' % x)
elif plot_type == "barplot":
if group:
R(''' p <- p + geom_bar(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(%s))''' % x)
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_density(aes(%s))''' % x)
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(aes(y=%(x)s, '''
'''x=factor(%(group)s)))''' % locals())
else:
raise AttributeError("Y or group variable is missing")
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 3:
if group:
xlab = labs[0]
glab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(glab)s", y="%(xlab)s",'''
'''title="%(title)s")''' % locals())
else:
E.warn("too many labels provided, assume first is X, "
"and second is plot title")
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
# the default theme is bw
R('''p <- p + theme_bw()''')
R('''png("%(save_path)s")''' % locals())
R('''print(p)''')
R('''dev.off()''')
def parseFlashPCA(pcs_file, fam_file):
'''
Parse the principal components file from FlashPCA
and match with individual identifiers. This
assumes the output order of FlashPCA is the same
as the input order in the .fam file
'''
try:
pc_df = pd.read_table(pcs_file, sep="\s*",
header=None, index_col=None)
except StopIteration:
pc_df = pd.read_table(pcs_file, sep="\t",
header=None, index_col=None)
# add a header to the pc_df file
headers = ["PC%i" % (n + 1) for n,
m in enumerate(pc_df.columns)]
pc_df.columns = headers
fam_df = pd.read_table(fam_file, sep="\t",
header=None, index_col=None)
fam_df.columns = ["FID", "IID", "PAR", "MAT", "GENDER",
"PHENO"]
pc_df[["FID", "IID"]] = fam_df.iloc[:, :2]
return pc_df
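# Illustrative sketch (hypothetical file names): the FlashPCA output is merged
# with the .fam file so that PCs can be annotated and plotted, e.g.
#   pc_df = parseFlashPCA(pcs_file="flashpca.pcs", fam_file="cohort.fam")
#   plotPCA(data=pc_df, nPCs=2, point_labels="ancestry",
#           save_path="pca_plot.png", headers=False, metadata="samples.tsv")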
def plotPCA(data, nPCs, point_labels, save_path,
headers, metadata=None, multiplot=False):
'''
Plot N principal components from a PCA either as
a single plot of the first 2 PCs, a grid plot of
N PCs.
Arguments
---------
data: string
PATH to file containing principal components
nPCs: int
number of principal components to plot. If this
value is > 2, then multiplot will be enabled
automatically
point_labels: vector
a vector of labels of length corresponding to
the number of rows in the data file. These are
used to colour the points in the plot with relevant
metadata. Alternatively, can be the column header
in the metadata file that corresponds to annotations
save_path: string
An absolute PATH to save the plot(s) to
headers: boolean
whether the `data` file contains header delineating the
columns
metadata: string
file containing metadata to annotate plot with, includes
point_labels data
multiplot: boolean
If True, generate a grid of scatter plots with successive
PCs plotted against each other
Returns
-------
None
'''
py2ri.activate()
if metadata:
meta_df = pd.read_table(metadata, sep="\t", header=0,
index_col=None)
else:
pass
labels = meta_df[["FID", "IID", point_labels]]
merged = pd.merge(data, labels, left_on="FID",
right_on="FID", how='inner')
# TO DO: enable multiplotting of many PCs
r_df = py2ri.py2ri_pandasdataframe(merged)
R.assign("pc.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''pc.df[["%(point_labels)s"]] <- as.factor(pc.df[["%(point_labels)s"]])''' % locals())
R('''p_pcs <- ggplot(pc.df, aes(x=PC1, y=PC2, colour=%s)) + '''
'''geom_point(size=1) + theme_bw() + '''
'''labs(x="PC1", y="PC2", title="PC1 vs. PC2 LD trimmed genotypes")''' % point_labels)
R('''png("%s")''' % save_path)
R('''print(p_pcs)''')
R('''dev.off()''')
def countByVariantAllele(ped_file, map_file):
'''
Count the number of individuals carrying the variant allele
for each SNP.
Count the number of occurences of each allele with the variant
allele of each other SNP.
Requires ped file genotyping to be in format A1(minor)=1, A2=2
'''
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
homA1[i, i] += 1
elif genos[i] == "12":
het[i, i] += 1
else:
homA2[i, i] += 1
allele_counts = ((2 * homA2) + het)/float(2 * tcount)
mafs = 1 - allele_counts.diagonal()
maf_df = pd.DataFrame(zip(variant_ids, mafs), columns=["SNP", "MAF"],
index=[x for x, y in enumerate(variant_ids)])
maf_df["A2_HOMS"] = (2 * homA1).diagonal()
maf_df["A2_HETS"] = het.diagonal()
maf_df.index = maf_df["SNP"]
maf_df.drop(["SNP"], axis=1, inplace=True)
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount))
return maf_df
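# Worked example of the MAF calculation above (hypothetical counts): with 10
# individuals at one SNP - 2 hom A1 ("11"), 3 het ("12"), 5 hom A2 ("22") -
# the A2 allele count is (2*5 + 3)/(2*10) = 0.65, so MAF = 1 - 0.65 = 0.35,
# matching the direct A1 frequency (2*2 + 3)/20.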
def calcMaxAlleleFreqDiff(ped_file, map_file, group_file,
test=None, ref=None):
'''
Calculate the allele frequency difference between
two groups of individuals based upon some prior
assignment.
Arguments
---------
ped_file: string
plink text format .ped file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
map_file: string
plink test format .map file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
group_file: string
a file containing grouping information, must be in standard
Plink format with IID, FID, GROUP as the columns
test: string
group label to use as the test case for calculating
allele frequency differences. If this isn't set, then
the first non-ref value encountered will be set as test
ref: string
group label to use as the reference case for calculating
allele frequency differences. If not set, then the first
value encountered will be the test.
Returns
-------
freq_diffs: pandas.Core.DataFrame
dataframe of SNP information and allele frequency difference
between group labels
'''
# group labels need to be of the same type, convert all
# group values to string
group_df = pd.read_table(group_file, sep="\t", header=0,
index_col=None,
converters={"GROUP": str,
"FID": str,
"IID": str})
group_df["GROUP"] = [str(xg) for xg in group_df["GROUP"]]
try:
assert ref
E.info("Reference label set to %s" % ref)
except AssertionError:
ref = set(group_df["GROUP"])[0]
E.info("Reference label not provided. Setting "
"reference label to %s" % ref)
try:
assert test
E.info("Test label set to %s" % test)
except AssertionError:
test = [tx for tx in set(group_df["GROUP"]) if not ref][0]
E.info("Test label not provided, setting test "
"label to %s." % test)
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
ref_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
rcount = 0
ncount = 0
ref_ids = group_df["IID"][group_df["GROUP"] == ref].values
test_ids = group_df["IID"][group_df["GROUP"] == test].values
total = len(group_df)
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
# check for ref and test conditions
# ignore individuals in neither camp
if iid in test_ids:
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
test_homA1[i, i] += 1
elif genos[i] == "12":
test_het[i, i] += 1
else:
test_homA2[i, i] += 1
elif iid in ref_ids:
rcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
ref_homA1[i, i] += 1
elif genos[i] == "12":
ref_het[i, i] += 1
else:
ref_homA2[i, i] += 1
else:
ncount += 1
if round((tcount + rcount + ncount)/total, 2) == 0.25:
E.info("%i samples counted. "
"Approximately 25%% of samples counted" % (tcount + rcount + ncount))
elif round((tcount + rcount + ncount)/total, 2) == 0.50:
E.info("%i samples counted. "
"Approximately 50%% of samples counted" % (tcount + rcount + ncount))
elif round((tcount + rcount + ncount)/total, 2) == 0.75:
E.info("%i samples counted. "
"Approximately 75%% of samples counted" % (tcount + rcount + ncount))
E.info("Counted alleles for %i test cases, %i ref cases,"
" %i neither reference nor test." % (tcount, rcount,
ncount))
ref_allele_counts = ((2 * ref_homA2) + ref_het)/float(2 * rcount)
test_allele_counts = ((2 * test_homA2) + test_het)/float(2 * tcount)
ref_mafs = 1 - ref_allele_counts.diagonal()
test_mafs = 1 - test_allele_counts.diagonal()
ref_maf_df = pd.DataFrame(zip(variant_ids, ref_mafs),
columns=["SNP", "ref_MAF"],
index=[x for x, y in enumerate(variant_ids)])
ref_maf_df["ref_A2_HOMS"] = (2 * ref_homA1).diagonal()
ref_maf_df["ref_A2_HETS"] = ref_het.diagonal()
ref_maf_df.index = ref_maf_df["SNP"]
ref_maf_df.drop(["SNP"], axis=1, inplace=True)
test_maf_df = pd.DataFrame(zip(variant_ids, test_mafs),
columns=["SNP", "test_MAF"],
index=[x for x, y in enumerate(variant_ids)])
test_maf_df["test_A2_HOMS"] = (2 * test_homA1).diagonal()
test_maf_df["test_A2_HETS"] = test_het.diagonal()
test_maf_df.index = test_maf_df["SNP"]
test_maf_df.drop(["SNP"], axis=1, inplace=True)
freq_diffs = pd.merge(ref_maf_df, test_maf_df,
left_index=True, right_index=True,
how='inner')
freq_diffs["MAF_diff"] = freq_diffs["ref_MAF"] - freq_diffs["test_MAF"]
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount + rcount))
return freq_diffs
def calcPenetrance(ped_file, map_file, mafs=None,
subset=None, snpset=None):
'''
Calculate the proportion of times an allele is observed
in the phenotype subset vs its allele frequency.
This is the penetrance of the allele
i.e. if observed in 100% of affected individuals and 0%
of controls, then penetrance is 100%
Generates a table of penetrances for each variants/allele
and a plot of MAF vs # cases carrying the allele
Generates a heatmap of compound heterozygotes, and homozygotes
with penetrances.
Outputs a table of SNPs, homozygote and heterozygote counts
among subset individuals and proportion of subset individual
phenotype explained by homozygotes and heterozygotes
Requires alleles are coded A1(minor)=1, A2=2
'''
# check subset is set, if not then throw an error
# cannot calculate penetrance without a phenotype
if not subset:
raise ValueError("Cannot calculate penetrance of alleles "
"without a phenotype to subset in")
else:
pass
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
if snpset:
with iotools.open_file(snpset, "r") as sfile:
snps = sfile.readlines()
snps = [sx.rstrip("\n") for sx in snps]
variant_ids = [ks for ks in variants.keys() if ks in snps]
else:
variant_ids = variants.keys()
var_idx = [si for si, sj in enumerate(variant_ids)]
case_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
all_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
tcount = 0
ncases = 0
# missing phenotype individuals must be ignored, else
# they will cause the number of individuals explained
# to be underestimated
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = int(indiv_split[4])
phen = int(indiv_split[5])
if phen != -9:
if subset == "cases":
select = phen
elif subset == "gender":
select = gender
else:
select = None
genos = np.array(indiv_split[6:])
genos = genos[var_idx]
tcount += 1
het = np.zeros(len(genos), dtype=np.float64)
hom = np.zeros(len(genos), dtype=np.float64)
for i in range(len(genos)):
# missing values are coded '00' in plink format
# A1 homs are coded '11' in plink format
if genos[i] == "11":
hom[i] += 1
elif genos[i] == "12":
het[i] += 1
else:
pass
hom_mat = np.outer(hom, hom)
het_mat = np.outer(het, het)
homs = hom_mat.diagonal()
het_mat[np.diag_indices(len(genos))] = homs
gen_mat = het_mat
# separate matrix for subset
# reference is always level 2 for plink files,
# either cases or females
if select == 2:
case_mat += gen_mat
all_mat += gen_mat
ncases += 1
else:
all_mat += gen_mat
else:
pass
E.info("alleles counted over %i SNPs "
"and %i individuals, of which %i are "
"in the %s subset" % (len(genos), tcount, ncases, subset))
penetrance = np.divide(case_mat, all_mat)
# round for the sake of aesthetics
penetrance = np.round(penetrance, decimals=5)
pen_df = pd.DataFrame(penetrance, columns=variant_ids,
index=variant_ids)
pen_df = pen_df.fillna(0.0)
case_df = pd.DataFrame(case_mat, columns=variant_ids,
index=variant_ids)
all_df = pd.DataFrame(all_mat, columns=variant_ids,
index=variant_ids)
# plot heatmap of penetrances as percentages
indf = pen_df * 100
py2ri.activate()
# only plot penetrances > 0%
r_pen = py2ri.py2ri_pandasdataframe(indf)
r_cases = py2ri.py2ri_pandasdataframe(case_df)
r_all = py2ri.py2ri_pandasdataframe(all_df)
R.assign("pen.df", r_pen)
R.assign("case.df", r_cases)
R.assign("all.df", r_all)
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
# penetrances
E.info("plotting penetrance matrix")
R('''hmcol <- colorRampPalette(brewer.pal(9, "BuGn"))(100)''')
R('''rowpen <- pen.df[rowSums(pen.df) > 0,]''')
R('''colpen <- rowpen[,colSums(rowpen) > 0]''')
R('''png("%s/penetrance-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colpen), trace="none", col=hmcol,'''
'''dendrogram="none", Colv=colnames(colpen), key=FALSE, '''
'''Rowv=rownames(colpen), margins=c(10,10), cellnote=round(colpen),'''
'''notecol="white")''')
R('''dev.off()''')
E.info("plotting case counts matrix")
R('''rowcase <- case.df[rowSums(case.df) > 0,]''')
R('''colcase <- rowcase[,colSums(rowcase) > 0]''')
R('''png("%s/cases-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colcase), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colcase), key=FALSE, '''
'''colsep=seq(1:length(colnames(colcase))), '''
'''rowsep=seq(1:length(rownames(colcase))),'''
'''Rowv=rownames(colcase), margins=c(10,10), cellnote=round(colcase),'''
'''notecol="black")''')
R('''dev.off()''')
E.info("plotting all individuals matrix")
R('''rowall <- all.df[rownames(colcase),]''')
R('''colall <- rowall[,colnames(colcase)]''')
R('''png("%s/all-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colall), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colall), key=FALSE, '''
'''colsep=seq(1:length(colnames(colall))), '''
'''rowsep=seq(1:length(rownames(colall))), '''
'''Rowv=rownames(colall), margins=c(10,10), cellnote=round(colall),'''
'''notecol="black")''')
R('''dev.off()''')
# plot MAF vs homozygosity
maf_df = pd.read_table(mafs, sep="\t", header=0, index_col=0)
plot_df = pd.DataFrame(columns=["MAF"],
index=maf_df.index)
plot_df["MAF"] = maf_df["MAF"]
homs = case_mat.diagonal()
hom_series = pd.Series({x: y for x, y in zip(variant_ids,
homs)})
plot_df["explained_by_homozygotes"] = hom_series
plot_df["SNP"] = plot_df.index
plot_df.index = [ix for ix, iy in enumerate(plot_df.index)]
plotPenetrances(plotting_df=plot_df)
out_df = summaryPenetrance(maf_df=maf_df,
case_counts=case_mat,
variants=variant_ids,
n_cases=ncases,
n_total=tcount)
return out_df, pen_df
def summaryPenetrance(maf_df, case_counts,
variants, n_cases, n_total):
'''
Summarise genotype counts and proportion of cases explained
by the observed homozygotes and compound heterozygotes.
This is a function of the total population size and
population allele frequency - does this assume 100%
penetrance of each allele?
'''
# homozygous individuals are on the
# diagonal of the case_counts array
homozyg_cases = case_counts.diagonal()
homozyg_series = pd.Series({x: y for x, y in zip(variants,
homozyg_cases)})
# heterozygotes are on the off-diagonal elements
# get all off diagonal elements by setting diagonals to zero
# matrix is diagonal symmetric
np.fill_diagonal(case_counts, 0)
het_counts = np.sum(case_counts, axis=0)
het_series = pd.Series({x: y for x, y in zip(variants,
het_counts)})
out_df = pd.DataFrame(columns=["homozygote_cases",
"heterozygote_cases"],
index=maf_df.index)
out_df["MAF"] = maf_df["MAF"]
out_df["homozygote_cases"] = np.round(homozyg_series, 1)
out_df["expected_cases"] = np.round(((out_df["MAF"] ** 2) * n_total), 3)
out_df["heterozygote_cases"] = het_series
out_df["hom_prop_explained"] = np.round(homozyg_series/float(n_cases), 3)
out_df["het_prop_explained"] = np.round(het_series/float(n_cases), 3)
    return out_df
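# A minimal, self-contained sketch of the arithmetic summarised above: homozygote
# cases sit on the diagonal of the case-count matrix, compound heterozygotes on the
# off-diagonal, and the expected homozygote count is MAF**2 * n_total. The variant
# names, MAFs and counts below are hypothetical and only illustrate the input shapes.
def _example_summary_penetrance():
    import numpy as np
    import pandas as pd
    variants = ["rs1", "rs2"]
    maf_df = pd.DataFrame({"MAF": [0.02, 0.10]}, index=variants)
    case_counts = np.array([[4.0, 1.0],
                            [1.0, 9.0]])
    out = summaryPenetrance(maf_df=maf_df, case_counts=case_counts,
                            variants=variants, n_cases=20, n_total=1000)
    # e.g. expected_cases for rs2 is 0.10**2 * 1000 = 10
    return out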
def plotPenetrances(plotting_df):
'''
Plot the proportion of cases/phenotype explained by
individuals carrying allele vs. population allele frequency.
Generate final output summary table (should be in separate function)
'''
# only need to plot variants with MAF >= 0.01
low_frq = plotting_df["MAF"] < 0.01
hi_df = plotting_df[~low_frq]
# get into R and use ggplot for MAF vs homozygosity amongs cases
r_plot = py2ri.py2ri_pandasdataframe(hi_df)
R.assign("hom.df", r_plot)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''png("%s/penetrance-plot.png", height=720, width=720)''' % os.getcwd())
R('''pen_p <- ggplot(hom.df, aes(x=explained_by_homozygotes, y=MAF, colour=SNP)) + '''
'''geom_point(size=4) + theme_bw() + '''
'''geom_text(aes(label=explained_by_homozygotes),'''
'''colour="black",vjust=0.5, hjust=0.5) + '''
'''labs(x="Number of Red haired homozygotes", y="MAF") + '''
'''theme(axis.title=element_text(size=10, colour="black"))''')
R('''print(pen_p)''')
R('''dev.off()''')
def findDuplicateVariants(bim_file, take_last=False):
'''
identify variants with duplicate position and reference
alleles
'''
# count the number of lines first to get
# the necessary array sizes
E.info("getting number of variants")
lines = 1
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
lines += 1
E.info("%i variants found" % lines)
# setup index arrays
var_array = np.empty(lines, dtype=object)
ref_alleles = np.empty(lines, dtype=object)
pos_array = np.zeros(lines, dtype=np.int64)
minor_alleles = np.empty(lines, dtype=object)
idx = 0
# find duplicates on position
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
line = line.rstrip("\n")
varline = line.split("\t")
var = varline[1]
pos = int(varline[3])
ref_allele = varline[-1]
minor_allele = varline[-2]
var_array[idx] = var
ref_alleles[idx] = ref_allele
minor_alleles[idx] = minor_allele
pos_array[idx] = pos
idx += 1
# find duplicates using pandas series
pos_series = pd.Series(pos_array)
dup_last = pos_series[pos_series.duplicated(take_last=True)]
dup_first = pos_series[pos_series.duplicated(take_last=False)]
var_series = pd.Series(var_array)
ref_series = pd.Series(ref_alleles)
alt_series = pd.Series(minor_alleles)
# a few variants have duplicate IDs - count these as duplicates
# and add to the exclusion list - these won't be identified
# based on shared position necessarily - force add them
ref_first = ref_series[ref_series.duplicated(take_last=False)]
ref_last = ref_series[ref_series.duplicated(take_last=True)]
ref_dups = set(ref_first.index).union(ref_last.index)
# union of take first and take last
dup_all = set(dup_last.index).union(set(dup_first.index))
dup_complete = dup_all.union(ref_dups)
dup_idx = np.array([sx for sx in dup_complete])
dup_idx.sort()
# make a dataframe to hold all triallelic and duplicate variants
dup_dict = {"SNP": var_series[dup_idx],
"BP": pos_series[dup_idx],
"REF": ref_series[dup_idx],
"VAR": alt_series[dup_idx]}
    dup_df = pd.DataFrame(dup_dict)
"""
<NAME> 2017
PanCancer Classifier
scripts/pancancer_classifier.py
Usage: Run in command line with required command argument:
python pancancer_classifier.py --genes $GENES
Where GENES is a comma separated string. There are also optional arguments:
--diseases comma separated string of disease types for classifier
default: Auto (will pick diseases from filter args)
--folds number of cross validation folds
default: 5
--drop drop the input genes from the X matrix
default: False if flag omitted
--copy_number optional flag to supplement copy number to define Y
default: False if flag omitted
--filter_count int of low count of mutation to include disease
default: 15
--filter_prop float of low proportion of mutated samples per disease
default: 0.05
--num_features int of number of genes to include in classifier
default: 8000
--alphas comma separated string of alphas to test in pipeline
default: '0.1,0.15,0.2,0.5,0.8,1'
--l1_ratios comma separated string of l1 parameters to test
default: '0,0.1,0.15,0.18,0.2,0.3'
--alt_genes comma separated string of alternative genes to test
default: None
--alt_diseases comma separated string of alternative diseases to test
default: Auto
--alt_filter_count int of low count of mutations to include alt_diseases
default: 15
--alt_filter_prop float of low proportion of mutated samples alt_disease
default: 0.05
--alt_folder string of where to save the classifier figures
default: Auto
--remove_hyper store_true: remove hypermutated samples
default: False if flag omitted
--keep_intermediate store_true: keep intermediate roc curve items
default: False if flag omitted
--x_matrix string of which feature matrix to use
default: raw
Output:
ROC curves, AUROC across diseases, and classifier coefficients
"""
import os
import sys
import warnings
import pandas as pd
import csv
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split, cross_val_predict
from dask_searchcv import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from statsmodels.robust.scale import mad
sys.path.insert(0, os.path.join('scripts', 'util'))
from tcga_util import get_args, get_threshold_metrics, integrate_copy_number
from tcga_util import shuffle_columns
# Load command arguments
args = get_args()
genes = args.genes.split(',')
diseases = args.diseases.split(',')
folds = int(args.folds)
drop = args.drop
drop_rasopathy = args.drop_rasopathy
copy_number = args.copy_number
filter_count = int(args.filter_count)
filter_prop = float(args.filter_prop)
num_features_kept = args.num_features
alphas = [float(x) for x in args.alphas.split(',')]
l1_ratios = [float(x) for x in args.l1_ratios.split(',')]
alt_genes = args.alt_genes.split(',')
alt_filter_count = int(args.alt_filter_count)
alt_filter_prop = float(args.alt_filter_prop)
alt_diseases = args.alt_diseases.split(',')
alt_folder = args.alt_folder
remove_hyper = args.remove_hyper
keep_inter = args.keep_intermediate
x_matrix = args.x_matrix
shuffled = args.shuffled
shuffled_before_training = args.shuffled_before_training
no_mutation = args.no_mutation
drop_expression = args.drop_expression
drop_covariates = args.drop_covariates
warnings.filterwarnings('ignore',
message='Changing the shape of non-C contiguous array')
# Generate file names for output
genes_folder = args.genes.replace(',', '_')
base_folder = os.path.join('classifiers', genes_folder)
if alt_folder != 'Auto':
base_folder = alt_folder
if not os.path.exists(base_folder):
os.makedirs(base_folder)
else:
warnings.warn('Classifier may have already been built! Classifier results'
' will be overwritten!', category=Warning)
disease_folder = os.path.join(base_folder, 'disease')
if not os.path.exists(disease_folder):
os.makedirs(disease_folder)
count_table_file = os.path.join(base_folder, 'summary_counts.csv')
cv_heatmap_file = os.path.join(base_folder, 'cv_heatmap.pdf')
full_roc_file = os.path.join(base_folder, 'all_disease_roc.pdf')
full_pr_file = os.path.join(base_folder, 'all_disease_pr.pdf')
disease_roc_file = os.path.join(base_folder, 'disease', 'classifier_roc_')
disease_pr_file = os.path.join(base_folder, 'disease', 'classifier_pr_')
dis_summary_auroc_file = os.path.join(base_folder, 'disease_auroc.pdf')
dis_summary_aupr_file = os.path.join(base_folder, 'disease_aupr.pdf')
classifier_file = os.path.join(base_folder, 'classifier_coefficients.tsv')
roc_results_file = os.path.join(base_folder, 'pancan_roc_results.tsv')
alt_gene_base = 'alt_gene_{}_alt_disease_{}'.format(
args.alt_genes.replace(',', '_'),
args.alt_diseases.replace(',', '_'))
alt_count_table_file = os.path.join(base_folder, 'alt_summary_counts.csv')
alt_gene_auroc_file = os.path.join(base_folder,
'{}_auroc_bar.pdf'.format(alt_gene_base))
alt_gene_aupr_file = os.path.join(base_folder,
'{}_aupr_bar.pdf'.format(alt_gene_base))
alt_gene_summary_file = os.path.join(base_folder,
'{}_summary.tsv'.format(alt_gene_base))
# Load Datasets
if x_matrix == 'raw':
expr_file = os.path.join('data', 'pancan_rnaseq_freeze.tsv.gz')
else:
expr_file = x_matrix
mut_file = os.path.join('data', 'pancan_mutation_freeze.tsv.gz')
mut_burden_file = os.path.join('data', 'mutation_burden_freeze.tsv')
sample_freeze_file = os.path.join('data', 'sample_freeze.tsv')
rnaseq_full_df = pd.read_table(expr_file, index_col=0)
mutation_df = pd.read_table(mut_file, index_col=0)
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
mut_burden = pd.read_table(mut_burden_file)
# Construct data for classifier
common_genes = set(mutation_df.columns).intersection(genes)
if x_matrix == 'raw':
common_genes = list(common_genes.intersection(rnaseq_full_df.columns))
else:
common_genes = list(common_genes)
y = mutation_df[common_genes]
missing_genes = set(genes).difference(common_genes)
if len(common_genes) != len(genes):
warnings.warn('All input genes were not found in data. The missing genes '
'are {}'.format(missing_genes), category=Warning)
if drop:
if x_matrix == 'raw':
rnaseq_full_df.drop(common_genes, axis=1, inplace=True)
if drop_rasopathy:
rasopathy_genes = set(['BRAF', 'CBL', 'HRAS', 'KRAS', 'MAP2K1', 'MAP2K2',
'NF1', 'NRAS', 'PTPN11', 'RAF1', 'SHOC2', 'SOS1',
'SPRED1', 'RIT1'])
rasopathy_drop = list(rasopathy_genes.intersection(rnaseq_full_df.columns))
rnaseq_full_df.drop(rasopathy_drop, axis=1, inplace=True)
# Incorporate copy number for gene activation/inactivation
if copy_number:
# Load copy number matrices
copy_loss_file = os.path.join('data', 'copy_number_loss_status.tsv.gz')
copy_loss_df = pd.read_table(copy_loss_file, index_col=0)
copy_gain_file = os.path.join('data', 'copy_number_gain_status.tsv.gz')
copy_gain_df = pd.read_table(copy_gain_file, index_col=0)
# Load cancer gene classification table
vogel_file = os.path.join('data', 'vogelstein_cancergenes.tsv')
cancer_genes = pd.read_table(vogel_file)
y = integrate_copy_number(y=y, cancer_genes_df=cancer_genes,
genes=common_genes, loss_df=copy_loss_df,
gain_df=copy_gain_df,
include_mutation=no_mutation)
# Process y matrix
y = y.assign(total_status=y.max(axis=1))
y = y.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
count_df = y.groupby('DISEASE').sum()
prop_df = count_df.divide(y['DISEASE'].value_counts(sort=False).sort_index(),
axis=0)
count_table = count_df.merge(prop_df, left_index=True, right_index=True,
suffixes=('_count', '_proportion'))
count_table.to_csv(count_table_file)
# Filter diseases
mut_count = count_df['total_status']
prop = prop_df['total_status']
if diseases[0] == 'Auto':
filter_disease = (mut_count > filter_count) & (prop > filter_prop)
diseases = filter_disease.index[filter_disease].tolist()
# Load mutation burden and process covariates
y_df = y[y.DISEASE.isin(diseases)].total_status
common_samples = list(set(y_df.index) & set(rnaseq_full_df.index))
y_df = y_df.loc[common_samples]
rnaseq_df = rnaseq_full_df.loc[y_df.index, :]
if remove_hyper:
burden_filter = mut_burden['log10_mut'] < 5 * mut_burden['log10_mut'].std()
mut_burden = mut_burden[burden_filter]
y_matrix = mut_burden.merge(pd.DataFrame(y_df), right_index=True,
left_on='SAMPLE_BARCODE')\
.set_index('SAMPLE_BARCODE')
# Add covariate information
y_sub = y.loc[y_matrix.index]['DISEASE']
covar_dummy = pd.get_dummies(sample_freeze['DISEASE']).astype(int)
covar_dummy.index = sample_freeze['SAMPLE_BARCODE']
covar = covar_dummy.merge(y_matrix, right_index=True, left_index=True)
covar = covar.drop('total_status', axis=1)
# How cross validation splits will be balanced and stratified
y_df = y_df.loc[y_sub.index]
strat = y_sub.str.cat(y_df.astype(str))
x_df = rnaseq_df.loc[y_df.index, :]
# Subset x matrix to MAD genes and scale
if x_matrix == 'raw':
med_dev = pd.DataFrame(mad(x_df), index=x_df.columns)
mad_genes = med_dev.sort_values(by=0, ascending=False)\
.iloc[0:num_features_kept].index.tolist()
x_df = x_df.loc[:, mad_genes]
fitted_scaler = StandardScaler().fit(x_df)
x_df_update = pd.DataFrame(fitted_scaler.transform(x_df),
columns=x_df.columns)
x_df_update.index = x_df.index
x_df = x_df_update.merge(covar, left_index=True, right_index=True)
# Remove information from the X matrix given input arguments
if drop_expression:
x_df = x_df.iloc[:, num_features_kept:]
elif drop_covariates:
x_df = x_df.iloc[:, 0:num_features_kept]
# Shuffle expression matrix _before_ training - this can be used as NULL model
if shuffled_before_training:
# Shuffle genes
x_train_genes = x_df.iloc[:, range(num_features_kept)]
rnaseq_shuffled_df = x_train_genes.apply(shuffle_columns, axis=1,
result_type='broadcast')
x_train_cov = x_df.iloc[:, num_features_kept:]
x_df = pd.concat([rnaseq_shuffled_df, x_train_cov], axis=1)
# Build classifier pipeline
x_train, x_test, y_train, y_test = train_test_split(x_df, y_df,
test_size=0.1,
random_state=0,
stratify=strat)
clf_parameters = {'classify__loss': ['log'],
'classify__penalty': ['elasticnet'],
'classify__alpha': alphas, 'classify__l1_ratio': l1_ratios}
estimator = Pipeline(steps=[('classify', SGDClassifier(random_state=0,
class_weight='balanced',
loss='log',
max_iter=5,
tol=None))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters,
n_jobs=-1, cv=folds, scoring='roc_auc',
return_train_score=True)
cv_pipeline.fit(X=x_train, y=y_train)
cv_results = pd.concat([pd.DataFrame(cv_pipeline.cv_results_)
.drop('params', axis=1),
pd.DataFrame.from_records(cv_pipeline
.cv_results_['params'])],
axis=1)
# Cross-validated performance heatmap
cv_score_mat = pd.pivot_table(cv_results, values='mean_test_score',
index='classify__l1_ratio',
columns='classify__alpha')
ax = sns.heatmap(cv_score_mat, annot=True, fmt='.1%')
ax.set_xlabel('Regularization strength multiplier (alpha)')
ax.set_ylabel('Elastic net mixing parameter (l1_ratio)')
plt.tight_layout()
plt.savefig(cv_heatmap_file, dpi=600, bbox_inches='tight')
plt.close()
# Get predictions
y_predict_train = cv_pipeline.decision_function(x_train)
y_predict_test = cv_pipeline.decision_function(x_test)
metrics_train = get_threshold_metrics(y_train, y_predict_train,
drop_intermediate=keep_inter)
metrics_test = get_threshold_metrics(y_test, y_predict_test,
drop_intermediate=keep_inter)
# Rerun "cross validation" for the best hyperparameter set to define
# cross-validation disease-specific performance. Each sample prediction is
# based on the fold that the sample was in the testing partition
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=x_train, y=y_train,
cv=folds, method='decision_function')
metrics_cv = get_threshold_metrics(y_train, y_cv,
drop_intermediate=keep_inter)
# Determine shuffled predictive ability of shuffled gene expression matrix
# representing a test of inflation of ROC metrics. Be sure to only shuffle
# gene names, retain covariate information (tissue type and log10 mutations)
if shuffled:
# Shuffle genes
x_train_genes = x_train.iloc[:, range(num_features_kept)]
rnaseq_shuffled_df = x_train_genes.apply(shuffle_columns, axis=1,
result_type='broadcast')
x_train_cov = x_train.iloc[:, num_features_kept:]
rnaseq_shuffled_df = pd.concat([rnaseq_shuffled_df, x_train_cov], axis=1)
y_predict_shuffled = cv_pipeline.decision_function(rnaseq_shuffled_df)
metrics_shuffled = get_threshold_metrics(y_train, y_predict_shuffled,
drop_intermediate=keep_inter)
# Decide to save ROC results to file
if keep_inter:
train_roc = metrics_train['roc_df']
train_roc = train_roc.assign(train_type='train')
test_roc = metrics_test['roc_df']
test_roc = test_roc.assign(train_type='test')
cv_roc = metrics_cv['roc_df']
cv_roc = cv_roc.assign(train_type='cv')
full_roc_df = pd.concat([train_roc, test_roc, cv_roc])
if shuffled:
shuffled_roc = metrics_shuffled['roc_df']
shuffled_roc = shuffled_roc.assign(train_type='shuffled')
full_roc_df = pd.concat([full_roc_df, shuffled_roc])
full_roc_df = full_roc_df.assign(disease='PanCan')
# Plot ROC
sns.set_style("whitegrid")
plt.figure(figsize=(3, 3))
total_auroc = {}
colors = ['blue', 'green', 'orange', 'grey']
idx = 0
metrics_list = [('Training', metrics_train), ('Testing', metrics_test),
('CV', metrics_cv)]
if shuffled:
metrics_list += [('Random', metrics_shuffled)]
for label, metrics in metrics_list:
roc_df = metrics['roc_df']
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']),
linewidth=1, c=colors[idx])
total_auroc[label] = metrics['auroc']
idx += 1
plt.axis('equal')
plt.plot([0, 1], [0, 1], color='navy', linewidth=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=8)
plt.ylabel('True Positive Rate', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(full_roc_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# Plot PR
sns.set_style("whitegrid")
plt.figure(figsize=(3, 3))
total_aupr = {}
colors = ['blue', 'green', 'orange', 'grey']
idx = 0
metrics_list = [('Training', metrics_train), ('Testing', metrics_test),
('CV', metrics_cv)]
if shuffled:
metrics_list += [('Random', metrics_shuffled)]
for label, metrics in metrics_list:
pr_df = metrics['pr_df']
plt.plot(pr_df.recall, pr_df.precision,
label='{} (AUPR = {:.1%})'.format(label, metrics['aupr']),
linewidth=1, c=colors[idx])
total_aupr[label] = metrics['aupr']
idx += 1
plt.axis('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=8)
plt.ylabel('Precision', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(full_pr_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# disease specific performance
disease_metrics = {}
for disease in diseases:
# Get all samples in current disease
sample_sub = y_sub[y_sub == disease].index
# Get true and predicted training labels
y_disease_train = y_train[y_train.index.isin(sample_sub)]
if y_disease_train.sum() < 1:
continue
y_disease_predict_train = y_predict_train[y_train.index.isin(sample_sub)]
# Get true and predicted testing labels
y_disease_test = y_test[y_test.index.isin(sample_sub)]
if y_disease_test.sum() < 1:
continue
y_disease_predict_test = y_predict_test[y_test.index.isin(sample_sub)]
# Get predicted labels for samples when they were in cross validation set
# The true labels are y_pred_train
y_disease_predict_cv = y_cv[y_train.index.isin(sample_sub)]
# Get classifier performance metrics for three scenarios for each disease
met_train_dis = get_threshold_metrics(y_disease_train,
y_disease_predict_train,
disease=disease,
drop_intermediate=keep_inter)
met_test_dis = get_threshold_metrics(y_disease_test,
y_disease_predict_test,
disease=disease,
drop_intermediate=keep_inter)
met_cv_dis = get_threshold_metrics(y_disease_train,
y_disease_predict_cv,
disease=disease,
drop_intermediate=keep_inter)
# Get predictions and metrics with shuffled gene expression
if shuffled:
y_dis_predict_shuf = y_predict_shuffled[y_train.index.isin(sample_sub)]
met_shuff_dis = get_threshold_metrics(y_disease_train,
y_dis_predict_shuf,
disease=disease,
drop_intermediate=keep_inter)
if keep_inter:
train_roc = met_train_dis['roc_df']
train_roc = train_roc.assign(train_type='train')
test_roc = met_test_dis['roc_df']
test_roc = test_roc.assign(train_type='test')
cv_roc = met_cv_dis['roc_df']
cv_roc = cv_roc.assign(train_type='cv')
full_dis_roc_df = train_roc.append(test_roc).append(cv_roc)
if shuffled:
shuffled_roc = met_shuff_dis['roc_df']
shuffled_roc = shuffled_roc.assign(train_type='shuffled')
full_dis_roc_df = full_dis_roc_df.append(shuffled_roc)
full_dis_roc_df = full_dis_roc_df.assign(disease=disease)
full_roc_df = full_roc_df.append(full_dis_roc_df)
# Store results in disease indexed dictionary
disease_metrics[disease] = [met_train_dis, met_test_dis, met_cv_dis]
if shuffled:
disease_metrics[disease] += [met_shuff_dis]
disease_auroc = {}
disease_aupr = {}
for disease, metrics_val in disease_metrics.items():
labels = ['Training', 'Testing', 'CV', 'Random']
met_list = []
idx = 0
for met in metrics_val:
lab = labels[idx]
met_list.append((lab, met))
idx += 1
disease_pr_sub_file = '{}_pred_{}.pdf'.format(disease_pr_file, disease)
disease_roc_sub_file = '{}_pred_{}.pdf'.format(disease_roc_file, disease)
# Plot disease specific PR
plt.figure(figsize=(3, 3))
aupr = []
idx = 0
for label, metrics in met_list:
pr_df = metrics['pr_df']
plt.plot(pr_df.recall, pr_df.precision,
label='{} (AUPR = {:.1%})'.format(label, metrics['aupr']),
linewidth=1, c=colors[idx])
aupr.append(metrics['aupr'])
idx += 1
disease_aupr[disease] = aupr
plt.axis('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=8)
plt.ylabel('Precision', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(disease_pr_sub_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# Plot disease specific ROC
plt.figure(figsize=(3, 3))
auroc = []
idx = 0
for label, metrics in met_list:
roc_df = metrics['roc_df']
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']),
linewidth=1, c=colors[idx])
auroc.append(metrics['auroc'])
idx += 1
disease_auroc[disease] = auroc
plt.axis('equal')
plt.plot([0, 1], [0, 1], color='navy', linewidth=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=8)
plt.ylabel('True Positive Rate', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(disease_roc_sub_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
index_lab = ['Train', 'Test', 'Cross Validation']
if shuffled:
index_lab += ['Random']
disease_auroc_df = pd.DataFrame(disease_auroc, index=index_lab).T
disease_auroc_df = disease_auroc_df.sort_values('Cross Validation',
ascending=False)
ax = disease_auroc_df.plot(kind='bar', title='Disease Specific Performance')
ax.set_ylabel('AUROC')
plt.tight_layout()
plt.savefig(dis_summary_auroc_file, dpi=600, bbox_inches='tight')
plt.close()
disease_aupr_df = pd.DataFrame(disease_aupr, index=index_lab).T
disease_aupr_df = disease_aupr_df.sort_values('Cross Validation',
ascending=False)
ax = disease_aupr_df.plot(kind='bar', title='Disease Specific Performance')
ax.set_ylabel('AUPR')
plt.tight_layout()
plt.savefig(dis_summary_aupr_file, dpi=600, bbox_inches='tight')
plt.close()
# Save classifier coefficients
final_pipeline = cv_pipeline.best_estimator_
final_classifier = final_pipeline.named_steps['classify']
coef_df = pd.DataFrame.from_dict(
{'feature': x_df.columns,
'weight': final_classifier.coef_[0]})
coef_df['abs'] = coef_df['weight'].abs()
coef_df = coef_df.sort_values('abs', ascending=False)
coef_df.to_csv(classifier_file, sep='\t')
if keep_inter:
full_roc_df.to_csv(roc_results_file, sep='\t')
# Apply the same classifier previously built to predict alternative genes
if alt_genes[0] != 'None':
# Classifying alternative mutations
y_alt = mutation_df[alt_genes]
# Add copy number info if applicable
if copy_number:
y_alt = integrate_copy_number(y=y_alt, cancer_genes_df=cancer_genes,
genes=alt_genes, loss_df=copy_loss_df,
gain_df=copy_gain_df)
# Append disease id
y_alt = y_alt.assign(total_status=y_alt.max(axis=1))
y_alt = y_alt.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
# Filter data
alt_count_df = y_alt.groupby('DISEASE').sum()
alt_prop_df = alt_count_df.divide(y_alt['DISEASE'].value_counts(sort=False)
.sort_index(), axis=0)
alt_count_table = alt_count_df.merge(alt_prop_df,
left_index=True,
right_index=True,
suffixes=('_count', '_proportion'))
alt_count_table.to_csv(alt_count_table_file)
mut_co = alt_count_df['total_status']
prop = alt_prop_df['total_status']
if alt_diseases[0] == 'Auto':
alt_filter_dis = (mut_co > alt_filter_count) & (prop > alt_filter_prop)
alt_diseases = alt_filter_dis.index[alt_filter_dis].tolist()
# Subset data
y_alt_df = y_alt[y_alt.DISEASE.isin(alt_diseases)].total_status
common_alt_samples = list(set(y_alt_df.index) & set(rnaseq_full_df.index))
y_alt_df = y_alt_df.loc[common_alt_samples]
rnaseq_alt_df = rnaseq_full_df.loc[y_alt_df.index, :]
    y_alt_matrix = mut_burden.merge(pd.DataFrame(y_alt_df), right_index=True,
                                    left_on='SAMPLE_BARCODE')\
        .set_index('SAMPLE_BARCODE')
from calendar import month_name, monthrange
from pathlib import Path, PureWindowsPath
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import math as mt
from dataclima import helioclim3
class energycalc:
def __init__(self, df,
horizon,
shadings,
iam,
soiling,
lowirradeff,
temperatureloss,
modulequality,
lid,
mismatch,
ohmicdcloss,
inverterloss,
plantcontroller,
transf_lv_mv,
transf_mv_hv,
auxloadsloss,
ohmicac_poi,
systemunavailability,
gridunavailability):
self.horizon = horizon
self.shadings = shadings
self.iam = iam
self.soiling = soiling
self.lowirradeff = lowirradeff
self.temperatureloss = temperatureloss
        self.modulequality = modulequality
self.lid = lid
self.mismatch = mismatch
self.ohmicdcloss = ohmicdcloss
self.inverterloss = inverterloss
self.plantcontroller = plantcontroller
self.transf_lv_mv = transf_lv_mv
self.transf_mv_hv = transf_mv_hv
self.auxloadsloss = auxloadsloss
self.ohmicac_poi = ohmicac_poi
self.systemunavailability = systemunavailability
self.gridunavailability = gridunavailability
self.df = self._datatreat(df)
def _datatreat(self, df):
# df.drop(columns=['Top of Atmosphere', 'Code', 'Relative Humidity',
# 'Wind direction', 'Rainfall', 'Snowfall',
# 'Snow depth'],
# inplace=True)
df = df.drop(columns=['Top of Atmosphere',
'Code', 'Relative Humidity',
'Wind direction', 'Rainfall', 'Snowfall',
'Snow depth'])
return df
def perfratio(self):
lossesModuleFactors = {'Horizon': self.horizon,
'Shadings': self.shadings,
'IAM': self.iam,
'Soiling': self.soiling}
lossesLocalFactors = {'Low Irradiance efficiency fall-off': self.lowirradeff,
'Temperature': self.temperatureloss,
'Module Quality': self.modulequality,
'LID': self.lid,
'Mismatch': self.mismatch,
'Ohmic (DC)': self.ohmicdcloss,
'Inverter': self.inverterloss,
'Plant Controller': self.plantcontroller,
'Transformers LV-MV': self.transf_lv_mv,
'Transformers MV-HV': self.transf_mv_hv,
'Auxiliary Loads': self.auxloadsloss,
'Ohmic AC until POI': self.ohmicac_poi,
'System Unavailability': self.systemunavailability,
'Grid Unavailability': self.gridunavailability}
'''
        Performance Ratio (overall plant performance)
'''
dflocalloss = pd.DataFrame(data=lossesLocalFactors.values(),
index=lossesLocalFactors.keys(),
columns=['value'])
dfmodloss = pd.DataFrame(data=lossesModuleFactors.values(),
index=lossesModuleFactors.keys(),
columns=['value'])
vector1 = np.array(dflocalloss['value'])
frac1 = (1 - (vector1)/100)
local_losses = np.cumprod(frac1, dtype=float)[-1]
vector2 = np.array(dfmodloss['value'])
frac2 = (1 - (vector2)/100)
module_losses = np.cumprod(frac2, dtype=float)[-1]
perf_ratio = local_losses * module_losses
        # print('Performance ratio: {0:.2f} %'.format(perf_ratio * 100))
return module_losses, local_losses
def production(self, modulearea, totalpower, modulepower, trackeradd):
module_losses, local_losses = self.perfratio()
        # Solar panel efficiency
eff = modulepower / (modulearea * 1000)
        # Calculate the total module area
number_modul = totalpower / modulepower # MWp total / MWp
totalmodulearea = number_modul * modulearea # m²
        # Effective irradiance of the PV plant in the plane of the modules (kWh/m²)
irrad_eff = self.df['Global Horiz'] * \
trackeradd * module_losses
# Calculating gross and net production
        gross_production = irrad_eff * totalmodulearea * eff / 1e6  # Wh to MWh
# net_prodution = gross_production * perf_ratio
net_prodution = gross_production * local_losses
dfprod = pd.DataFrame({'Gross Prod': gross_production,
'Net Prod': net_prodution})
        dfconcat = pd.concat([self.df, dfprod], axis=1, sort=False)
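# A standalone sketch of the arithmetic used by energycalc.perfratio and
# energycalc.production, with hypothetical loss percentages, module figures and
# irradiance. It does not build an energycalc instance (that needs a full
# Helioclim-3 dataframe); it only mirrors the chained-loss and energy formulas.
def _example_energy_arithmetic():
    import numpy as np
    losses_pct = np.array([3.0, 1.0, 2.5, 0.5])        # hypothetical loss factors in %
    combined = np.cumprod(1 - losses_pct / 100)[-1]     # same chaining as perfratio
    module_power = 400.0                                # Wp per module (assumed)
    module_area = 2.0                                   # m² per module (assumed)
    eff = module_power / (module_area * 1000)           # module efficiency, as in production
    total_area = (100e6 / module_power) * module_area   # hypothetical 100 MWp plant, m²
    irrad_eff = 180.0                                   # Wh/m² in the module plane (assumed)
    gross_mwh = irrad_eff * total_area * eff / 1e6      # Wh -> MWh, as in production
    net_mwh = gross_mwh * combined
    return combined, gross_mwh, net_mwh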
# Code written by <NAME>
# Purpose: Converts all subdirectories of CSV and TSV files into one Excel file.
# Required modules installed from PIP: pandas, xlsxwriter
import os
import pandas as pd
class DataToExcel:
""" Converts all subdirectory data into an unified Excel file."""
def __init__(self):
"""
self.partners is populated within the script due to events where a certain subdirectory is to be omitted.
"""
self.source = None
self.partners = self.the_partners() # Subdirectories (folder names) must be prepopulated before executing the script.
self.path_list = []
self.df_list = []
self.aggregated_data = None
def the_partners(self):
""" Update partners here.
The purpose of using the dictionary is for scalable subdirectories with varying number of skipped heading rows.
Variables:
        partner_dictionary = {STRING - partner name (Must match the folder name): INTEGER - skip heading rows (Enter 0, otherwise any integer greater than 0)}
SAMPLE: {
"partnerName": 1,
"folder2": 0,
"OtHeR", 5
}
Returns:
partner_dictionary: dictionary data per the subdirectory name and the number of rows skipped.
"""
# PartnerName (Has to match the folder name), skipHeadingRows
partner_dictionary = {
"": 0
}
return partner_dictionary
def set_source(self):
""" User input of the parent "root" directory path to get the corresponding partner subdirectory files.
To add a path, simply copy and paste or type the main directory.
        Example (Windows), where '+' marks the parent directory, '*' a subdirectory, and '^' a file:
+C:\downloads
| |
*folder1 *partnerName
| |
^file.csv | ^ partnerfile.tsv # Please have only one filetype per folder if the data's structure differs (Such as headings, columns, etc.)
| ^ Partner2.csv
What you need to enter is the main parent directory in this example.
Enter folder source path: C:\downloads
"""
self.source = input("Enter folder source path: ")
# Aggregators
def data_gather(self):
""" Gather all data from sosurce directory folder and aggregate them into a single list."""
if self.source is None: # Initiate the source if it is not yet set.
self.set_source()
for root, dirs, files in os.walk(self.source):
for name in files:
if name.endswith('.csv') or name.endswith('.tsv'): # Further branching or additional conditions can be added to scale up all file types.
self.path_list.append(os.path.join(root, name))
def frame_gen(self, file_list, frame_type, skipping_rows):
""" Generate individual pandas dataframes for each file stored in the list."""
if self.source is None: # Prevent execution if there are no files in the list.
return
for each_file in file_list:
if each_file.endswith('.csv'):
df = pd.read_csv(each_file, skiprows=skipping_rows)
            elif each_file.endswith('.tsv'):
                df = pd.read_csv(each_file, sep='\t', skiprows=skipping_rows)
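# A minimal usage sketch for DataToExcel, assuming the directory layout described
# in set_source(). The partner mapping, source path and skipped-row count are
# placeholders; only data_gather and frame_gen are exercised here, and the final
# Excel-writing step is not shown.
def _example_data_to_excel():
    converter = DataToExcel()
    converter.partners = {"partnerName": 1}  # hypothetical subdirectory -> rows to skip
    converter.source = r"C:\downloads"       # hypothetical parent directory
    converter.data_gather()                  # collects *.csv / *.tsv paths into path_list
    converter.frame_gen(converter.path_list, "csv", 1)
    return converter.path_list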
import pandas as pd
import numpy as np
import unicodedata
import re
import json
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, RFE, f_regression
from sklearn.preprocessing import MinMaxScaler
def split_dataframe(df, stratify_by=None, rand=1414, test_size=.2, validate_size=.3):
"""
Utility function to create train, validate, and test splits.
Generates train, validate, and test samples from a dataframe.
Credit to @ryanorsinger
Parameters
----------
df : DataFrame
The dataframe to be split
stratify_by : str
Name of the target variable. Ensures different results of target variable are spread between the samples. Default is None.
test_size : float
Ratio of dataframe (0.0 to 1.0) that should be kept for testing sample. Default is 0.2.
validate_size: float
Ratio of train sample (0.0 to 1.0) that should be kept for validate sample. Default is 0.3.
    rand : int
        Random seed provided to create reproducible results. Default is 1414.
Returns
-------
DataFrame
Three dataframes representing the training, validate, and test samples
"""
if stratify_by == None:
train, test = train_test_split(df, test_size=test_size, random_state=rand)
train, validate = train_test_split(train, test_size=validate_size, random_state=rand)
else:
train, test = train_test_split(df, test_size=test_size, random_state=rand, stratify=df[stratify_by])
train, validate = train_test_split(train, test_size=validate_size, random_state=rand, stratify=train[stratify_by])
return train, validate, test
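# A short usage sketch for split_dataframe on a toy frame; the column names and
# the stratification target are made up for illustration.
def _example_split_dataframe():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({
        "feature": np.random.rand(100),
        "churn": np.random.randint(0, 2, 100),  # binary target to stratify on
    })
    train, validate, test = split_dataframe(toy, stratify_by="churn")
    return train.shape, validate.shape, test.shape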
def split_dataframe_continuous_target(dframe, target, bins=5, rand=1414, test_size=.2, validate_size=.3):
"""
Utility function to create train, validate, and test splits when targeting a continuous variable.
Generates train, validate, and test samples from a dataframe when targeting a continuous variable.
Credit to @ryanorsinger
Parameters
----------
df : DataFrame
The dataframe to be split
target : str
Name of the continuous target variable. Ensures different results of target variable are spread between the samples.
test_size : float
Ratio of dataframe (0.0 to 1.0) that should be kept for testing sample. Default is 0.2.
validate_size: float
Ratio of train sample (0.0 to 1.0) that should be kept for validate sample. Default is 0.3.
    rand : int
        Random seed provided to create reproducible results. Default is 1414.
Returns
-------
DataFrame
Three dataframes representing the training, validate, and test samples
"""
df = dframe.copy()
binned_y = pd.cut(df[target], bins=bins, labels=list(range(bins)))
df["bins"] = binned_y
train_validate, test = train_test_split(df, stratify=df["bins"], test_size=test_size, random_state=rand)
train, validate = train_test_split(train_validate, stratify=train_validate["bins"], test_size=validate_size, random_state=rand)
train = train.drop(columns=["bins"])
validate = validate.drop(columns=["bins"])
test = test.drop(columns=["bins"])
return train, validate, test
def nan_null_empty_check(df):
"""
Utility function that checks for missing values in a dataframe.
    This function returns a dictionary containing the positions of NaN/None/NaT values and of empty strings.
Parameters
----------
df : DataFrame
The dataframe that you want to search.
Returns
-------
    dict
        A dictionary with 'nan_positions' and 'empty_positions', each a ([rows], [columns]) tuple of coordinates.
"""
result = {}
result['nan_positions'] = np.where(pd.isna(df))
result['empty_positions'] = np.where(df.applymap(lambda x: str(x).strip() == ""))
_print_positions(result['nan_positions'], "NaN values")
_print_positions(result['empty_positions'], "Empty values")
return result
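# A quick sketch of nan_null_empty_check on a toy frame that contains one NaN and
# one empty string; the column names are arbitrary.
def _example_missing_value_check():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": ["x", "", "z"]})
    positions = nan_null_empty_check(toy)
    # positions["nan_positions"] is (array([1]), array([0])) for this toy frame
    return positions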
def _print_positions(result, position_type):
print(position_type)
rows = pd.DataFrame(data=result[0], columns=['rows'])
columns = pd.DataFrame(data=result[1], columns=['columns'])
print(pd.concat([rows, columns], axis=1))
print("--------------------------------")
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
"""
Takes in a dataframe, the column name, and can specify a multiplier (default=1.5). Returns the upper and lower bounds for the
values in that column that signify outliers.
"""
q1 = df[column].quantile(.25)
q3 = df[column].quantile(.75)
iqr = q3 - q1
upper = q3 + (multiplier * iqr)
lower = q1 - (multiplier * iqr)
return upper, lower
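# A worked sketch of the IQR bounds: for the toy values below q1=2, q3=4 and
# iqr=2, so upper=7 and lower=-1 with the default 1.5 multiplier.
def _example_outlier_bounds():
    import pandas as pd
    toy = pd.DataFrame({"value": [1, 2, 2, 3, 4, 4, 5]})
    upper, lower = generate_outlier_bounds_iqr(toy, "value")
    return upper, lower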
def generate_scaled_splits(train, validate, test, scaler=MinMaxScaler()):
"""
    Takes in train, validate, and test samples and can specify the type of scaler to use (default=MinMaxScaler). Returns the samples
after scaling as dataframes.
"""
scaler.fit(train)
train_scaled = pd.DataFrame(scaler.transform(train), columns=train.columns)
validate_scaled = pd.DataFrame(scaler.transform(validate), columns=validate.columns)
test_scaled = pd.DataFrame(scaler.transform(test), columns=test.columns)
return train_scaled, validate_scaled, test_scaled
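# A short sketch chaining split_dataframe and generate_scaled_splits; the toy
# columns are arbitrary and the default MinMaxScaler is used.
def _example_scaled_splits():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({"x1": np.random.rand(60), "x2": np.random.rand(60) * 10})
    train, validate, test = split_dataframe(toy)
    train_s, validate_s, test_s = generate_scaled_splits(train, validate, test)
    return train_s.describe()  # training-set columns now lie in [0, 1]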
def rfe(predictors, targets, model_type, k=1):
"""
    Takes in a dataframe of predictors (ie X_train), the dataframe of targets (ie y_train), the type of model, and can specify
the amount of features you want (default k=1). Returns a list ordered by most important to least important feature. The top
features will be assigned a rank of 1 (ie if k=2, there will be 2 features with a rank of 1).
"""
model = model_type
rfe = RFE(model, k)
rfe.fit(predictors, targets)
rfe_feature_mask = rfe.support_
_print_ranks(rfe, predictors)
return predictors.iloc[:, rfe_feature_mask].columns.tolist()
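# A usage sketch for the rfe helper with an ordinary least-squares estimator; the
# regression data below is synthetic and only illustrates the expected shapes.
def _example_rfe():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    predictors = pd.DataFrame(rng.rand(50, 4), columns=["f1", "f2", "f3", "f4"])
    targets = predictors["f1"] * 3 + rng.rand(50) * 0.1
    return rfe(predictors, targets, LinearRegression(), k=2)  # two top-ranked feature names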
def select_kbest(predictors, targets, k=1):
"""
    Takes in a dataframe of predictors (ie X_train), the dataframe of targets (ie y_train), and can specify
the amount of features you want (default k=1). Returns a list ordered by most important to least important feature.
"""
f_selector = SelectKBest(f_regression, k=k)
f_selector.fit(predictors, targets)
feature_mask = f_selector.get_support()
return predictors.iloc[:, feature_mask].columns.tolist()
def _print_ranks(selector, predictors):
var_ranks = selector.ranking_
var_names = predictors.columns.tolist()
rfe_ranks_df = pd.DataFrame({'Var': var_names, 'Rank': var_ranks})
print(rfe_ranks_df.sort_values('Rank'))
def show_missing_value_stats_by_col(df):
"""
Takes in a data frame and returns information on missing values in each column.
"""
cols = df.columns
rows = len(df)
result = pd.DataFrame(index=cols, columns=['num_rows_missing', 'pct_rows_missing'])
pd.set_option('max_rows', rows)
result['num_rows_missing'] = df.isnull().sum()
result['pct_rows_missing'] = round(df.isnull().sum() / rows, 6)
return result
def show_missing_value_stats_by_row(df):
"""
Takes in a data frame and returns information on missing values in each row.
"""
total_cols = df.shape[1]
total_rows = df.shape[0]
result = pd.DataFrame(df.isnull().sum(axis=1).value_counts(), columns=['num_rows'])
pd.set_option('max_rows', total_rows)
result = result.reset_index()
result = result.rename(columns={'index' : 'num_cols_missing'})
result['pct_cols_missing'] = result['num_cols_missing'] / total_cols
result = result.set_index('num_cols_missing')
result = result.sort_values('num_cols_missing', ascending=True)
return result
def handle_missing_values(df, col_thresh, row_thresh):
"""
Takes in a data frame and thresholds (0.0 - 1.0) for columns and rows and returns the data frame after dropping the rows and
columns that are not populated at the specified threshold amounts.
"""
req_col = int(round(col_thresh * len(df.index), 0))
req_row = int(round(row_thresh * len(df.columns), 0))
df = df.dropna(axis=1, thresh=req_col)
df = df.dropna(axis=0, thresh=req_row)
return df
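# A small sketch of handle_missing_values on a toy frame: with col_thresh=0.75 and
# row_thresh=0.5, columns "c" and "d" fall below the completeness threshold and are
# dropped, after which the last row no longer meets the row threshold. Data is made up.
def _example_handle_missing():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({
        "a": [1, 2, 3, 4],
        "b": [1, 2, 3, np.nan],
        "c": [1, np.nan, 3, np.nan],
        "d": [np.nan, np.nan, np.nan, 4],
    })
    return handle_missing_values(toy, col_thresh=0.75, row_thresh=0.5)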
def set_index_to_datetime(df, column_name):
"""
Takes in a dataframe and the column name of a column containing string values formatted as dates. This function converts the
column to a pandas.Datetime object and sets as the index of the dataframe then returns the dataframe sorted by index.
"""
date_df = df.copy()
    date_df[column_name] = pd.to_datetime(date_df[column_name])
    date_df = date_df.set_index(column_name)
    return date_df.sort_index()
import numpy as np
import tempfile
import logging
import pandas as pd
from sklearn.metrics import jaccard_score
import rpGraph
######################################################################################################################
############################################## UTILITIES #############################################################
######################################################################################################################
## Compute the Jaccard index similarity coefficient score between two MIRIAM dicts
# 1.0 is a perfect match and 0.0 is a complete miss
# We assume that the meas has the "complete" information and simulated has the incomplete info
#TODO: interchange meas and sim since actually the measured has incomplete info and sim has more info
def jaccardMIRIAM(meas_miriam, sim_miriam):
values = list(set([ x for y in list(meas_miriam.values())+list(sim_miriam.values()) for x in y]))
meas_data = {}
sim_data = {}
for key in set(list(meas_miriam.keys())+list(sim_miriam.keys())):
tmp_meas_row = []
tmp_sim_row = []
for value in values:
if key in meas_miriam:
if value in meas_miriam[key]:
tmp_meas_row.append(1)
else:
tmp_meas_row.append(0)
else:
tmp_meas_row.append(0)
if key in sim_miriam:
if value in sim_miriam[key]:
tmp_sim_row.append(1)
else:
tmp_sim_row.append(0)
else:
tmp_sim_row.append(0)
meas_data[key] = tmp_meas_row
sim_data[key] = tmp_sim_row
meas_dataf = pd.DataFrame(meas_data, index=values)
sim_dataf = pd.DataFrame(sim_data, index=values)
#return meas_dataf, sim_dataf, jaccard_score(meas_dataf, sim_dataf, average='weighted')
return jaccard_score(meas_dataf, sim_dataf, average='weighted')
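# A tiny sketch of jaccardMIRIAM with hand-made MIRIAM-style dicts; the database
# names and identifiers are hypothetical. Identical dicts score 1.0 and completely
# disjoint ones 0.0; here the shared MetaNetX id and the differing ChEBI ids give
# an intermediate score.
def _example_jaccard_miriam():
    meas = {"metanetx.chemical": ["MNXM1"], "chebi": ["CHEBI:15377"]}
    sim = {"metanetx.chemical": ["MNXM1"], "chebi": ["CHEBI:16234"]}
    return jaccardMIRIAM(meas, sim)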
## Function to find the unique species
#
# pd_matrix is organised such that the rows are the simulated species and the columns are the measured ones
#
def findUniqueRowColumn(pd_matrix):
logging.debug(pd_matrix)
to_ret = {}
######################## filter by the global top values ################
logging.debug('################ Filter best #############')
#transform to np.array
x = pd_matrix.values
    #resolve the rounding issues to find the max
x = np.around(x, decimals=5)
#first round involves finding the highest values and if found set to 0.0 the rows and columns (if unique)
top = np.where(x==np.max(x))
#as long as its unique keep looping
if np.count_nonzero(x)==0:
return to_ret
while len(top[0])==1 and len(top[1])==1:
if np.count_nonzero(x)==0:
return to_ret
pd_entry = pd_matrix.iloc[[top[0][0]],[top[1][0]]]
row_name = str(pd_entry.index[0])
col_name = str(pd_entry.columns[0])
if col_name in to_ret:
logging.debug('Overwriting (1): '+str(col_name))
logging.debug(x)
to_ret[col_name] = [row_name]
#delete the rows and the columns
logging.debug('==================')
logging.debug('Column: '+str(col_name))
logging.debug('Row: '+str(row_name))
pd_matrix.loc[:, col_name] = 0.0
pd_matrix.loc[row_name, :] = 0.0
x = pd_matrix.values
x = np.around(x, decimals=5)
top = np.where(x==np.max(x))
logging.debug(pd_matrix)
logging.debug(top)
logging.debug('==================')
#################### filter by columns (measured) top values ##############
logging.debug('################ Filter by column best ############')
x = pd_matrix.values
x = np.around(x, decimals=5)
if np.count_nonzero(x)==0:
return to_ret
reloop = True
while reloop:
if np.count_nonzero(x)==0:
return to_ret
reloop = False
for col in range(len(x[0])):
if np.count_nonzero(x[:,col])==0:
continue
top_row = np.where(x[:,col]==np.max(x[:,col]))[0]
if len(top_row)==1:
top_row = top_row[0]
#if top_row==0.0:
# continue
                #check to see if any other measured pathways have the same or larger score (across)
row = list(x[top_row, :])
#remove current score consideration
row.pop(col)
if max(row)>=x[top_row, col]:
logging.warning('For col '+str(col)+' there are either better or equal values: '+str(row))
logging.warning(x)
continue
#if you perform any changes on the rows and columns, then you can perform the loop again
reloop = True
pd_entry = pd_matrix.iloc[[top_row],[col]]
logging.debug('==================')
row_name = pd_entry.index[0]
col_name = pd_entry.columns[0]
logging.debug('Column: '+str(col_name))
logging.debug('Row: '+str(row_name))
if col_name in to_ret:
logging.debug('Overwriting (2): '+str(col_name))
logging.debug(pd_matrix.values)
to_ret[col_name] = [row_name]
#delete the rows and the columns
pd_matrix.loc[:, col_name] = 0.0
pd_matrix.loc[row_name, :] = 0.0
x = pd_matrix.values
x = np.around(x, decimals=5)
logging.debug(pd_matrix)
logging.debug('==================')
    ################## lastly if there are multiple values that are not 0.0 then account for that ######
logging.debug('################# get the rest ##########')
x = pd_matrix.values
x = np.around(x, decimals=5)
if np.count_nonzero(x)==0:
return to_ret
for col in range(len(x[0])):
if not np.count_nonzero(x[:,col])==0:
top_rows = np.where(x[:,col]==np.max(x[:,col]))[0]
if len(top_rows)==1:
top_row = top_rows[0]
pd_entry = pd_matrix.iloc[[top_row],[col]]
row_name = pd_entry.index[0]
col_name = pd_entry.columns[0]
if not col_name in to_ret:
to_ret[col_name] = [row_name]
else:
logging.warning('At this point should never have only one: '+str(x[:,col]))
logging.warning(x)
else:
for top_row in top_rows:
pd_entry = pd_matrix.iloc[[top_row],[col]]
row_name = pd_entry.index[0]
col_name = pd_entry.columns[0]
if not col_name in to_ret:
to_ret[col_name] = []
to_ret[col_name].append(row_name)
logging.debug(pd_matrix)
logging.debug('###################')
return to_ret
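# A compact sketch of findUniqueRowColumn on a hand-built score matrix. Rows are
# simulated species and columns are measured species, as described above; the
# names and scores are hypothetical.
def _example_find_unique():
    import pandas as pd
    scores = pd.DataFrame({"meas_A": [0.9, 0.1], "meas_B": [0.2, 0.7]},
                          index=["sim_1", "sim_2"])
    # expected mapping: {'meas_A': ['sim_1'], 'meas_B': ['sim_2']}
    return findUniqueRowColumn(scores)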
######################################################################################################################
############################################### SPECIES ##############################################################
######################################################################################################################
## Match all the measured chemical species to the simulated chemical species between two SBML
#
# TODO: for all the measured species compare with the simulated one. Then find the measured and simulated species that match the best and exclude the
# simulated species from potentially matching with another
#
def compareSpecies(measured_rpsbml, sim_rpsbml, measured_comp_id=None, sim_comp_id=None):
############## compare species ###################
meas_sim = {}
sim_meas = {}
species_match = {}
for measured_species in measured_rpsbml.model.getListOfSpecies():
        #skip the species that are not in the right compartment, if specified
if measured_comp_id and not measured_species.getCompartment()==measured_comp_id:
continue
logging.debug('--- Trying to match chemical species: '+str(measured_species.getId())+' ---')
meas_sim[measured_species.getId()] = {}
species_match[measured_species.getId()] = {}
#species_match[measured_species.getId()] = {'id': None, 'score': 0.0, 'found': False}
#TODO: need to exclude from the match if a simulated chemical species is already matched with a higher score to another measured species
for sim_species in sim_rpsbml.model.getListOfSpecies():
            #skip the species that are not in the right compartment, if specified
if sim_comp_id and not sim_species.getCompartment()==sim_comp_id:
continue
meas_sim[measured_species.getId()][sim_species.getId()] = {'score': 0.0, 'found': False}
if not sim_species.getId() in sim_meas:
sim_meas[sim_species.getId()] = {}
sim_meas[sim_species.getId()][measured_species.getId()] = {'score': 0.0, 'found': False}
measured_brsynth_annot = sim_rpsbml.readBRSYNTHAnnotation(measured_species.getAnnotation())
sim_rpsbml_brsynth_annot = sim_rpsbml.readBRSYNTHAnnotation(sim_species.getAnnotation())
measured_miriam_annot = sim_rpsbml.readMIRIAMAnnotation(measured_species.getAnnotation())
sim_miriam_annot = sim_rpsbml.readMIRIAMAnnotation(sim_species.getAnnotation())
#### MIRIAM ####
if sim_rpsbml.compareMIRIAMAnnotations(measured_species.getAnnotation(), sim_species.getAnnotation()):
logging.debug('--> Matched MIRIAM: '+str(sim_species.getId()))
#meas_sim[measured_species.getId()][sim_species.getId()]['score'] += 0.4
meas_sim[measured_species.getId()][sim_species.getId()]['score'] += 0.2+0.2*jaccardMIRIAM(sim_miriam_annot, measured_miriam_annot)
meas_sim[measured_species.getId()][sim_species.getId()]['found'] = True
##### InChIKey ##########
#find according to the inchikey -- allow partial matches
            #compare either inchikey in brsynth annotation or MIRIAM annotation
#NOTE: here we prioritise the BRSynth annotation inchikey over the MIRIAM
measured_inchikey_split = None
sim_rpsbml_inchikey_split = None
if 'inchikey' in measured_brsynth_annot:
measured_inchikey_split = measured_brsynth_annot['inchikey'].split('-')
elif 'inchikey' in measured_miriam_annot:
if not len(measured_miriam_annot['inchikey'])==1:
                    #TODO: handle multiple inchikeys by comparing each and keeping the highest comparison value
logging.warning('There are multiple inchikey values, taking the first one: '+str(measured_miriam_annot['inchikey']))
measured_inchikey_split = measured_miriam_annot['inchikey'][0].split('-')
if 'inchikey' in sim_rpsbml_brsynth_annot:
sim_rpsbml_inchikey_split = sim_rpsbml_brsynth_annot['inchikey'].split('-')
elif 'inchikey' in sim_miriam_annot:
if not len(sim_miriam_annot['inchikey'])==1:
                    #TODO: handle multiple inchikeys by comparing each and keeping the highest comparison value
logging.warning('There are multiple inchikey values, taking the first one: '+str(sim_rpsbml_brsynth_annot['inchikey']))
sim_rpsbml_inchikey_split = sim_miriam_annot['inchikey'][0].split('-')
if measured_inchikey_split and sim_rpsbml_inchikey_split:
if measured_inchikey_split[0]==sim_rpsbml_inchikey_split[0]:
logging.debug('Matched first layer InChIkey: ('+str(measured_inchikey_split)+' -- '+str(sim_rpsbml_inchikey_split)+')')
meas_sim[measured_species.getId()][sim_species.getId()]['score'] += 0.2
if measured_inchikey_split[1]==sim_rpsbml_inchikey_split[1]:
logging.debug('Matched second layer InChIkey: ('+str(measured_inchikey_split)+' -- '+str(sim_rpsbml_inchikey_split)+')')
meas_sim[measured_species.getId()][sim_species.getId()]['score'] += 0.2
meas_sim[measured_species.getId()][sim_species.getId()]['found'] = True
if measured_inchikey_split[2]==sim_rpsbml_inchikey_split[2]:
logging.debug('Matched third layer InChIkey: ('+str(measured_inchikey_split)+' -- '+str(sim_rpsbml_inchikey_split)+')')
meas_sim[measured_species.getId()][sim_species.getId()]['score'] += 0.2
meas_sim[measured_species.getId()][sim_species.getId()]['found'] = True
sim_meas[sim_species.getId()][measured_species.getId()]['score'] = meas_sim[measured_species.getId()][sim_species.getId()]['score']
sim_meas[sim_species.getId()][measured_species.getId()]['found'] = meas_sim[measured_species.getId()][sim_species.getId()]['found']
#build the matrix to send
meas_sim_mat = {}
for i in meas_sim:
meas_sim_mat[i] = {}
for y in meas_sim[i]:
meas_sim_mat[i][y] = meas_sim[i][y]['score']
unique = findUniqueRowColumn(pd.DataFrame(meas_sim_mat))
logging.debug('findUniqueRowColumn:')
logging.debug(unique)
for meas in meas_sim:
if meas in unique:
species_match[meas] = {}
for unique_spe in unique[meas]:
species_match[meas][unique_spe] = round(meas_sim[meas][unique[meas][0]]['score'], 5)
else:
logging.warning('Cannot find a species match for the measured species: '+str(meas))
logging.debug('#########################')
logging.debug('species_match:')
logging.debug(species_match)
logging.debug('-----------------------')
return species_match
######################################################################################################################
############################################# REACTION ###############################################################
######################################################################################################################
##
# Compare that all the measured species of a reaction are found within sim species to match with a reaction.
# We assume that there cannot be two reactions that have the same species and reactants. This is maintained by SBML
# Compare also by EC number, from the third ec to the full EC
# TODO: need to remove from the list simulated reactions that have already been matched
def compareReactions(measured_rpsbml, sim_rpsbml, species_match, pathway_id='rp_pathway'):
############## compare the reactions #######################
#construct sim reactions with species
logging.debug('------ Comparing reactions --------')
#match the reactants and products conversion to sim species
tmp_reaction_match = {}
meas_sim = {}
sim_meas = {}
for measured_reaction_id in measured_rpsbml.readRPpathwayIDs(pathway_id):
logging.debug('Species match of measured reaction: '+str(measured_reaction_id))
measured_reaction = measured_rpsbml.model.getReaction(measured_reaction_id)
measured_reaction_miriam = measured_rpsbml.readMIRIAMAnnotation(measured_reaction.getAnnotation())
################ construct the dict transforming the species #######
meas_sim[measured_reaction_id] = {}
tmp_reaction_match[measured_reaction_id] = {}
for sim_reaction_id in sim_rpsbml.readRPpathwayIDs(pathway_id):
if not sim_reaction_id in sim_meas:
sim_meas[sim_reaction_id] = {}
sim_meas[sim_reaction_id][measured_reaction_id] = {}
meas_sim[measured_reaction_id][sim_reaction_id] = {}
logging.debug('\t=========== '+str(sim_reaction_id)+' ==========')
logging.debug('\t+++++++ Species match +++++++')
tmp_reaction_match[measured_reaction_id][sim_reaction_id] = {'reactants': {},
'reactants_score': 0.0,
'products': {},
'products_score': 0.0,
'species_score': 0.0,
'species_std': 0.0,
'species_reaction': None,
'ec_score': 0.0,
'ec_reaction': None,
'score': 0.0,
'found': False}
sim_reaction = sim_rpsbml.model.getReaction(sim_reaction_id)
sim_reactants_id = [reactant.species for reactant in sim_reaction.getListOfReactants()]
sim_products_id = [product.species for product in sim_reaction.getListOfProducts()]
############ species ############
logging.debug('\tspecies_match: '+str(species_match))
logging.debug('\tspecies_match: '+str(species_match.keys()))
logging.debug('\tsim_reactants_id: '+str(sim_reactants_id))
logging.debug('\tmeasured_reactants_id: '+str([i.species for i in measured_reaction.getListOfReactants()]))
logging.debug('\tsim_products_id: '+str(sim_products_id))
logging.debug('\tmeasured_products_id: '+str([i.species for i in measured_reaction.getListOfProducts()]))
#ensure that the match is 1:1
#1)Here we assume that a reaction cannot have twice the same species
cannotBeSpecies = []
#if there is a match then we loop again since removing it from the list of potential matches would be appropriate
keep_going = True
while keep_going:
logging.debug('\t\t----------------------------')
keep_going = False
for reactant in measured_reaction.getListOfReactants():
logging.debug('\t\tReactant: '+str(reactant.species))
#if a species match has been found AND if such a match has been found
founReaIDs = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][i]['id'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'] if not tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][i]['id']==None]
logging.debug('\t\tfounReaIDs: '+str(founReaIDs))
if reactant.species and reactant.species in species_match and not list(species_match[reactant.species].keys())==[] and not reactant.species in founReaIDs:
                        #return all the similar entries
'''
speMatch = list(set(species_match[reactant.species].keys())&set(sim_reactants_id))
speMatch = list(set(speMatch)-set(cannotBeSpecies))
logging.debug('\t\tspeMatch: '+str(speMatch))
if len(speMatch)==1:
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][reactant.species] = {'id': speMatch[0], 'score': species_match[reactant.species]['score'], 'found': True}
cannotBeSpecies.append(speMatch[0])
keep_going = True
logging.debug('\t\tMatched measured reactant species: '+str(reactant.species)+' with simulated species: '+str(speMatch[0]))
elif not reactant.species in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants']:
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][reactant.species] = {'id': None, 'score': 0.0, 'found': False}
#logging.debug('\t\tCould not find the following measured reactant in the current reaction: '+str(reactant.species))
'''
best_spe = [k for k, v in sorted(species_match[reactant.species].items(), key=lambda item: item[1], reverse=True)][0]
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][reactant.species] = {'id': best_spe, 'score': species_match[reactant.species][best_spe], 'found': True}
cannotBeSpecies.append(best_spe)
elif not reactant.species in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants']:
logging.warning('\t\tCould not find the following measured reactant in the matched species: '+str(reactant.species))
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][reactant.species] = {'id': None, 'score': 0.0, 'found': False}
for product in measured_reaction.getListOfProducts():
logging.debug('\t\tProduct: '+str(product.species))
foundProIDs = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][i]['id'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'] if not tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][i]['id']==None]
logging.debug('\t\tfoundProIDs: '+str(foundProIDs))
if product.species and product.species in species_match and not list(species_match[product.species].keys())==[] and not product.species in foundProIDs:
'''
#return all the similar entries
speMatch = list(set(species_match[product.species]['id'])&set(sim_products_id))
speMatch = list(set(speMatch)-set(cannotBeSpecies))
logging.debug('\t\tspeMatch: '+str(speMatch))
if len(speMatch)==1:
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][product.species] = {'id': speMatch[0], 'score': species_match[product.species]['score'], 'found': True}
cannotBeSpecies.append(speMatch[0])
keep_going = True
logging.debug('\t\tMatched measured product species: '+str(product.species)+' with simulated species: '+str(speMatch[0]))
elif not product.species in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products']:
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][product.species] = {'id': None, 'score': 0.0, 'found': False}
#logging.debug('\t\tCould not find the following measured product in the matched species: '+str(product.species))
'''
best_spe = [k for k, v in sorted(species_match[product.species].items(), key=lambda item: item[1], reverse=True)][0]
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][product.species] = {'id': best_spe, 'score': species_match[product.species][best_spe], 'found': True}
cannotBeSpecies.append(best_spe)
elif not product.species in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products']:
logging.warning('\t\tCould not find the following measured product in the matched species: '+str(product.species))
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][product.species] = {'id': None, 'score': 0.0, 'found': False}
logging.debug('\t\tcannotBeSpecies: '+str(cannotBeSpecies))
reactants_score = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][i]['score'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants']]
reactants_found = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants'][i]['found'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants']]
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['reactants_score'] = np.mean(reactants_score)
products_score = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][i]['score'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products']]
products_found = [tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products'][i]['found'] for i in tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products']]
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['products_score'] = np.mean(products_score)
### calculate pathway species score
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['species_score'] = np.mean(reactants_score+products_score)
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['species_std'] = np.std(reactants_score+products_score)
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['species_reaction'] = sim_reaction_id
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['found'] = all(reactants_found+products_found)
#tmp_reaction_match[measured_reaction_id][sim_reaction_id]['found'] = True
#break #only if we assume that one match is all that can happen TODO: calculate all matches and take the highest scoring one
#continue #if you want the match to be more continuous
########## EC number ############
#Warning we only match a single reaction at a time -- assume that there cannot be more than one to match at a given time
logging.debug('\t+++++ EC match +++++++')
if 'ec-code' in measured_reaction_miriam:
sim_reaction = sim_rpsbml.model.getReaction(sim_reaction_id)
sim_reaction_miriam = sim_rpsbml.readMIRIAMAnnotation(sim_reaction.getAnnotation())
if 'ec-code' in sim_reaction_miriam:
#we only need one match here
measured_ec = [i for i in measured_reaction_miriam['ec-code']]
sim_ec = [i for i in sim_reaction_miriam['ec-code']]
#perfect match - one can have multiple EC scores per reaction - keep the score of the highest matching one
logging.debug('\t~~~~~~~~~~~~~~~~~~~~')
logging.debug('\tMeasured EC: '+str(measured_ec))
logging.debug('\tSimulated EC: '+str(sim_ec))
measured_frac_ec = [[y for y in ec.split('.') if not y=='-'] for ec in measured_reaction_miriam['ec-code']]
sim_frac_ec = [[y for y in ec.split('.') if not y=='-'] for ec in sim_reaction_miriam['ec-code']]
#complete the ec numbers with None to be length of 4
for i in range(len(measured_frac_ec)):
for y in range(len(measured_frac_ec[i]),4):
measured_frac_ec[i].append(None)
for i in range(len(sim_frac_ec)):
for y in range(len(sim_frac_ec[i]),4):
sim_frac_ec[i].append(None)
logging.debug('\t'+str(measured_frac_ec))
logging.debug('\t'+str(sim_frac_ec))
best_ec_compare = {'meas_ec': [], 'sim_ec': [], 'score': 0.0, 'found': False}
for ec_m in measured_frac_ec:
for ec_s in sim_frac_ec:
tmp_score = 0.0
for i in range(4):
if not ec_m[i]==None and not ec_s[i]==None:
if ec_m[i]==ec_s[i]:
tmp_score += 0.25
if i==2:
best_ec_compare['found'] = True
else:
break
if tmp_score>best_ec_compare['score']:
best_ec_compare['meas_ec'] = ec_m
best_ec_compare['sim_ec'] = ec_s
best_ec_compare['score'] = tmp_score
logging.debug('\t'+str(best_ec_compare))
if best_ec_compare['found']:
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['found'] = True
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['ec_reaction'] = sim_reaction_id
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['ec_score'] = best_ec_compare['score']
logging.debug('\t~~~~~~~~~~~~~~~~~~~~')
#WARNING: here the weights are 80% for the species match and 20% for the EC match
tmp_reaction_match[measured_reaction_id][sim_reaction_id]['score'] = np.average([tmp_reaction_match[measured_reaction_id][sim_reaction_id]['species_score'], tmp_reaction_match[measured_reaction_id][sim_reaction_id]['ec_score']], weights=[0.8, 0.2])
sim_meas[sim_reaction_id][measured_reaction_id] = tmp_reaction_match[measured_reaction_id][sim_reaction_id]['score']
meas_sim[measured_reaction_id][sim_reaction_id] = tmp_reaction_match[measured_reaction_id][sim_reaction_id]['score']
### matrix compare #####
unique = findUniqueRowColumn( | pd.DataFrame(meas_sim) | pandas.DataFrame |
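# A minimal standalone sketch (illustration only, not taken from the code above) of the
# EC-number comparison it performs: both EC codes are split on '.', padded to four levels,
# and every level that matches from the left adds 0.25 to the score, stopping at the first
# mismatch; in the code above a match down to the third level also marks the reaction as found.
def ec_prefix_score(meas_ec, sim_ec):
    meas = [y for y in meas_ec.split('.') if y != '-']
    sim = [y for y in sim_ec.split('.') if y != '-']
    meas += [None] * (4 - len(meas))
    sim += [None] * (4 - len(sim))
    score = 0.0
    for m, s in zip(meas, sim):
        if m is None or s is None or m != s:
            break
        score += 0.25
    return score
# e.g. ec_prefix_score('1.1.1.1', '1.1.1.2') == 0.75 and ec_prefix_score('1.1.1.1', '1.2.-.-') == 0.25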
import sys
import os
import glob as gb
import sqlite3
import pandas as pd
def open_eso(file):
with open(file, 'r') as f:
flist = f.readlines()
return flist
def get_data_dict(flist):
data_dict = []
for f in flist:
if "End of Data Dictionary" in f:
break
data_dict.append(f)
return data_dict
def parse_header(header):
rpt_dict = {}
header = header.replace('\n','')
right = header.split('!')[1]
period = right.split(' ')[0]
left = header.split('!')[0]
leftsplit = left.split(',')
idx = leftsplit[0]
p_type = leftsplit[1]
if len(leftsplit) > 3:
keyvalue = leftsplit[2]
name = leftsplit[3]
else:
keyvalue = leftsplit[2]
name = ''
name_clean = keyvalue + ', ' + name
return [name_clean, period, keyvalue, name, p_type, idx]#, right]
def get_avail_series(file):
flist = open_eso(file)
data_dict = get_data_dict(flist)
dflist = []
for d in data_dict[7:]:
series = parse_header(d)
dflist.append(series)
df = pd.DataFrame(dflist)
df.columns = ['series_name', 'rpt_period', 'keyvalue', 'name', 'period_type', 'rpt_idx']#, 'leftover']
return df
def get_series(idx, flist, data_dict_df):
seriesarray = [[b.replace('\n','') for b in f.split(',')] for f in flist if f.split(',')[0] == str(idx)]
info = data_dict_df[data_dict_df.rpt_idx == str(idx)]
singlearray = [s[1:] for s in seriesarray[1:]]
return info, singlearray
def array_to_df(array):
info, data = array
coldict = {
'Hourly': ['Value'],
'Daily': ['Value','Min','Hour','Minute','Max','Hour','Minute'],
'Monthly': ['Value','Min','Day','Hour','Minute','Max','Day','Hour','Minute'],
'RunPeriod': ['Value','Min','Month','Day','Hour','Minute','Max','Month','Day','Hour','Minute'],
}
period = info.rpt_period.tolist()[0]
dfcolumns = coldict[period]
df = pd.DataFrame(data)
df.columns = dfcolumns
return info, df
def multi_index_df(info, df):
cols = info.values.tolist()[0]
rows = df.columns.tolist()
multi_rows = []
for row in rows:
multi_rows.append(tuple(cols + [row]))
multi = | pd.MultiIndex.from_tuples(multi_rows) | pandas.MultiIndex.from_tuples |
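# Illustrative only (the exact .eso data-dictionary layout is assumed, not taken from this file):
# a header line such as
#   "7,1,Environment,Site Outdoor Air Drybulb Temperature [C] !Hourly\n"
# would be parsed by parse_header() above into
#   ['Environment, Site Outdoor Air Drybulb Temperature [C] ', 'Hourly',
#    'Environment', 'Site Outdoor Air Drybulb Temperature [C] ', '1', '7']
# i.e. [name_clean, period, keyvalue, name, p_type, idx].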
# The analyser
import pandas as pd
import matplotlib.pyplot as plt
import dill
import os
import numpy as np
from funcs import store_namespace
from funcs import load_namespace
import datetime
from matplotlib.font_manager import FontProperties
from matplotlib import rc
community = 'ResidentialCommunity'
sim_ids = ['MinEne_0-2']
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('path to models', 'teaser_bldgs_residential'))
#
bldg_list = [bldg_list[0], bldg_list[1]]
print(bldg_list)
folder = 'results'
step = 300
nodynprice=0
mon = 'jan'
constr_folder = 'decentr_enemin_constr_'+mon
#bldg_list = bldg_list[0:1]
if mon == 'jan':
start = '1/7/2017 16:30:00'
end = '1/7/2017 19:00:00'
controlseq_time = '01/07/2017 16:55:00'
elif mon == 'mar':
start = '3/1/2017 16:30:00'
end = '3/1/2017 19:00:00'
controlseq_time = '03/01/2017 16:55:00'
elif mon=='nov':
start = '11/20/2017 16:30:00'
end = '11/20/2017 19:00:00'
controlseq_time = '11/20/2017 16:55:00'
sim_range = pd.date_range(start, end, freq = str(step)+'S')
simu_path = "path to simulation folder"
other_input = {}
price = {}
flex_cost = {}
ref_profile = {}
controlseq = {}
opt_control = {}
emutemps = {}
mpctemps = {}
opt_stats = {}
flex_down = {}
flex_up = {}
power = {}
for bldg in bldg_list:
building = bldg+'_'+model_id
for sim_id in sim_ids:
opt_stats[sim_id] = {}
controlseq[sim_id] = {}
mpctemps[sim_id] = {}
emutemps[sim_id] = {}
power[sim_id] = {}
for time_idx in sim_range:
time_idx = time_idx.strftime('%m/%d/%Y %H:%M:%S')
t = time_idx.replace('/','-').replace(':','-').replace(' ','-')
opt_stats[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'opt_stats_'+sim_id+'_'+t))
emutemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'emutemps_'+sim_id+'_'+t))
mpctemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'mpctemps_'+sim_id+'_'+t))
controlseq[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'controlseq_'+sim_id)+'_'+t)
power[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'power_'+sim_id)+'_'+t)
#flex_down[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_down'+sim_id))
#flex_up[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_up'+sim_id))
i=0
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = pd.to_datetime(emutemps_df.index)
emutemps_df.index = emutemps_df.index.shift(1, freq=str(step)+'S')
power_df = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df.index = pd.to_datetime(power_df.index)
opt_stats_df = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df.index = pd.to_datetime(opt_stats_df.index)
power_df.index = power_df.index.shift(1, freq=str(step)+'S')
else:
emutemps_df1 = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df1.index = pd.to_datetime(emutemps_df1.index)
emutemps_df1.index = emutemps_df1.index.shift(1, freq=str(step) + 'S')
emutemps_df = pd.concat([emutemps_df, emutemps_df1])
power_df1 = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df1.index = pd.to_datetime(power_df1.index)
power_df1.index = power_df1.index.shift(1, freq=str(step)+'S')
power_df = pd.concat([power_df, power_df1])
opt_stats_df1 = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df1.index = | pd.to_datetime(opt_stats_df1.index) | pandas.to_datetime |
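# A minimal self-contained illustration (hypothetical building name and values) of the
# pattern used above: per-timestep result dicts keyed by timestamp strings are assembled
# into a DataFrame whose index is then shifted forward by one control step of `step` seconds.
import pandas as pd

example = {'01/07/2017 16:55:00': {'bldg_A': 21.3},
           '01/07/2017 17:00:00': {'bldg_A': 21.1}}
example_df = pd.DataFrame.from_dict(example, orient='index')
example_df.index = pd.to_datetime(example_df.index)
example_df.index = example_df.index.shift(1, freq='300S')  # stamp each result at the end of its interval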
import json
from datetime import datetime
import pandas as pd
import scrapy
class MatchesSpider(scrapy.Spider):
# set the attributes for the spider
name = "matches"
def __init__(self, **kwargs):
"""initialize the data"""
super().__init__(**kwargs)
# create data frames and safe them only if they not already existing
# matches_df = pd.DataFrame(
# columns=['Match_ID', 'Duration', 'Start_Time', 'Radiant_Team_ID', 'Radiant_Name', 'Dire_Team_ID',
# 'Dire_Name', 'League_ID', 'League_Name', 'Radiant_Score', 'Dire_Score', 'Radiant_Win', 'Dire_Win'])
# matches_df.to_csv('../Data/DotaMatches.csv', index=False)
#
# team_player_df = pd.DataFrame(
# columns=["Team_ID", "Team_Name", "Account_ID", "Account_Name", "Games_Played", "Wins", "Is_Current_Member"])
# team_player_df.to_csv('../Data/DotaTeamPlayers.csv', index=False)
# read in the dfs and convert the date column to a pandas datetime column
self.matches_df = pd.read_csv('../Data/DotaMatches.csv', index_col=False, header=0)
self.matches_df["Start_Time"] = | pd.to_datetime(self.matches_df["Start_Time"]) | pandas.to_datetime |
# Preprocessing time series data
import pandas as pd
import numpy as np
from tsfresh import extract_features
df = pd.read_csv('complete_df_7.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df['stock_open'] = df['stock_open'].astype(float)
# Create aggregate of sales down to product level
aggregate = df.groupby(['sku_key', 'tran_date']).agg({'sales':'sum',
'selling_price':'mean',
'avg_discount': 'mean',
'stock_open': 'sum'})
aggregate.reset_index(inplace=True)
# Create categorical to join to aggregates
categorical = df[['sku_key', 'sku_department', 'sku_subdepartment',
'sku_category', 'sku_subcategory', 'sku_label']]
nw_df = pd.DataFrame([], columns=['sku_key', 'sku_department',
'sku_subdepartment', 'sku_category',
'sku_subcategory', 'sku_label'])
for i in categorical['sku_key'].unique():
cats = pd.DataFrame(categorical[categorical['sku_key'] == i].iloc[0]).T
nw_df = pd.concat([nw_df, cats])
# Join categoricals and aggregates and write sku labels/joint table to csv
nw_df.reset_index(inplace=True, drop=True)
nw_df.to_csv('sku_labels.csv', index=False)
aggregate['sku_key'] = aggregate['sku_key'].astype(int)
nw_df['sku_key'] = nw_df['sku_key'].astype(int)
aggregate_df = aggregate.merge(nw_df, how='left', on='sku_key')
aggregate_df.to_csv('aggregate_products.csv', index=False)
# Extract features from TS using tsfresh and write
aggregate_df['tran_date'] = | pd.to_datetime(df['tran_date']) | pandas.to_datetime |
import itertools
import pandas as pd
import requests
from task_geo.dataset_builders.nasa.references import PARAMETERS
def nasa_data_loc(lat, lon, str_start_date, str_end_date, parms_str):
"""
Extract data for a single location.
Parameters
----------
lat : string
lon : string
str_start_date : string
str_end_date : string
parms_str : string
Returns
-------
df : pandas.DataFrame
"""
base_url = "https://power.larc.nasa.gov/cgi-bin/v1/DataAccess.py"
identifier = "identifier=SinglePoint"
user_community = "userCommunity=SSE"
temporal_average = "tempAverage=DAILY"
output_format = "outputList=JSON,ASCII"
user = "user=anonymous"
url = (
f"{base_url}?request=execute&{identifier}&{parms_str}&"
f"startDate={str_start_date}&endDate={str_end_date}&"
f"lat={lat}&lon={lon}&{temporal_average}&{output_format}&"
f"{user_community}&{user}"
)
response = requests.get(url)
data_json = response.json()
df = | pd.DataFrame(data_json['features'][0]['properties']['parameter']) | pandas.DataFrame |
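# Hypothetical usage sketch (the coordinates, dates and parameter string below are assumed
# for illustration, not taken from this module): `parms_str` is inserted directly into the
# query string, so it is expected to look like a 'parameters=...' fragment built from the
# PARAMETERS reference imported above, e.g.
#   df = nasa_data_loc('40.4', '-3.7', '20200101', '20200131', 'parameters=T2M,RH2M')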
import pandas as pd
import os
os.chdir("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/")
# Helper functions for cleanup...
import helpers
viral_ebola = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/viral_seq/survival_dataset_ebov_02262019.csv"
viral_lassa = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/viral_seq/survival_dataset_lasv_04112019.csv"
ebv = pd.read_csv(viral_ebola)
lsv = pd.read_csv(viral_lassa)
cols2save = ['patientID', 'cohort', 'outcome', 'country', 'source']
# [clean ebola] ----------------------------------------------------------------------------------------------------
ebv['cohort'] = "Ebola"
ebv['source'] = "ViralSeq_Raphaelle_EBV02262019"
ebv['outcome_input'] = ebv.outcome
ebv['outcome'] = ebv.outcome_input.apply(helpers.cleanOutcome)
ebv['country'] = ebv.country.apply(helpers.getCountryName)
ebv['patientID'] = ebv.sample_id.apply(helpers.interpretID)
ebv = ebv[cols2save]
ebv['kgh']= ebv.patientID.apply(helpers.checkIDstructure).apply(lambda x: not x)
ebv.kgh.value_counts()
# [clean lassa] ----------------------------------------------------------------------------------------------------
lsv['cohort'] = "Lassa"
lsv['source'] = "ViralSeq_Raphaelle_LSV04112019"
lsv['outcome_input'] = lsv.outcome
lsv['outcome'] = lsv.outcome_input.apply(helpers.cleanOutcome)
lsv['country'] = lsv.country.apply(helpers.getCountryName)
lsv['patientID'] = lsv.sample_id.apply(helpers.interpretID)
lsv = lsv[cols2save]
# [concat!] ----------------------------------------------------------------------------------------------------
viralseq = | pd.concat([lsv, ebv]) | pandas.concat |
# Copyright (c) 2021 Sony Group Corporation and Hanjuku-kaso Co., Ltd. All Rights Reserved.
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
import argparse
from distutils.util import strtobool
from pathlib import Path
import pickle
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
import numpy as np
from pandas import DataFrame
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.ensemble import HistGradientBoostingClassifier as LightGBM
from sklearn.model_selection import RandomizedSearchCV
from sklearn.calibration import CalibratedClassifierCV
from obp.dataset import MultiClassToBanditReduction
from obp.ope import (
InverseProbabilityWeightingTuning,
SelfNormalizedInverseProbabilityWeighting,
DirectMethod,
DoublyRobustTuning,
SelfNormalizedDoublyRobust,
SwitchDoublyRobustTuning,
DoublyRobustWithShrinkageTuning,
)
from pyieoe.evaluator import InterpretableOPEEvaluator
# hyperparameter space for the OPE estimators themselves
from conf import ope_estimator_hyperparams
# hyperparameter space for the regression model used in model dependent OPE estimators
from conf import ope_regression_uniform_hyperparams
from conf import ope_regression_rscv_hyperparams
# parameters for behavior policy and candidate evaluation policies
from conf import base_eval_policy_params
# preprocess for dataset
from util.preprocess import preprocess_and_get_dataset
# load and preprocess datasets
filepath = "data/"
optdigits, pendigits, satimage = preprocess_and_get_dataset(filepath)
# dict for datasets
dataset_dict = {
"optdigits": optdigits,
"pendigits": pendigits,
"satimage": satimage,
}
# compared ope estimators
ope_estimators = [
InverseProbabilityWeightingTuning(
lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="IPWps"
),
SelfNormalizedInverseProbabilityWeighting(estimator_name="SNIPW"),
DirectMethod(estimator_name="DM"),
DoublyRobustTuning(
lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="DRps"
),
SelfNormalizedDoublyRobust(estimator_name="SNDR"),
SwitchDoublyRobustTuning(
taus=ope_estimator_hyperparams.tau_lambda, estimator_name="Switch-DR"
),
DoublyRobustWithShrinkageTuning(
lambdas=ope_estimator_hyperparams.tau_lambda, estimator_name="DRos"
),
]
ope_estimator_hyperparams_ = {
DirectMethod.estimator_name: ope_estimator_hyperparams.dm_param,
DoublyRobustTuning.estimator_name: ope_estimator_hyperparams.dr_param,
SelfNormalizedDoublyRobust.estimator_name: ope_estimator_hyperparams.sndr_param,
SwitchDoublyRobustTuning.estimator_name: ope_estimator_hyperparams.switch_dr_param,
DoublyRobustWithShrinkageTuning.estimator_name: ope_estimator_hyperparams.dros_param,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="evaluate off-policy estimators with multi-class classification data."
)
parser.add_argument(
"--n_seeds",
type=int,
default=500,
help="number of seeds used in the experiment",
)
parser.add_argument(
"--dataset_name",
type=str,
choices=["optdigits", "pendigits", "satimage"],
required=True,
help="the name of the multi-class classification dataset",
)
parser.add_argument(
"--use_random_search",
type=strtobool,
default=False,
help="whether to use random search for hyperparameter selection or not, otherwise uniform sampling is used",
)
parser.add_argument(
"--use_estimated_pscore",
type=strtobool,
default=False,
help="whether to use estimated pscore or not, otherwise ground-truth pscore is used",
)
parser.add_argument(
"--use_calibration",
type=strtobool,
default=False,
help="whether to use calibration for pscore estimation or not, only available when use_random_search=True",
)
parser.add_argument(
"--eval_size",
type=float,
default=0.7,
help="the proportion of the dataset to include in the evaluation split",
)
parser.add_argument(
"--au_cdf_threshold",
type=float,
default=0.001,
help="threshold (the maximum error allowed, z_max) for AU-CDF",
)
parser.add_argument(
"--cvar_alpha",
type=int,
default=70,
help="the percentile used for calculating CVaR, should be in (0, 100)",
)
parser.add_argument("--random_state", type=int, default=12345)
args = parser.parse_args()
print(args)
# configurations
n_seeds = args.n_seeds
dataset_name = args.dataset_name
use_random_search = args.use_random_search
use_estimated_pscore = args.use_estimated_pscore
use_calibration = args.use_calibration
eval_size = args.eval_size
au_cdf_threshold = args.au_cdf_threshold
cvar_alpha = args.cvar_alpha
random_state = args.random_state
np.random.seed(random_state)
# assertion
assert 0 < eval_size < 1
assert 0 < au_cdf_threshold
assert 0 < cvar_alpha < 100
assert not (not (use_estimated_pscore and use_random_search) and use_calibration)
print("initializing experimental condition..")
# load raw data
X, y = dataset_dict[dataset_name]
# convert the raw classification data into a logged bandit dataset
dataset = MultiClassToBanditReduction(
X=X,
y=y,
base_classifier_b=base_eval_policy_params.behavior_policy.base_classifier,
alpha_b=base_eval_policy_params.behavior_policy.alpha,
dataset_name=dataset_name,
)
# split the original data into the training and evaluation sets
dataset.split_train_eval(eval_size=eval_size, random_state=random_state)
# obtain logged bandit feedback generated by the behavior policy
bandit_feedback = dataset.obtain_batch_bandit_feedback(random_state=random_state)
# obtain action choice probabilities and the ground-truth policy value for each evaluation policies
evaluation_policies = []
for eval_policy in base_eval_policy_params.evaluation_policies:
action_dist_e = dataset.obtain_action_dist_by_eval_policy(
base_classifier_e=eval_policy.base_classifier, alpha_e=eval_policy.alpha
)
ground_truth_e = dataset.calc_ground_truth_policy_value(
action_dist=action_dist_e
)
evaluation_policies.append((ground_truth_e, action_dist_e))
# regression models used in ope estimators
if use_random_search:
logistic_regression = RandomizedSearchCV(
LogisticRegression(),
ope_regression_rscv_hyperparams.logistic_regression_param,
random_state=random_state,
n_iter=5,
)
random_forest = RandomizedSearchCV(
RandomForest(),
ope_regression_rscv_hyperparams.random_forest_param,
random_state=random_state,
n_iter=5,
)
lightgbm = RandomizedSearchCV(
LightGBM(),
ope_regression_rscv_hyperparams.lightgbm_param,
random_state=random_state,
n_iter=5,
)
regression_models = [
logistic_regression,
random_forest,
lightgbm,
]
regressor_hyperparams = None
else: # uniform sampling
regression_models = [
LogisticRegression,
RandomForest,
LightGBM,
]
regressor_hyperparams = {
LogisticRegression: ope_regression_uniform_hyperparams.logistic_regression_param,
RandomForest: ope_regression_uniform_hyperparams.random_forest_param,
LightGBM: ope_regression_uniform_hyperparams.lightgbm_param,
}
# pscore estimator
if use_estimated_pscore:
if use_random_search and use_calibration:
pscore_estimation_models = [
CalibratedClassifierCV(
base_estimator=regression_models[i],
cv=2,
)
for i in range(len(regression_models))
]
else:
pscore_estimation_models = regression_models
pscore_estimator_hyperparams = regressor_hyperparams
# initializing class
if use_estimated_pscore:
if use_random_search:
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=[bandit_feedback],
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
pscore_estimators=pscore_estimation_models,
)
else: # uniform sampling
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=[bandit_feedback],
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
regression_model_hyperparams=regressor_hyperparams,
pscore_estimators=pscore_estimation_models,
pscore_estimator_hyperparams=pscore_estimator_hyperparams,
)
else: # ground-truth pscore
if use_random_search:
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=[bandit_feedback],
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
)
else: # uniform sampling
evaluator = InterpretableOPEEvaluator(
random_states=np.arange(n_seeds),
bandit_feedbacks=[bandit_feedback],
evaluation_policies=evaluation_policies,
ope_estimators=ope_estimators,
ope_estimator_hyperparams=ope_estimator_hyperparams_,
regression_models=regression_models,
regression_model_hyperparams=regressor_hyperparams,
)
# estimate policy values
print("started experiment")
policy_value = evaluator.estimate_policy_value()
# calculate statistics
print("calculating statistics of estimators' performance..")
au_cdf = evaluator.calculate_au_cdf_score(threshold=au_cdf_threshold)
au_cdf_scaled = evaluator.calculate_au_cdf_score(
threshold=au_cdf_threshold, scale=True
)
cvar = evaluator.calculate_cvar_score(alpha=cvar_alpha)
cvar_scaled = evaluator.calculate_cvar_score(alpha=cvar_alpha, scale=True)
std = evaluator.calculate_variance(std=True)
std_scaled = evaluator.calculate_variance(scale=True, std=True)
mean = evaluator.calculate_mean()
mean_scaled = evaluator.calculate_mean(scale=True)
# rscv/uniform, estimated/ground-truth pscore option
if use_random_search:
if use_estimated_pscore:
if use_calibration:
option = "/rscv_pscore_estimate_calibration"
else:
option = "/rscv_pscore_estimate"
else:
option = "/rscv_pscore_true"
else:
if use_estimated_pscore:
if use_calibration:
option = "/uniform_pscore_estimate_calibration"
else:
option = "/uniform_pscore_estimate"
else:
option = "/uniform_pscore_true"
# save results of the evaluation of off-policy estimators
# in './logs/(dataset_name)/(option)' directory.
log_path = Path("./logs/" + dataset_name + option)
log_path.mkdir(exist_ok=True, parents=True)
print("the results will be saved in", log_path)
# save evaluator in order to change au_cdf_threshold and cvar_alpha afterwards
f = open(log_path / "evaluator.pickle", "wb")
pickle.dump(evaluator, f)
f.close()
# save au_cdf
au_cdf_df = | DataFrame() | pandas.DataFrame |
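# A minimal self-contained sketch (not part of pyieoe; formulas assumed from the descriptions
# above) of the two summary statistics computed by the evaluator: AU-CDF integrates the
# empirical CDF of the per-seed estimation errors up to the threshold z_max, and CVaR at
# level alpha averages the worst (100 - alpha)% of those errors.
import numpy as np

def au_cdf_sketch(errors: np.ndarray, z_max: float, n_grid: int = 100) -> float:
    zs = np.linspace(0.0, z_max, n_grid)
    cdf = np.array([np.mean(errors <= z) for z in zs])
    return float(np.trapz(cdf, zs))

def cvar_sketch(errors: np.ndarray, alpha: float = 70.0) -> float:
    threshold = np.percentile(errors, alpha)
    return float(np.mean(errors[errors >= threshold]))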
from utils.utils import load_yaml
import pandas as pd
import logging
logger = logging.getLogger(__name__)
| pd.set_option('display.max_columns', 10) | pandas.set_option |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCategoryEncoder(unittest.TestCase):
def test_inverse_transform_1(self):
"""
Test no preprocessing
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
original = inverse_transform(train)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_2(self):
"""
Test multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]})
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]})
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
result1 = enc_onehot.transform(test)
result2 = enc_binary.transform(result1)
result3 = enc_ordinal.transform(result2)
result4 = enc_basen.transform(result3)
result5 = enc_target.transform(result4)
original = inverse_transform(result5, [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_3(self):
"""
Test target encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago', 'chicago'],
'state': ['US', 'FR', 'FR', 'US', 'US'],
'other': ['A', 'A', np.nan, 'B', 'B']})
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1, 0, 1], columns=['y'])
enc = ce.TargetEncoder(cols=['city', 'state']).fit(train, y)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_4(self):
"""
Test ordinal encoding
"""
train = pd.DataFrame({'city': ['chicago', 'st louis']})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_5(self):
"""
Test inverse_transform with NaN in train and handle_missing='value'; expect the NaN to be returned (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_6(self):
"""
Test inverse_transform with NaN in train and handle_missing='return_nan'; expect the NaN to be returned (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_7(self):
"""
Test inverse_transform with both handle_missing and handle_unknown set to 'return_nan', with NaN in train and an unknown value in test (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_8(self):
"""
Test inverse_transform with handle_missing='value' and handle_unknown='return_nan'; expect the inverse ordinal values
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_9(self):
"""
Test inverse_transform with handle_missing='value' and handle_unknown='return_nan', with NaN and an unknown value in test; expect the best-effort inverse ordinal values
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_10(self):
"""
test inverse_transform with multiple ordinal
"""
data = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['a', 'b']})
test = pd.DataFrame({'city': [1, 2, 2],
'state': [1, 2, 2],
'other': ['a', 'b', 'a']})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['a', 'b', 'a']})
enc = ce.OrdinalEncoder(cols=['city', 'state'])
enc.fit(data)
original = inverse_transform(test, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_11(self):
"""
Test binary encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'paris', 'monaco'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
expected = pd.DataFrame({'city': ['chicago', 'paris', np.nan],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
enc = ce.BinaryEncoder(cols=['city', 'state']).fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_12(self):
"""
test inverse_transform having data expecting a returned result
"""
train = pd.Series(list('abcd')).to_frame('letter')
enc = ce.BaseNEncoder(base=2)
result = enc.fit_transform(train)
inversed_result = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, inversed_result)
def test_inverse_transform_13(self):
"""
Test basen encoding
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_14(self):
"""
test inverse_transform having Nan in train and handle missing expected a result with Nan
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_15(self):
"""
test inverse_transform having missing and no unknown
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_16(self):
"""
test inverse_transform having handle missing value and Unknown
"""
train = | pd.DataFrame({'city': ['chicago', np.nan]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 14:43:48 2020
@author: afo
"""
import pandas as pd
import os
from os import listdir
from os.path import abspath, isfile, join
from inspect import getsourcefile
import operator
from math import nan
import numpy as np
# custom function
from get_averages import get_means
p = abspath(getsourcefile(lambda:0))
p = p.rsplit('/', 1)[0]
os.chdir(p)
print('Working Directory is: %s' % os.getcwd())
# Get list of files with data
files = [f for f in listdir(p + '/annual_financials_tech') if isfile(join(p + '/annual_financials_tech', f))]
try :
files.remove('.DS_Store')
except ValueError:
print()
# Get means for each indicator from custom function
means = get_means()
lst_hard = []
lst_soft = []
tickers = []
# List of ticker names
tickers = [t.split('.', 1)[0] for t in files]
"""
# The function evaluates each ratio against the benchmark value and records the results
inputs:
ratio = list of values of the ratio
ratio_name = full name
ratio_short_name = name to be compared with averages & used for outputs
fail_sign = operator that determines failure of the indicator; if '>' the indicator fails when it is higher than the benchmark
benchmark_value = an arbitrary value (e.g. 1) that determines the min/max acceptable value of an indicator
e.g. if fail_sign = '>' & benchmark_value = 5.0 for a D/E ratio, the ratio must stay below 5 in order not to fail
last_year = the most recent data value in the dataset (year)
fmt = the format used for the output, usually 3 decimals or %
growth = indicates whether the ratio given is a growth ratio in % or flat values
growth_benchmark = minimum growth rate needed to avoid a fail on the growth rate (e.g. if = 0, then any positive growth counts as success)
not_zero = special fail operator; if True the ratio is compared to zero instead (ignoring the previous fail_sign)
The function returns two dicts that contain all the values where this company failed, plus the years when these fails occurred.
If the ratio fails the arbitrary benchmark, that is counted as a hard failure; if it is worse than the mean of all companies, as a soft failure.
Currently the number of years failed does not matter, as it is still counted as one fail, but that could be improved in the future.
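For example, the call evaluate_ratio(key_metrics.loc['peRatio'], 'P/E Ratio', 'pe', fail_sign='>', benchmark_value=15.0)
used in the main loop below flags a hard fail if the most recent P/E values exceed 15, and a soft fail for every year
whose P/E lies above the peer mean of the last year.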
"""
def evaluate_ratio(ratio, ratio_name, ratio_short_name, fail_sign, benchmark_value=nan, last_year='2020', fmt="{:.3f}", growth=False, growth_benchmark=0, not_zero=False):
r = ratio[0:2] ### !!! Get only first 2 years for the hard failure check, can be adjusted for more or less (from 1 to 5)
# Assign the right evaluation operator
if fail_sign == '>':
comp = operator.gt
bmark = "The lower, the better:"
else:
comp = operator.lt
bmark = "The higher, the better:"
# Get global mean for the ratio
mean_ratio = float(means.at[ratio_short_name, last_year])
# Print ratio values
print("\n============================================")
print("\n" + ratio_name + '(s):')
print(ratio.astype(float).map(fmt.format).to_string())
# Print benchmarking method
print("\n"+bmark)
# Print arbitrary benchmark
print("\nBenchmark value for " + ratio_name + ":")
if benchmark_value != nan:
print(str(fmt.format(benchmark_value) + " " + fail_sign))
else:
print('No hard benchmark for this indicator.')
# Print benchmark mean
print("\nMean " + last_year + " " + ratio_name + " for all peers:")
print(fmt.format(mean_ratio))
# Compare ratios to global mean
temp = ratio.to_frame()
temp['fail'] = comp(temp.iloc[:,0], mean_ratio)
temp2 = temp.iloc[:,0].loc[temp['fail'] == True]
if len(temp2) > 0:
fails_soft[ratio_short_name+'_worse_avg'] = temp2.to_dict()
# Compare to arbitrary benchmark
if not_zero == True:
comp = operator.ne
if benchmark_value != nan:
temp = r[comp(r, benchmark_value)]
if len(temp)>0:
fails_hard[ratio_short_name+'_worse_benchmark'] = temp.to_dict()
if growth == True:
# Calculate mean growth rate
temp = ratio.mean()
print("\nMean " + ratio_name + " Growth Rate:")
print(fmt.format(temp))
if temp < growth_benchmark:
fails_hard[ratio_short_name+'_mean_growth'] = temp
return fails_hard, fails_soft
"""
# Similar function that calculates the CAGR rate for ratios where it matters - currently only for EPS
inputs:
ratio = list of values of the ratio (newest value first)
ratio_name = full name
ratio_short_name = name to be compared with averages & used for outputs
last_year = the most recent data value in the dataset (year)
fmt = the format used for the output, usually 3 decimals or %
cagr_benchmark = minimum CAGR rate needed to avoid a fail on the growth rate (e.g. if = 0, then any positive CAGR counts as success)
The function returns two dicts that contain all the values where this company failed, plus the years when these fails occurred.
Currently the number of years failed does not matter, as it is still counted as one fail, but that could be improved in the future.
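Worked example of the CAGR formula below with illustrative numbers: for an EPS series of
[3.0, 2.7, 2.4, 2.2, 2.0] (newest first, l = 5), CAGR = (3.0 / 2.0) ** (1 / 4) - 1, which is
roughly 0.107, i.e. about 10.7% per year.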
"""
def evaluate_cagr(ratio, ratio_name, ratio_short_name, last_year='2020', fmt="{:.3%}", cagr_benchmark=0):
# Calculate CAGR
if ratio[l-1] != 0:
if len(ratio) != 1:
temp = (ratio[0]/ratio[l-1])**(1/(l-1))-1
else:
temp = 0
else:
temp = 0
print("\nCAGR " + ratio_name + " Growth Rate:")
if type(temp) == complex:
print("Result is a complex number")
fails_hard[ratio_short_name+'_cagr_growth'] = temp
else:
print(fmt.format(temp))
if temp < cagr_benchmark:
fails_hard[ratio_short_name+'_cagr_growth'] = temp
return fails_hard, fails_soft
## Function to write the data back to the Excel files and add the fails tabs
def write_excel(fails_hard, fails_soft, ticker):
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter( p + '/annual_financials_tech/' + ticker + '.xlsx', engine='xlsxwriter')
# Write each dataframe to a different worksheet.
balance_sheet.to_excel(writer, sheet_name='BS')
income_statement.to_excel(writer, sheet_name='IS')
cash_flow.to_excel(writer, sheet_name='CF')
key_metrics.to_excel(writer, sheet_name='Key Metrics')
financial_ratios.to_excel(writer, sheet_name='Ratios')
growth_ratios.to_excel(writer, sheet_name='Growth')
dcf.to_excel(writer, sheet_name='DCF')
d1 = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in fails_hard.items() ]))
d1 = d1.transpose()
d1 = d1.rename(columns={ d1.columns[0]: "general" })
d1.to_excel(writer, sheet_name='fails_hard')
d2 = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in fails_soft.items() ]))
d2 = d2.transpose()
d2 = d2.rename(columns={ d1.columns[0]: "general" })
d2.to_excel(writer, sheet_name='fails_soft')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
# Main Loop through each ticker (file)
for i in range(0, len(files)):
xls = pd.ExcelFile( p + '/annual_financials_tech/' + files[i])
#xls = pd.ExcelFile( p + '/annual_financials_tech/MSFT.xlsx')
balance_sheet = pd.read_excel(xls, 'BS', index_col=0)
income_statement = pd.read_excel(xls, 'IS', index_col=0)
cash_flow = pd.read_excel(xls, 'CF', index_col=0)
key_metrics = pd.read_excel(xls, 'Key Metrics', index_col=0)
financial_ratios = pd.read_excel(xls, 'Ratios', index_col=0)
growth_ratios = pd.read_excel(xls, 'Growth', index_col=0)
dcf = pd.read_excel(xls, 'DCF', index_col=0)
# Get the number of columns (years), needed for CAGR, to find out, how many years are in the calculation
l = len(balance_sheet.columns)
# Dicts to store the results for each company
fails_hard = {}
fails_soft = {}
### For each indicator, calling an evaluate_ratio function with set parameters.
### for EPS also CAGR function
### Results are stored into dicts
# Eps growth should be non-negative each year, non-negative on average and non negative CAGR
fails_hard, fails_soft = evaluate_ratio(growth_ratios.loc['epsgrowth'], 'EPS Growth', 'epsGr', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}", growth=True, growth_benchmark = 0.0)
fails_hard, fails_soft = evaluate_cagr(income_statement.loc['eps'], 'EPS', 'eps', cagr_benchmark=0.0)
# P/E ratio should be less than 15 (a long-term S&P 500 average)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['peRatio'], 'P/E Ratio', 'pe', fail_sign='>', benchmark_value=15.0, fmt="{:.3f}")
# PEG should be less than 1 (arbitrary value)
fails_hard, fails_soft = evaluate_ratio((key_metrics.loc['peRatio'] / (growth_ratios.loc['epsgrowth']*100)), 'PEG', 'peg', fail_sign='>', benchmark_value=1.0, fmt="{:.3f}")
# P/B ratios hould be less than 3 (arbitrary value)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['pbRatio'], 'P/B Ratio', 'pb', fail_sign='>', benchmark_value=3.0, fmt="{:.3f}")
# P/S ratio, no hard benchmark
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['priceToSalesRatio'], 'P/S Ratio', 'ps', fail_sign='>', benchmark_value=nan, fmt="{:.3f}")
# Dividend payout ratio should be not 0 (at least something was paid in dividends)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['dividendPayoutRatio'], 'Dividend Payout Ratio', 'divPyr', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}", not_zero=True)
# Dividend yield should be more than 0 (at least some dividends are paid)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['dividendYield'], 'Dividend Yield', 'divYield', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}")
# ROE should be at least 10% (long-term S&P average is 14%)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['roe'], 'ROE', 'roe', fail_sign='<', benchmark_value=0.10, fmt="{:.3%}")
# ROA should be at least 5% (own arbitrary value)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['returnOnAssets'], 'ROA', 'roa', fail_sign='<', benchmark_value=0.05, fmt="{:.3%}")
# Operating income margin should be more than 13% (S&P 500 average in 2019-2020)
fails_hard, fails_soft = evaluate_ratio(income_statement.loc['operatingIncomeRatio'], 'Operating Income Ratio', 'opIncR', fail_sign='<', benchmark_value=0.13, fmt="{:.3%}")
# Growth in operating income margin should be more than 0%, i.e. at least some growth (own arbitrary value)
fails_hard, fails_soft = evaluate_ratio(growth_ratios.loc['operatingIncomeGrowth'], 'Operating Income Growth', 'opIncGr', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}", growth=True, growth_benchmark=0.0)
# Net income margin should be more than 9% (S&P 500 average in 2019-2020)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['netProfitMargin'], 'Net Profit Margin', 'netPrMar', fail_sign='<', benchmark_value=0.09, fmt="{:.3%}")
# Cash ratio should be more than 0.12, as indicator of stability (average for S&P companies in 2019)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['cashRatio'], 'Cash Ratio', 'cashR', fail_sign='<', benchmark_value=0.12, fmt="{:.3f}")
# Current ratio should be more than 1, as indicator of stability (arbitrary value)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['currentRatio'], 'Current Ratio', 'currentR', fail_sign='<', benchmark_value=1, fmt="{:.3f}")
# D/E ratio should be less than 5, as indicator of stability (average for S&P companies in 2019-2020)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['debtToEquity'], 'D/E Ratio', 'de', fail_sign='>', benchmark_value=5.0, fmt="{:.3f}")
# Interest Coverage ratio should be more than 5, as indicator of stability (average for S&P companies in 2019-2020)
fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['interestCoverage'], 'Interest Coverage Ratio', 'intCov', fail_sign='<', benchmark_value=5.0, fmt="{:.3f}")
# Asset turnover ratio should be more than 0.3, as indicator of stability (average for S&P companies in 2019-2020)
fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['assetTurnover'], 'Asset Turnover Ratio', 'asTurn', fail_sign='<', benchmark_value=0.3, fmt="{:.3f}")
# Asset growth should be more than 0%, i.e. at least some growth (own arbitrary value)
fails_hard, fails_soft = evaluate_ratio(growth_ratios.loc['assetGrowth'], 'Asset Growth', 'asGr', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}", growth=True, growth_benchmark = 0.0)
# FCF growth should be more than 0%, i.e. at least some growth (own arbitrary value)
fails_hard, fails_soft = evaluate_ratio(growth_ratios.loc['freeCashFlowGrowth'], 'FCF Growth', 'fcfGr', fail_sign='<', benchmark_value=0.0, fmt="{:.3%}", growth=True, growth_benchmark = 0.0)
# D/A ratio removed
#fails_hard, fails_soft = evaluate_ratio(key_metrics.loc['debtToAssets'], 'D/A Ratio', 'da', fail_sign='>', benchmark_value=1, fmt="{:.3f}")
# Payables turnover removed
#fails_hard, fails_soft = evaluate_ratio(financial_ratios.loc['payablesTurnover'], 'Payables Turnover Ratio', 'payTurn', fail_sign='<', benchmark_value=0, fmt="{:.3f}")
# Dicts are appended to lists to preserve order
lst_hard.append(fails_hard)
lst_soft.append(fails_soft)
# Write back to files, adding fails tabs
write_excel(fails_hard, fails_soft, tickers[i])
# All the information is getting wrapped to a pandas df - one for hard, second for soft fails
results_hard = dict(zip(tickers, lst_hard))
results_soft = dict(zip(tickers, lst_soft))
temp = pd.DataFrame.from_dict(results_hard,orient='index')
temp2 = pd.DataFrame.from_dict(results_soft,orient='index')
# Counting how many fails occurred for each ticker - softs and hards (fails in multiple years for one indicator are counted as a single fail)
temp['count_hard'] = temp.count(axis=1)
temp2['count_soft'] = temp2.count(axis=1)
temp = temp.join(temp2, how='outer')
temp['count_total'] = temp['count_hard'] + (temp['count_soft'] / 2) # weighted count, where soft fails carry half the weight of hard fails
"""
# Creating lists of companies to be selected based on the results
Basic logic:
several lists are created, selecting the companies with the fewest fails
1. companies with fewer fails than the median (50th percentile)
2. fewer fails than 75% of companies (25th percentile)
3. fewer than 90% of companies (10th percentile)
4. fewer than 95% of companies (5th percentile)
As a result, lists of ticker names are returned
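Illustrative example of the percentile selection below: if count_total were [1, 2, 3, 4, 6, 9],
np.percentile(count_total, 50) is 3.5, so 'ticker_50' keeps the three companies with at most 3.5
weighted fails, while the 25th, 10th and 5th percentiles keep two, one and one company respectively.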
"""
end_results = {}
extended_results = {}
percentiles = [50, 25, 10, 5]
for i in percentiles:
selected = temp[temp['count_total']<=np.percentile(temp['count_total'], i)]
selected = selected.sort_index()
# l = selected.index
end_results['ticker_'+str(i)] = selected.index
extended_results['ticker_'+str(i)] = selected
#selected.to_csv('eligible_companies.csv')
# Results are merged into a single dataframe
end_results = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in end_results.items() ]))
# Custom tickers from the file are added - a portfolio without regard to stability, based on pure intuition / non-quantifiable research
custom_tickers = pd.read_csv('custom_tickers.csv')
# All tickers from the file are added - a portfolio without regards to stability, based on search within all tickers
all_tickers = pd.DataFrame(tickers, columns = ['ticker_all'])
# Everything is merged together and written to csv - containing several lists of tickers
# Later this file would become a source for Portfolio Theory models
end_results = pd.concat([end_results, custom_tickers], axis=1)
end_results = | pd.concat([end_results, all_tickers], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 13:06:36 2018
@author: shlomi
OK
1) fix and get time series of the SAOD volcanic index - see how Kuchar did it:
he used SAD = surface area density of aerosols at 54 hPa
2)use enso3.4 anom for enso
3)use singapore qbo(level=50) for qbo
4)use solar f10.7 for solar
5) for MLR use the MERRA grid (reduced-interpolated), and the swoosh different grids
6) prepare MLR framework and run on cluster
more analysis:
1) include BDC from papar (reconstruct 70hpa - file from chaim weighted mean for tropics after zonal mean i.e time_series only)
2) include Temp (500 hpa) from merra time_series mean -add feature to choose the "tropical" definition where the weighted mean should be taken i.e. -20 to 20 lat to -5 to 5
1) and 2) can replace enso
3) regardless print a heatmap of regressors corr.
4) mask or remove Pinatubo in the volcanic series or in all times
1) do this manually this time...
5) adding a time series reconstruction to results
1)already exists...apply on demand bc its a lot of data
2) use plotter subroutines to reconstruct the time-series fields.
3) even bettter : work with psyplot gui to select and plot what i want
6) change the data retrieval scheme to include different regressors:
1) first save the regressors in file format - regressors#1 and have a file;
2) MLR_all will run on all regressors list and append _R# suffix before .nc;
need to change save and load routines to accomplish this.
new methodology:
1) write single function to produce or load single or datasets of
regressors using _produce or _load, remember to save with _index.nc suffix
2) use load_all_regressors to load all of the regressors in reg_path
3) select specific regressors and do anomaly(after time slice)
"""
from strat_paths import work_chaim
from strat_paths import cwd
reg_path = cwd / 'regressors'
def print_saved_file(name, path):
print('{} was saved to {}'.format(name, path))
return
def load_all_regressors(loadpath=reg_path):
"""load all regressors(end with _index.nc') from loadpath to dataset"""
import xarray as xr
from collections import OrderedDict
from aux_functions_strat import path_glob
da_list = []
da_list_from_ds = []
files = sorted(path_glob(reg_path, '*index.nc'))
for file in files:
name = file.as_posix().split(
'/')[-1].split('.')[0].replace('_index', '')
try:
da = xr.load_dataarray(file)
da = da.reset_coords(drop=True)
da.name = name
da_list.append(da)
except ValueError:
ds = xr.load_dataset(file)
for da in ds.data_vars.values():
da = da.reset_coords(drop=True)
try:
da.name = da.attrs['name']
except KeyError:
da.name = name + '_' + da.name
# avoid name repetition:
da.name = "_".join(OrderedDict.fromkeys(da.name.split('_')))
da_list_from_ds.append(da)
for das in da_list_from_ds:
da_list.append(das)
ds = xr.merge(da_list)
return ds
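# Hypothetical usage of the scheme described in the module docstring (the regressor names
# and the time slice below are assumptions for illustration, not prescribed by this module):
#   ds = load_all_regressors()
#   sub = ds[['qbo_1', 'qbo_2', 'enso']].sel(time=slice('1984', '2018'))
#   anoms = sub.groupby('time.month') - sub.groupby('time.month').mean('time')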
def prepare_regressors(name='Regressors', plot=True, save=False,
rewrite_file=True, normalize=False, savepath=None,
rolling=None):
"""get all the regressors and prepare them save to file.
replaces the earlier prepare_regressors for MLR function"""
import aux_functions_strat as aux
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import os
from pathlib import Path
# regressors names and filenames dict:
reg_file_dict = {'bdc': 'era5_bdc_index.nc',
't500': 'era5_t500_index.nc',
'enso': 'anom_nino3p4_index.nc',
'solar': 'solar_10p7cm_index.nc',
'vol': 'vol_index.nc',
'qbo': 'era5_qbo_index.nc',
'olr': 'olr_index.nc',
'ch4': 'ch4_index.nc',
'wind': 'era5_wind_shear_index.nc',
'cold': 'cpt_index.nc',
'aod': 'merra2_totexttau_index.nc'}
if savepath is None:
savepath = Path().cwd() / 'regressors/'
# aod:
    aod = load_regressor(reg_file_dict['aod'], plot=False, deseason=False)
aod.name = 'aod'
# bdc:
bdc = load_regressor(reg_file_dict['bdc'], plot=False, deseason=True)
if rolling is not None:
bdc = bdc.rolling(time=3).mean()
bdc.name = 'bdc'
# t500
t500 = load_regressor(reg_file_dict['t500'], plot=False, deseason=True)
if rolling is not None:
t500 = t500.rolling(time=3).mean()
t500.name = 't500'
# ENSO
enso = load_regressor(reg_file_dict['enso'], plot=False, deseason=False)
enso.name = 'enso'
# SOLAR
solar = load_regressor(reg_file_dict['solar'], plot=False, deseason=False)
solar.name = 'solar'
# Volcanic forcing
vol = load_regressor(reg_file_dict['vol'], plot=False, deseason=False)
vol.name = 'vol'
# get the qbo 2 pcs:
qbo = load_regressor(reg_file_dict['qbo'], plot=False, deseason=False,
is_dataset=True)
qbo_1 = qbo['qbo_1']
qbo_2 = qbo['qbo_2']
# get GHG:
# ghg = load_regressor(reg_file_dict['ghg'], plot=False, deseason=False)
# ghg.name = 'ghg'
# get cold point:
cold = load_regressor(reg_file_dict['cold'], plot=False, deseason=True)
if rolling is not None:
cold = cold.rolling(time=3).mean()
cold.name = 'cold'
# get olr:
olr = load_regressor(reg_file_dict['olr'], plot=False, deseason=True)
olr.name = 'olr'
# get ch4:
ch4 = load_regressor(reg_file_dict['ch4'], plot=False, deseason=False,
normalize=True)
ch4.name = 'ch4'
# get wind_shear:
wind = load_regressor(reg_file_dict['wind'], plot=False, deseason=False)
wind.name = 'wind'
da_list = [x for x in reg_file_dict.keys() if x != 'qbo']
da_list += ['qbo_1', 'qbo_2']
ds = xr.Dataset()
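    # gather the regressor DataArrays created above by their local variable names;
    # 'qbo' was replaced by its two principal components (qbo_1, qbo_2)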
for da_name in da_list:
ds[da_name] = locals()[da_name]
# fix vol and ch4
ds['vol'] = ds['vol'].fillna(1.31)
ds = ds.reset_coords(drop=True)
# ds['ch4'] = ds['ch4'].fillna(0.019076 + 1.91089)
# if poly is not None:
# da = ds.to_array(dim='regressors').dropna(dim='time').T
# da = poly_features(da, feature_dim='regressors', degree=poly,
# interaction_only=False, include_bias=False,
# normalize_poly=False)
# ds = da.to_dataset(dim='regressors')
# name = 'Regressors_d' + str(poly)
# else:
# name = 'Regressors'
if normalize:
ds = ds.apply(aux.normalize_xr, norm=1,
keep_attrs=True, verbose=False)
if save:
if rewrite_file:
try:
                os.remove(savepath / (name + '.nc'))
except OSError as e: # if failed, report it back to the user
print("Error: %s - %s." % (e.filename, e.strerror))
print('Updating ' + name + '.nc' + ' in ' + str(savepath))
filename = name + '.nc'
ds.to_netcdf(savepath / filename)
print_saved_file(name, savepath)
if plot:
le = len(ds.data_vars)
df = ds.to_dataframe()
df.plot()
plt.figure()
if le <= 20:
sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap='bwr',
center=0.0)
else:
sns.heatmap(df.corr(), cmap='bwr', center=0.0)
return ds
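

# Typical call (a sketch; flags and paths depend on the local setup):
#     ds = prepare_regressors(name='Regressors', plot=False, save=True,
#                             savepath=reg_path, rolling=3)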
def load_regressor(regressor_file, plot=True, deseason=True, normalize=False,
path=None, is_dataset=False):
"""loads a regressor from regressors folder. you can deseason it,
plot it, normalize it, etc..."""
import xarray as xr
from pathlib import Path
if path is None:
path = Path().cwd() / 'regressors/'
if is_dataset:
reg = xr.open_dataset(path / regressor_file)
else:
reg = xr.open_dataarray(path / regressor_file)
if deseason:
from aux_functions_strat import deseason_xr
reg = deseason_xr(reg, how='mean')
if normalize:
from aux_functions_strat import normalize_xr
# normalize = remove mean and divide by std
reg = normalize_xr(reg, verbose=False)
if plot:
if is_dataset:
reg.to_pandas().plot()
else:
reg.plot()
return reg
def split_anom_nino3p4_to_EN_LN_neutral(loadpath=reg_path, savepath=None):
ds = load_all_regressors(loadpath)
enso = ds['anom_nino3p4'].dropna('time')
EN = enso[enso >= 0.5].reindex(time=enso['time']).fillna(0)
EN.attrs['action'] = 'only EN (ENSO >=0.5) kept, other is 0.'
LN = enso[enso <= -0.5].reindex(time=enso['time']).fillna(0)
LN.attrs['action'] = 'only LN (ENSO <=-0.5) kept, other is 0.'
neutral = enso[(enso > -0.5) & (enso < 0.5)
].reindex(time=enso['time']).fillna(0)
neutral.attrs['action'] = 'only neutENSO (ENSO<0.5 & ENSO>-0.5) kept, other is 0.'
if savepath is not None:
EN.to_netcdf(savepath / 'EN_index.nc')
LN.to_netcdf(savepath / 'LN_index.nc')
neutral.to_netcdf(savepath / 'neutENSO_index.nc')
return EN, LN, neutral
def _produce_wind_shear(source='singapore', savepath=None):
import xarray as xr
from pathlib import Path
if source == 'singapore':
u = _download_singapore_qbo(path=savepath)
filename = 'singapore_wind_shear_index.nc'
elif source == 'era5':
u = xr.open_dataarray(savepath / 'ERA5_U_eq_mean.nc')
filename = 'era5_wind_shear_index.nc'
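    # wind-shear proxy: vertical difference of the zonal wind between adjacent
    # pressure levels, evaluated at the 70 hPa label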
wind_shear = u.diff('level').sel(level=70)
wind_shear.name = 'wind_shear'
if savepath is not None:
wind_shear.to_netcdf(savepath / filename)
print_saved_file(filename, savepath)
return wind_shear
def _download_CH4(filename='ch4_mm.nc', loadpath=None,
trend=False, savepath=None, interpolate=False):
import xarray as xr
import pandas as pd
import wget
filepath = loadpath / filename
if filepath.is_file():
print('CH4 monthly means from NOAA ERSL already d/l and saved!')
# read it to data array (xarray)
ch4_xr = xr.open_dataset(loadpath / filename)
    # else d/l the file and first read it to df (pandas),
# then to xarray then save as nc:
else:
        print('Downloading CH4 monthly means from NOAA ERSL website...')
        link = 'ftp://aftp.cmdl.noaa.gov/products/trends/ch4/ch4_mm_gl.txt'
        wget.download(link, out=loadpath.as_posix() + '/ch4_mm_gl.txt')
        ch4_df = pd.read_csv(loadpath / 'ch4_mm_gl.txt', delim_whitespace=True,
                             comment='#',
                             names=['year', 'month', 'decimal', 'average',
                                    'average_unc', 'trend', 'trend_unc'])
ch4_df = ch4_df.drop(0)
idx = pd.to_datetime(dict(year=ch4_df.year, month=ch4_df.month,
day='01'))
ch4_df = ch4_df.set_index(idx)
ch4_df = ch4_df.drop(ch4_df.iloc[:, 0:3], axis=1)
ch4_df = ch4_df.rename_axis('time')
ch4_xr = xr.Dataset(ch4_df)
ch4_xr.attrs['long_name'] = 'Monthly averages of CH4 concentrations'
ch4_xr.attrs['units'] = 'ppb'
# if savepath is not None:
# ch4_xr.to_netcdf(savepath / filename)
# print('Downloaded CH4 monthly means data and saved it to: ' + filename)
# return ch4_xr
# if trend:
# ch4 = ch4_xr.trend
# print_saved_file('trend ch4_index.nc', savepath)
# else:
ch4 = ch4_xr.trend
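    # optionally put the trend on a fixed monthly (MS) grid for 1979-2019 and
    # spline-fill any gaps introduced by the regridding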
if interpolate:
dt = pd.date_range(start='1979-01-01', end='2019-12-01', freq='MS')
ch4 = ch4.interp(time=dt)
ch4 = ch4.interpolate_na(dim='time', method='spline')
if savepath is not None:
ch4.to_netcdf(savepath / 'ch4_index.nc', 'w')
print_saved_file('ch4_index.nc', savepath)
return ch4
def _produce_CH4_jaxa(load_path, savepath=None):
    """http://www.gosat.nies.go.jp/en/assets/whole-atmosphere-monthly-mean_ch4_dec2019.zip"""
    import pandas as pd
df = pd.read_csv(
load_path /
'10x60.trend.method2.txt',
comment='#',
header=None, delim_whitespace=True)
df.columns = ['year', 'month', 'mm', 'trend']
idx = pd.to_datetime(dict(year=df.year, month=df.month,
day='01'))
df = df.set_index(idx)
df.index.name = 'time'
df = df.drop(['year', 'month'], axis=1)
ds = df.to_xarray() * 1000.0
for da in ds.data_vars:
ds[da].attrs['unit'] = 'ppb'
return ds
def _produce_cpt_swoosh(load_path=work_chaim, savepath=None):
import xarray as xr
import pandas as pd
sw = xr.open_dataset(load_path /
'swoosh-v02.6-198401-201812/swoosh-v02.6-198401-201812-latpress-2.5deg-L31.nc', decode_times=False)
time = pd.date_range('1984-01-01', freq='MS', periods=sw.time.size)
sw['time'] = time
# cold point tropopause:
cpt = sw['cptropt']
cpt = cpt.sel(lat=slice(-15, 15))
cpt = cpt.mean('lat')
if savepath is not None:
cpt.to_netcdf(savepath / 'cpt_index.nc')
print_saved_file('cpt_index.nc', savepath)
return cpt
def _produce_cpt_sean_ERA5(load_path=work_chaim/'Sean - tropopause', savepath=None):
import xarray as xr
import pandas as pd
from aux_functions_strat import lat_mean
from aux_functions_strat import anomalize_xr
cpt = xr.load_dataset(load_path/'era5.tp.monmean.zm.nc')['ctpt']
cpt = cpt.sel(lat=slice(15, -15))
# attrs = cpt.attrs
cpt = lat_mean(cpt)
cpt.attrs['data from'] = 'ERA5'
cpt['time'] = pd.to_datetime(cpt['time'].values).to_period('M').to_timestamp()
cpt = anomalize_xr(cpt, freq='MS')
if savepath is not None:
cpt.to_netcdf(savepath / 'cpt_ERA5_index.nc')
return cpt
#def _produce_cold_point(savepath=None, lonslice=None):
# import xarray as xr
# import sys
# import os
# # lonslice is a two-tuple : (minlon, maxlon)
# if savepath is None:
# savepath = os.getcwd() + '/regressors/'
# if sys.platform == 'linux':
# work_path = '/home/shlomi/Desktop/DATA/Work Files/Chaim_Stratosphere_Data/'
# elif sys.platform == 'darwin': # mac os
# work_path = '/Users/shlomi/Documents/Chaim_Stratosphere_Data/'
# era5 = xr.open_dataarray(work_path + 'ERA5_T_eq_all.nc')
# if lonslice is None:
# # cold_point = era5.sel(level=100).quantile(0.1, ['lat',
# # 'lon'])
# cold_point = era5.sel(level=100)
# cold_point = cold_point.mean('lon')
# cold_point = cold_point.mean('lat')
# # cold_point = cold_point.rolling(time=3).mean()
#
# # cold_point = era5.sel(level=slice(150, 50)).min(['level', 'lat',
# # 'lon'])
# else:
# # cold_point = era5.sel(level=100).sel(lon=slice(*lonslice)).quantile(
# # 0.1, ['lat', 'lon'])
# cold_point = era5.sel(level=slice(150, 50)).sel(
# lon=slice(*lonslice)).min(['level', 'lat', 'lon'])
# cold_point.attrs['lon'] = lonslice
# cold_point.name = 'cold'
# cold_point.to_netcdf(savepath + 'cold_point_index.nc')
# print('Saved cold_point_index.nc to ' + savepath)
# return cold_point
def _produce_CDAS_QBO(savepath=None):
import pandas as pd
url = 'https://www.cpc.ncep.noaa.gov/data/indices/qbo.u50.index'
df = pd.read_csv(url, header=2, delim_whitespace=True)
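    # the CPC file stacks three year-by-month tables (original, anomaly,
    # standardized); locate the ANOMALY/STANDARDIZED header rows and split on them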
anom_index = df[df['YEAR'] == 'ANOMALY'].index.values.item()
orig = df.iloc[0:anom_index - 2, :]
stan_index = df[df['YEAR'] == 'STANDARDIZED'].index.values.item()
anom = df.iloc[anom_index + 2: stan_index - 2, :]
stan = df.iloc[stan_index + 2:-1, :]
dfs = []
for df in [orig, anom, stan]:
df = df.head(42) # keep all df 1979-2020
# df.drop(df.tail(1).index, inplace=True)
df = df.melt(id_vars='YEAR', var_name='MONTH')
datetime = pd.to_datetime((df.YEAR + '-' + df.MONTH).apply(str), format='%Y-%b')
df.index = datetime
df = df.sort_index()
df = df.drop(['YEAR', 'MONTH'], axis=1)
df['value'] = df['value'].astype(float)
dfs.append(df)
all_df = pd.concat(dfs, axis=1)
all_df.columns = ['original', 'anomaly', 'standardized']
all_df.index.name='time'
qbo = all_df.to_xarray()
qbo.attrs['name'] = 'qbo_cdas'
qbo.attrs['long_name'] = 'CDAS 50 mb zonal wind index'
qbo['standardized'].attrs = qbo.attrs
if savepath is not None:
qbo.to_netcdf(savepath / 'qbo_cdas_index.nc')
print_saved_file('qbo_cdas_index.nc', savepath)
return qbo
def _produce_CO2(loadpath, filename='co2.txt'):
import requests
import io
import xarray as xr
import pandas as pd
from aux_functions_strat import save_ncfile
# TODO: complete this:
filepath = loadpath / filename
if filepath.is_file():
print('co2 index already d/l and saved!')
co2 = xr.open_dataset(filepath)
else:
print('Downloading CO2 index data from cpc website...')
url = 'https://www.esrl.noaa.gov/gmd/webdata/ccgg/trends/co2/co2_mm_mlo.txt'
s = requests.get(url).content
co2_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
delim_whitespace=True, comment='#')
co2_df.columns = ['year', 'month', 'decimal_date', 'monthly_average', 'deseasonalized', 'days', 'days_std', 'mm_uncertainty']
co2_df['dt'] = pd.to_datetime(co2_df['year'].astype(str) + '-' + co2_df['month'].astype(str))
co2_df = co2_df.set_index('dt')
co2_df.index.name = 'time'
co2 = co2_df[['monthly_average', 'mm_uncertainty']].to_xarray()
co2 = co2.rename(
{'monthly_average': 'co2', 'mm_uncertainty': 'co2_error'})
co2.attrs['name'] = 'CO2 index'
co2.attrs['source'] = url
co2['co2'].attrs['units'] = 'ppm'
save_ncfile(co2, loadpath, 'co2_index.nc')
return co2
def _produce_GHG(loadpath, savepath=None):
import xarray as xr
import numpy as np
import pandas as pd
from pathlib import Path
aggi = pd.read_csv(loadpath / 'AGGI_Table.csv', index_col='Year', header=2)
aggi = aggi[:-3]
ghg = aggi.loc[:, '1990 = 1']
ghg.name = 'GHG-RF'
ghg.index = pd.to_datetime(ghg.index, infer_datetime_format=True)
ghg_m = ghg.resample('MS').interpolate()
# extend the index :
ghg_m = pd.DataFrame(data=ghg_m,
index=pd.date_range(start=ghg_m.index[0],
end='2018-09-01',
freq=ghg_m.index.freq))
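    # the AGGI table ends before 2018-09, so the monthly series is extended with
    # NaNs and filled below by extrapolating a cubic polynomial fit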
# fit data:
di = ghg_m.index
    df = ghg_m.reset_index().drop('index', axis=1)
fit_df = df.dropna()
fit = np.polyfit(fit_df.index.values, fit_df.values, 3)
extp_func = np.poly1d(np.squeeze(fit))
# extrapolate:
    nans_x = np.flatnonzero(pd.isnull(df).any(axis=1))
Y = np.expand_dims(extp_func(nans_x), 1)
df.loc[nans_x] = Y
df.index = di
ghg = xr.DataArray(np.squeeze(df), dims='time')
if savepath is not None:
ghg.to_netcdf(savepath / 'ghg_index.nc')
print_saved_file('ghg_index.nc', savepath)
return ghg
def _produce_OLR(loadpath, savepath=None):
import xarray as xr
import numpy as np
import pandas as pd
from pathlib import Path
olr = xr.open_dataset(loadpath / 'olr-monthly_v02r07_197901_201901.nc',
decode_times=False)
    olr['time'] = pd.date_range('1979-01-01', '2019-01-01', freq='MS')
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
                [pd.offsets.DateOffset(years=1)
# Copyright (c) 2020 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from multiprocessing.pool import ThreadPool
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils import PATHS
from download import install
# Install xlrd >= 1.0.0 for Excel support
install('xlrd')
cant_popul = 581078
def fix_1207():
"""
Fix error in mobility dataset of Spain from INE (Instituto Nacional de Estadística).
"""
rawdir = PATHS.rawdir / 'maestra1' / 'municipios'
src = rawdir / '20200705_maestra_1_mitma_municipio.txt.gz'
dst = rawdir / '20200712_maestra_1_mitma_municipio.txt.gz'
df = pd.read_csv(src,
sep='|',
thousands='.',
dtype={'origen': 'string', 'destino': 'string'},
compression='gzip')
# Replace date
df['fecha'] = '20200712'
# Apply thousands separator
def add_sep(x):
x = str(x)
if len(x) > 3:
return f'{str(x)[:-3]}.{str(x)[-3:]}'
else:
return x
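    # e.g. add_sep(12345) -> '12.345', restoring the thousands separator that
    # thousands='.' stripped when the file was read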
df['viajes'] = df['viajes'].apply(add_sep)
df['viajes_km'] = df['viajes_km'].apply(add_sep)
df.to_csv(dst,
sep='|',
compression='gzip',
index=False)
def process_day(tarfile):
"""
Process daily mobility files from INE.
Args:
tarfile [str, DataFrame]: Absolute path of mobility file.
Returns: Mobility dataframe.
"""
try:
df = pd.read_csv(tarfile,
sep='|',
thousands='.',
dtype={'origen': 'string', 'destino': 'string'},
compression='gzip')
except Exception as e:
print(f'Error processing {tarfile}')
raise Exception(e)
    df['fecha'] = pd.to_datetime(df.fecha, format='%Y%m%d')
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
if __name__ == '__main__':
# read data file
    df = pd.read_csv('durante_etal_2013_study1.txt', delimiter='\t')
# -*- coding: utf-8 -*-
from Functions import utils as ut
from plotly.subplots import make_subplots
from statistics import mean, stdev
from datetime import timedelta
from functools import reduce
import plotly.graph_objs as go
import plotly as py
import pandas as pd
import numpy as np
import collections
import itertools
import datetime
import shutil
import time
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../"))
from Functions import utils as ut
#TO DO: REVIEW get_recordings()
def get_recordings():
"""Get the recorded .csv files
Returns:
list: A list containing the absolute dirs for the recorded .csv files
"""
current_path = os.path.abspath(os.getcwd())
os.chdir('..')
current_path = (os.path.abspath(os.curdir))
mouse = os.path.join(current_path,'Data','Mouse','CSV')
#mouse = os.path.join(current_path,'Debug','Mouse','CSV')
mouse_files = []
for f in os.listdir(mouse):
tmp = '/'.join((mouse,f))
mouse_files.append(tmp)
keyboard = os.path.join(current_path,'Data','Keyboard','CSV')
#keyboard = os.path.join(current_path,'Debug','Keyboard','CSV')
keyboard_files = []
for f in os.listdir(keyboard):
tmp = '/'.join((keyboard,f))
keyboard_files.append(tmp)
chair = os.path.join(current_path,'Data','Chair','CSV')
#chair = os.path.join(current_path,'Debug','Chair','CSV')
chair_files = []
for f in os.listdir(chair):
tmp = '/'.join((chair,f))
chair_files.append(tmp)
return mouse_files, keyboard_files, chair_files
def convert_seconds(seconds):
"""Convert the given seconds into hh:mm:ss format
Args:
seconds (float): The given seconds
Returns:
list: A list containing the converted seconds into time format
"""
seconds = list(map(int,seconds))
dt0 = datetime.datetime(1,1,1)
time = []
for i,t in enumerate(seconds):
h = int(t // 3600)
m = int(t % 3600 // 60)
s = int(t % 3600 % 60)
td = datetime.timedelta(hours=h, minutes=m, seconds=s)
td = (dt0+td).strftime('%X')
time.append(td)
time = np.array(time)
return time
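# Illustrative usage sketch: a quick sanity check of convert_seconds() on a few
# hand-picked values; it assumes the '%X' format above renders as zero-padded HH:MM:SS.
def _demo_convert_seconds():
    sample = [0, 61.4, 3661.9]      # seconds since midnight
    print(convert_seconds(sample))  # expected: ['00:00:00' '00:01:01' '01:01:01']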
def get_seconds(timestamp,index):
"""Convert timestamp into seconds
Args:
timestamp (list): A list containing the csv file timestamps
index (int): The index where the day changes, if None there
is not any next day into the recorded file
Returns:
list: Timestamps in second format
"""
tmp = []
if index != None:
tmp = [i for i,t in enumerate(timestamp) if i >= index] #save the next day indexes
tmp1 = [t for i,t in enumerate(timestamp) if i < index]
tmp1 = tmp1[-1].split(':') #get the last time of the previous day
if len(tmp1) == 3:
tmp1 = float(tmp1[0]) * 3600 + float(tmp1[1]) * 60 + float(tmp1[2].replace(',', '.'))
else:
print('Problem Reading Data')
return
times_seconds = []
for i, t in enumerate(timestamp):
split = t.split(':')
if len(split) == 3:
if i in tmp:
seconds = float(split[0]) * 3600 + float(split[1]) * 60 + float(split[2].replace(',', '.')) + tmp1
times_seconds.append(seconds)
else:
seconds = float(split[0]) * 3600 + float(split[1]) * 60 + float(split[2].replace(',', '.'))
times_seconds.append(seconds)
else:
print('Problem Reading Data')
return
del tmp
return times_seconds
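# Illustrative usage sketch: get_seconds() with and without a day rollover. The
# timestamps are hypothetical 'HH:MM:SS,ms' strings in the comma-decimal style
# handled above; index marks the first sample that belongs to the next day.
def _demo_get_seconds():
    same_day = ['23:59:58,500', '23:59:59,000']
    print(get_seconds(same_day, None))   # [86398.5, 86399.0]
    next_day = ['23:59:59,000', '00:00:01,000']
    print(get_seconds(next_day, 1))      # [86399.0, 86400.0] (second value offset past midnight)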
def absoluteFilePaths(directory) -> list:
"""Get the absolute paths given a directory
Args:
directory (str): The directory to examine
Returns:
list: A list containing the file names in absolute path format
"""
l = []
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
l.append(os.path.abspath(os.path.join(dirpath, f)))
return l
def velocity(array):
""" Get a numpy array of mouse positions on the screen
and calculate their average speed
Args:
array (floats): Mouse position on the screen
Returns:
numpy array: Return mouse speed
"""
array = np.array(array)
return np.sum(np.abs(array[1:] - array[0:-1])) / len(array)
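# Worked example: velocity() returns the total absolute displacement divided by the
# number of samples, i.e. sum(|x[i+1] - x[i]|) / len(x).
def _demo_velocity():
    positions = [0, 3, 1, 1]     # hypothetical cursor x-coordinates
    print(velocity(positions))   # (|3-0| + |1-3| + |1-1|) / 4 = 1.25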
def get_mouse_features(filename,segment_size):
"""Mouse feature extraction
Args:
filename (str): CSV recordings file
segment_size (int): Create segments on the given number
Returns:
np.arrays: Containing the calculated features, the segment time centers and the starting day
"""
data = pd.read_csv(filename, delimiter=',')
times = list(data['Time'])
x, y, b = data['PosX'], data['PosY'], data['Button']
day_start = data['Date'][0]
next_dates = (data['Date'] > day_start)
if next_dates.any():
next_dates = data.loc[next_dates]
next_day = int(next_dates.index[0])
times_seconds = get_seconds(times,next_day)
else:
next_day = None
times_seconds = get_seconds(times,next_day)
mouse_buttons = [
'Button.left',
'Button.right',
'Button.middle'
]
#start = times_seconds[0]
tmp = ut.splitall(filename) #get the first timestamp from the filename
tmp = tmp[-1]
start = tmp[11:-4].split('_')
start = float(start[0]) * 3600 + float(start[1]) * 60 + float(start[2])
features = []
segment_centers = []
clicks = r_clicks = l_clicks = m_clicks = 0
while start + segment_size < times_seconds[-1]:
end = start + segment_size
cur_x = [ix for i, ix in enumerate(x) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_y = [iy for i, iy in enumerate(y) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_b = [ib for i, ib in enumerate(b) if times_seconds[i] >=start and times_seconds[i] <= end]
velocity_x = velocity(cur_x)
velocity_y = velocity(cur_y)
        for index, button in enumerate(cur_b):
            if button == 'None':
                pass
            elif button in mouse_buttons:
                clicks += 1.0  # total clicks, regardless of which button
                if button == mouse_buttons[0]:
                    l_clicks += 1.0
                elif button == mouse_buttons[1]:
                    r_clicks += 1.0
                elif button == mouse_buttons[2]:
                    m_clicks += 1.0
features.append([velocity_x, velocity_y, clicks/segment_size, r_clicks/segment_size,
l_clicks/segment_size, m_clicks/segment_size])
segment_centers.append(start + segment_size / 2)
start += segment_size
features = np.array(features)
segment_centers = np.array(segment_centers)
return features, segment_centers, day_start
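# Hypothetical usage sketch: extract 30-second mouse features from one recording.
# The path and file name below are illustrative only; get_mouse_features() expects the
# start time to be encoded in the file name (parsed via ut.splitall()) and the CSV to
# contain Time, Date, PosX, PosY and Button columns.
def _demo_get_mouse_features():
    mouse_csv = 'Data/Mouse/CSV/2021-03-01 10_15_00.csv'   # hypothetical file
    feats, centers, day = get_mouse_features(mouse_csv, segment_size=30.0)
    print(feats.shape)                   # (n_segments, 6) feature matrix
    print(convert_seconds(centers)[:3])  # segment centres as HH:MM:SS strings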
def get_key_features(filename,segment_size):
"""Keyboard feature extraction
Args:
filename (str): CSV recordings file
segment_size (int): Create segments on the given number
Returns:
np.arrays: Containing the calculated features, the segment time centers and the starting day
"""
data = pd.read_csv(filename, delimiter=',')
keys = data['Key']
times = list(data['Time'])
day_start = data['Date'][0]
next_dates = (data['Date'] > day_start)
if next_dates.any():
next_dates = data.loc[next_dates]
next_day = int(next_dates.index[0])
times_seconds = get_seconds(times,next_day)
else:
next_day = None
times_seconds = get_seconds(times,next_day)
subkeys = ['Key.down','Key.up','Key.left','Key.right',
'Key.alt','Key.alt_gr','Key.alt_r','Key.ctrl',
'Key.ctrl_r','Key.shift','Key.shift_r','Key.backspace',
'Key.space','Key.enter','Key.page_down','Key.page_up'
]
#start = times_seconds[0]
tmp = ut.splitall(filename)
tmp = tmp[-1]
start = tmp[11:-4].split('_')
start = float(start[0]) * 3600 + float(start[1]) * 60 + float(start[2])
features = []
segment_centers = []
all_keys = arrow_keys = spaces = shift_ctrl_alt = 0
while start + segment_size < times_seconds[-1]:
end = start + segment_size
cur_key = [key for i, key in enumerate(keys) if times_seconds[i] >=start and times_seconds[i] <= end]
        for i, key in enumerate(cur_key):
            if key == 'None':
                pass
            elif key in subkeys:
                all_keys += 1.0  # any tracked special key
                if key == subkeys[12]:  # space
                    spaces += 1.0
                elif key in subkeys[:4]:  # arrow keys
                    arrow_keys += 1.0
                elif key in subkeys[4:11]:  # shift/ctrl/alt combinations
                    shift_ctrl_alt += 1.0
features.append([all_keys/segment_size, arrow_keys/segment_size,
spaces/segment_size, shift_ctrl_alt/segment_size])
segment_centers.append(start + segment_size / 2)
start += segment_size
features = np.array(features)
segment_centers = np.array(segment_centers)
return features, segment_centers, day_start
def get_chair_features(filename,segment_size):
"""Chair FRS feature extraction
Args:
filename (str): CSV recordings file
segment_size (int): Create segments on the given number
Returns:
np.arrays: Containing the calculated features, the segment time centers and the starting day
"""
data = pd.read_csv(filename, delimiter=',')
times = list(data['Time'])
a0, a1, a2, a3, a4 = data['A0'], data['A1'], data['A2'], data['A3'], data['A4']
day_start = data['Date'][0]
next_dates = (data['Date'] > day_start)
if (next_dates.any()):
next_dates = data.loc[next_dates]
next_day = int(next_dates.index[0])
times_seconds = get_seconds(times,next_day)
else:
next_day = None
times_seconds = get_seconds(times,next_day)
#start = times_seconds[0]
tmp = ut.splitall(filename)
tmp = tmp[-1]
start = tmp[11:-4].split('_')
start = float(start[0]) * 3600 + float(start[1]) * 60 + float(start[2])
features = []
segment_centers = []
while start + segment_size < times_seconds[-1]:
end = start + segment_size
cur_A0 = [A0 for i, A0 in enumerate(a0) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_A1 = [A1 for i, A1 in enumerate(a1) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_A2 = [A2 for i, A2 in enumerate(a2) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_A3 = [A3 for i, A3 in enumerate(a3) if times_seconds[i] >=start and times_seconds[i] <= end]
cur_A4 = [A4 for i, A4 in enumerate(a4) if times_seconds[i] >=start and times_seconds[i] <= end]
        if not (cur_A0 or cur_A1 or cur_A2 or cur_A3 or cur_A4):
features.append([0.0, 0.0, 0.0, 0.0,0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
segment_centers.append(start + segment_size / 2)
else:
features.append([mean(cur_A0), stdev(cur_A0), mean(cur_A1), stdev(cur_A1),mean(cur_A2),
stdev(cur_A2), mean(cur_A3), stdev(cur_A3), mean(cur_A4), stdev(cur_A4)])
segment_centers.append(start + segment_size / 2)
start += segment_size
return features, segment_centers, day_start
#Todo: Review get_features()
def get_features() -> str:
"""Get the features path
Returns:
str: The path to save the generated features
"""
current_path = os.path.abspath(os.getcwd())
current_path = os.path.join(current_path,'Data')
#current_path = os.path.join(current_path,'Debug')
features_path = os.path.join(current_path,'Features')
return features_path
def extract_features(segment_size):
    """Extract and merge mouse, keyboard and chair features into one CSV per session.
    Args:
        segment_size (int): Create segments of segment_size seconds
    """
mouse,keyboard,chair = get_recordings()
pairs = [(i, j, k) for i in mouse for j in keyboard for k in chair if i.split('/')[-1].split('.')[0] == j.split('/')[-1].split('.')[0] == k.split('/')[-1].split('.')[0]]
for m,k,c in pairs:
print(m)
print(k)
print(c,'\n')
kf, kt, _ = get_key_features(k,float(segment_size))
k_segment_centers = convert_seconds(kt)
cf, ct, _ = get_chair_features(c,float(segment_size))
c_segment_centers = convert_seconds(ct)
mf, mt, _ = get_mouse_features(m,float(segment_size))
m_segment_centers = convert_seconds(mt)
'''
print(mf)
print(m_segment_centers)
print(kf)
print(k_segment_centers)
print(cf)
print(c_segment_centers)
'''
compare = lambda x, y, z: collections.Counter(x) == collections.Counter(y) == collections.Counter(z) #compare -> bool
if compare(k_segment_centers,c_segment_centers,m_segment_centers):
segment_centers = c_segment_centers
#Get the annotated labels from chair recording
chair = pd.read_csv(c)
chair = chair.drop(['A0','A1','A2','A3','A4'], axis = 1)
labels = list(chair['Label'])
dates = list(chair['Date'])
            #Drop the milliseconds from the timestamps
timestamps = get_seconds(list(chair['Time']),None)
timestamps = sorted(timestamps)
timestamps = list(convert_seconds(timestamps))
tmp = []
label_idxs = []
date_idxs = []
'''
get the common timestamps() and their indexes
if a timestamp does not exist in the segments list
fill tuple with space
'''
[tmp.append((sec,timestamps.index(sec))) if sec in timestamps else tmp.append((sec,'')) for i,sec in enumerate(segment_centers)]
[label_idxs.append(labels[pair[1]]) if pair[1] != '' else label_idxs.append(0) for index,pair in enumerate(tmp)] #Map the labels
[date_idxs.append(dates[pair[1]]) if pair[1] != '' else date_idxs.append('NaN') for index,pair in enumerate(tmp)] #Map the Dates
del tmp
#Convert np arrays to dataframes
df1 = pd.DataFrame(mf, columns = ['Velocity_X','Velocity_Y','Clicks','R_Clicks','L_Clicks','M_Clicks'])
df2 = pd.DataFrame(kf, columns = ['All_keys_N','Arrow_keys_N','Spaces_N','Shft_Ctrl_Alt_N'])
df3 = pd.DataFrame(cf, columns = ['M_A0','STD_A0','M_A1','STD_A1','M_A2','STD_A2','M_A3','STD_A3','M_A4','STD_A4'])
df1['Time'] = m_segment_centers
df2['Time'] = k_segment_centers
df3['Time'] = c_segment_centers
'''
print(len(df1))
print(len(df2))
print(len(df3))
print(df1,'\n')
print(df2,'\n')
print(df3,'\n')
'''
merge = df1.set_index('Time').join(df2.set_index('Time')).join(df3.set_index('Time'))
merge.insert(loc=0, column='Date', value=date_idxs)
merge.insert(loc=len(merge.columns), column='Label', value=label_idxs)
merge.fillna(0.0, inplace=True)
merge.reset_index(inplace=True)
titles = list(merge.columns)
titles[0], titles[1] = titles[1], titles[0]
merge = merge[titles]
'''
print(merge.columns)
print(merge.head())
print()
#print(len(merge))
#print(len(date_idxs))
#print(len(label_idxs))
'''
#Save to .csv file
features_file = ''.join((c.split('/')[-1].split('.')[0],'.csv'))
features_dir = get_features()
features_file = os.path.join(features_dir,features_file)
merge.to_csv(features_file, encoding='utf-8', index=False)
del df1,df2,df3,merge
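# Hypothetical driver: pick a window length and let extract_features() align the
# mouse, keyboard and chair recordings that share a file-name stem, writing one
# merged feature CSV per session into the Features directory.
def _demo_extract_features():
    extract_features(segment_size=30)   # 30-second windows; the value is illustrative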
def cmp_segments(filename, segment_size):
"""Check whether the seconds segment size is greater
than the recorded second number
Args:
filename (str): The recorded .csv file
segment_size (int): A number describing how many second
chunks to examine
Returns:
bool: True or False
"""
    df = pd.read_csv(filename)
#!/usr/bin/python
# -*- coding: utf-8 -*-
from abc import ABC
import logging
import os
import sys
import pandas as pd
# setup logger
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from .operator import IOperator
class CSV(IOperator, ABC):
""" Instance Object for COCO annotation format """
def __init__(self, dataset):
super().__init__(dataset)
self._dataset = dataset
self.attrs = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
def extract(self, path: str):
"""
        Convert all the annotations in the file into a generalized pandas.DataFrame object.
:param path: string, relative / absolute path
:return: generalize pandas.DataFrame type object.
"""
if os.path.exists(path):
ann_df = pd.read_csv(path)
attr_df_list = list(ann_df.columns)
if all(x in attr_df_list for x in self.attrs):
new_ann_df = self.__dfUpdates(ann_df)
self.__updateDataset(new_ann_df.loc[:, ["name", "width", "height", "image_id"]])
self.__setAnn(new_ann_df)
            else:
                logger.error(f"entered annotation file does not contain all the required attributes.\n {self.attrs}")
                sys.exit()
        else:
            logger.error(f"entered directory {path} does not exist.")
            sys.exit()
def archive(self, location, df):
""" save csv annotation file in the given location
:param location: .csv file saving location
:param df: finalized DataFrame object from the self.translate()
:return: None
"""
if os.path.exists(os.path.dirname(location)):
df.to_csv(location, index=False)
else:
logger.exception("There are no such parent directory to file save.")
def translate(self):
""" translate common schema into csv compatible format.
:return: pd.DataFrame object with ["filename", "width", "height", "class", "xmin", "ymin", "xmax", "ymax"] columns.
"""
csv_ann_df = self.annotations.copy()
class_series = pd.Series(self.classes)
csv_ann_df.loc[:,"class"] = csv_ann_df["class_id"].map(class_series)
filename_series = pd.Series(self._dataset['name'].tolist(),
index=self._dataset["image_id"].tolist())
csv_ann_df.loc[:,"filename"] = csv_ann_df["image_id"].map(filename_series)
width_series = pd.Series(self._dataset['width'].tolist(),
index=self._dataset["image_id"].tolist())
csv_ann_df.loc[:, "width"] = csv_ann_df["image_id"].map(width_series)
height_series = pd.Series(self._dataset['height'].tolist(),
index=self._dataset["image_id"].tolist())
csv_ann_df.loc[:, "height"] = csv_ann_df["image_id"].map(height_series)
if (pd.isnull(csv_ann_df["class"]).sum() + pd.isnull(csv_ann_df["filename"]).sum()) != 0:
logger.error(f"There are not enough data in past annotation file to create annotation file. { | pd.isnull(csv_ann_df['class']) | pandas.isnull |
#
# process_species_by_dataset
#
# We generated a list of all the annotations in our universe; this script is
# used to (interactively) map them onto the GBIF and iNat taxonomies. Don't
# try to run this script from top to bottom; it's used like a notebook, not like
# a script, since manual review steps are required.
#
#%% Imports
import os
import re
from typing import Any
import unicodedata
import pandas as pd
import numpy as np
from tqdm import tqdm
from taxonomy_mapping.species_lookup import (
get_taxonomic_info, initialize_taxonomy_lookup, print_taxonomy_matches)
import taxonomy_mapping.retrieve_sample_image as retrieve_sample_image
# %autoreload 0
# %autoreload -species_lookup
#%% Constants
output_base = r'C:\git\ai4edev\camera-traps-private\taxonomy_archive'
xlsx_basename = 'species_by_dataset_2020_09_02_ic_ubc.xlsx'
# Input file
species_by_dataset_file = os.path.join(output_base, xlsx_basename)
# Output file after automatic remapping
output_file = species_by_dataset_file.replace('.xlsx', '.output.xlsx')
# File to which we manually copy that file and do all the manual review; this
# should never be programmatically written to
manual_review_xlsx = output_file.replace('.xlsx', '.manual.xlsx')
# The final output spreadsheet
output_xlsx = manual_review_xlsx.replace('.xlsx', '_remapped.xlsx')
output_csv = output_xlsx.replace('.xlsx', '.csv')
# HTML file generated to facilitate the identification of egregious mismappings
html_output_file = os.path.join(output_base, 'mapping_previews.html')
download_images = True
master_table_file = r'C:\git\ai4edev\camera-traps-private\camera_trap_taxonomy_mapping.csv'
#%% Functions
def slugify(value: Any, allow_unicode: bool = False) -> str:
"""
From:
https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = str(value)
value = unicodedata.normalize('NFKC', value)
if not allow_unicode:
value = value.encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower()).strip()
return re.sub(r'[-\s]+', '-', value)
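# Illustrative usage sketch: slugify() is used below to turn scientific names into
# filesystem-safe folder names for the downloaded preview images.
def _demo_slugify():
    print(slugify('Equus quagga'))           # 'equus-quagga'
    print(slugify('Panthera leo (lion)'))    # 'panthera-leo-lion'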
class TaxonomicMatch:
def __init__(self, scientific_name, common_name, taxonomic_level, source,
taxonomy_string, match):
self.scientific_name = scientific_name
self.common_name = common_name
self.taxonomic_level = taxonomic_level
self.source = source
self.taxonomy_string = taxonomy_string
self.match = match
def __repr__(self):
return ('TaxonomicMatch('
f'scientific_name={self.scientific_name}, '
f'common_name={self.common_name}, '
f'taxonomic_level={self.taxonomic_level}, '
f'source={self.source}')
# Prefer iNat matches over GBIF matches
taxonomy_preference = 'inat'
def get_preferred_taxonomic_match(query: str) -> TaxonomicMatch:
"""
Wrapper for species_lookup.py, but expressing a variety of heuristics and
preferences that are specific to our scenario.
"""
query = query.lower().strip().replace('_', ' ')
# query = 'person'
matches = get_taxonomic_info(query)
# Do we have an iNat match?
inat_matches = [m for m in matches if m['source'] == 'inat']
gbif_matches = [m for m in matches if m['source'] == 'gbif']
# print_taxonomy_matches(inat_matches, verbose=True)
# print_taxonomy_matches(gbif_matches, verbose=True)
scientific_name = ''
common_name = ''
taxonomic_level = ''
match = ''
source = ''
taxonomy_string = ''
n_inat_matches = len(inat_matches)
n_gbif_matches = len(gbif_matches)
selected_matches = None
if n_inat_matches > 0 and taxonomy_preference == 'inat':
selected_matches = 'inat'
elif n_gbif_matches > 0:
selected_matches = 'gbif'
if selected_matches == 'inat':
i_match = 0
if len(inat_matches) > 1:
# print('Warning: multiple iNat matches for {}'.format(query))
# Prefer chordates... most of the names that aren't what we want
# are esoteric insects, like a moth called "cheetah"
#
# If we can't find a chordate, just take the first match.
#
# i_test_match = 0
for i_test_match, match in enumerate(inat_matches):
found_vertebrate = False
taxonomy = match['taxonomy']
for taxonomy_level in taxonomy:
taxon_rank = taxonomy_level[1]
scientific_name = taxonomy_level[2]
if taxon_rank == 'phylum' and scientific_name == 'chordata':
i_match = i_test_match
found_vertebrate = True
break
if found_vertebrate:
break
match = inat_matches[i_match]['taxonomy']
# This is (taxonID, taxonLevel, scientific, [list of common])
lowest_level = match[0]
taxonomic_level = lowest_level[1]
scientific_name = lowest_level[2]
assert len(scientific_name) > 0
common_names = lowest_level[3]
if len(common_names) > 1:
# print(f'Warning: multiple iNat common names for {query}')
# Default to returning the query
if query in common_names:
common_name = query
else:
common_name = common_names[0]
elif len(common_names) > 0:
common_name = common_names[0]
# print(f'Matched iNat {query} to {scientific_name},{common_name}')
source = 'inat'
# ...if we had iNat matches
# If we either prefer GBIF or didn't have iNat matches
#
# Code is deliberately redundant here; I'm expecting some subtleties in how
    # we handle GBIF and iNat.
elif selected_matches == 'gbif':
i_match = 0
if len(gbif_matches) > 1:
# print('Warning: multiple GBIF matches for {}'.format(query))
# Prefer chordates... most of the names that aren't what we want
# are esoteric insects, like a moth called "cheetah"
#
# If we can't find a chordate, just take the first match.
#
# i_test_match = 0
for i_test_match, match in enumerate(gbif_matches):
found_vertebrate = False
taxonomy = match['taxonomy']
for taxonomy_level in taxonomy:
taxon_rank = taxonomy_level[1]
scientific_name = taxonomy_level[2]
if taxon_rank == 'phylum' and scientific_name == 'chordata':
i_match = i_test_match
found_vertebrate = True
break
if found_vertebrate:
break
match = gbif_matches[i_match]['taxonomy']
# This is (taxonID, taxonLevel, scientific, [list of common])
lowest_level = match[0]
taxonomic_level = lowest_level[1]
scientific_name = lowest_level[2]
assert len(scientific_name) > 0
common_names = lowest_level[3]
if len(common_names) > 1:
# print(f'Warning: multiple GBIF common names for {query}')
# Default to returning the query
if query in common_names:
common_name = query
else:
common_name = common_names[0]
elif len(common_names) > 0:
common_name = common_names[0]
source = 'gbif'
# ...if we needed to look in the GBIF taxonomy
taxonomy_string = str(match)
return TaxonomicMatch(scientific_name, common_name, taxonomic_level, source,
taxonomy_string, match)
# ...def get_preferred_taxonomic_match()
#%% Initialization
initialize_taxonomy_lookup()
#%% Test single-query lookup
if False:
#%%
matches = get_taxonomic_info('equus quagga')
print_taxonomy_matches(matches)
#%%
q = 'equus quagga'
# q = "grevy's zebra"
taxonomy_preference = 'gbif'
m = get_preferred_taxonomic_match(q)
print(m.source)
print(m.taxonomy_string)
import clipboard
clipboard.copy(m.taxonomy_string)
#%% Read the input data
df = pd.read_excel(species_by_dataset_file)
#%% Run all our taxonomic lookups
# i_row = 0; row = df.iloc[i_row]
# query = 'lion'
output_rows = []
for i_row, row in df.iterrows():
dataset_name = row['dataset']
query = row['species_label']
taxonomic_match = get_preferred_taxonomic_match(query)
def google_images_url(query: str) -> str:
return f'https://www.google.com/search?tbm=isch&q={query}'
scientific_url = ''
if len(taxonomic_match.scientific_name) > 0:
scientific_url = google_images_url(taxonomic_match.scientific_name)
common_url = ''
if len(taxonomic_match.common_name) > 0:
common_url = google_images_url(taxonomic_match.common_name)
query_url = google_images_url(query)
output_row = {
'dataset_name': dataset_name,
'query': query,
'taxonomy_level': taxonomic_match.taxonomic_level,
'scientific_name': taxonomic_match.scientific_name,
'common_name': taxonomic_match.common_name,
'source': taxonomic_match.source,
'is_typo': '',
'setup': '',
'notes': '',
'non-global': '',
'query_url': query_url,
'scientific_url': scientific_url,
'common_url': common_url,
'taxonomy_string': taxonomic_match.taxonomy_string
}
output_rows.append(output_row)
# ...for each query
# Write to the excel file that we'll use for manual review
output_df = pd.DataFrame(data=output_rows, columns=[
'dataset_name', 'query', 'taxonomy_level', 'scientific_name', 'common_name',
'source', 'is_typo', 'setup', 'notes', 'non-global', 'query_url',
'scientific_url', 'common_url', 'taxonomy_string'])
output_df.to_excel(output_file, index=None, header=True)
#%% Download preview images for everything we successfully mapped
# uncomment this to load saved output_file
# output_df = pd.read_excel(output_file, keep_default_na=False)
preview_base = os.path.join(output_base, 'preview_images')
os.makedirs(preview_base, exist_ok=True)
scientific_name_to_paths = {}
# i_row = 0; row = output_df.iloc[i_row]
for i_row, row in tqdm(output_df.iterrows(), total=len(output_df)):
scientific_name = row.scientific_name
assert isinstance(scientific_name, str)
if len(scientific_name) == 0:
continue
if scientific_name in scientific_name_to_paths:
continue
image_paths = None
preview_dir = os.path.join(preview_base, slugify(scientific_name))
if os.path.isdir(preview_dir) and len(os.listdir(preview_dir)) > 0:
print(f'Bypassing preview download for {preview_dir}')
image_paths = os.listdir(preview_dir)
image_paths = [os.path.join(preview_dir, p) for p in image_paths]
elif download_images:
print(f'Downloading images for {preview_dir}')
os.makedirs(preview_dir, exist_ok=True)
image_paths = retrieve_sample_image.download_images(
scientific_name, output_directory=preview_dir, limit=4)
if image_paths is not None:
scientific_name_to_paths[scientific_name] = image_paths
# ...for each query
#%% Write HTML file with representative images to scan for obvious mis-mappings
with open(html_output_file, 'w') as f:
f.write('<html><head></head><body>\n')
# i_row = 0; row = output_df.iloc[i_row]
for i_row, row in tqdm(output_df.iterrows(), total=len(output_df)):
f.write('<p class="speciesinfo_p" style="font-weight:bold;font-size:130%">')
common = row.common_name
if len(common) == 0:
common = 'no common name'
f.write('{}: {} mapped to {} ({}) from {}</p>\n'.format(
row.dataset_name, row.query, row.scientific_name, common,
row.source))
if row.scientific_name not in scientific_name_to_paths:
f.write('<p class="content_p">no images available</p>')
else:
image_paths = scientific_name_to_paths[row.scientific_name]
n_images = len(image_paths)
image_paths = [os.path.relpath(p, output_base) for p in image_paths]
image_width_percent = round(100 / n_images)
f.write('<table class="image_table"><tr>\n')
for image_path in image_paths:
f.write('<td style="vertical-align:top;" width="{}%">'
'<img src="{}" style="display:block; width:100%; vertical-align:top; height:auto;">'
'</td>\n'.format(image_width_percent, image_path))
f.write('</tr></table>\n')
# ...for each row
f.write('</body></html>\n')
#%% Look for redundancy with the master table
# Note: `master_table_file` is a CSV file that is the concatenation of the
# manually-remapped files ("manual_remapped.xlsx"), which are the output of
# this script run across from different groups of datasets. The concatenation
# should be done manually. If `master_table_file` doesn't exist yet, skip this
# code cell. Then, after going through the manual steps below, set the final
# manually-remapped version to be the `master_table_file`.
def generate_query_id(dataset_name: str, query: str) -> str:
return dataset_name + '|' + query
master_table = pd.read_csv(master_table_file)
import numpy as np
import pandas as pd
from scipy.stats import mode
from tqdm import tqdm
from geopy.geocoders import Nominatim
from datetime import datetime
def handle_bornIn(x):
skip_vals = ['16-Mar', '23-May', 'None']
if x not in skip_vals:
return datetime(2012, 1, 1).year - datetime(int(x), 1, 1).year
else:
return 23
def handle_gender(x):
if x == 'male':
return 1
else:
return 0
def handle_memberSince(x):
skip_vals = ['--None']
if pd.isna(x):
return datetime(2012, 1, 1)
elif x not in skip_vals:
return datetime.strptime(x, '%d-%m-%Y')
else:
return datetime(2012, 1, 1)
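# Illustrative checks for the profile-normalisation helpers above: ages are taken
# relative to 2012-01-01 and unparsable values fall back to defaults.
def _demo_profile_handlers():
    print(handle_bornIn('1990'))             # 22
    print(handle_bornIn('None'))             # 23 (fallback)
    print(handle_gender('male'))             # 1
    print(handle_memberSince('15-08-2010'))  # datetime(2010, 8, 15, 0, 0)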
def process_tours_df(data_content):
dtype = {}
cols = data_content.tours_df.columns[9:]
for d in cols:
dtype[d] = np.int16
data_content.tours_df = data_content.tours_df.astype(dtype)
data_content.tours_df['area'] = data_content.tours_df['city'] + ' ' + data_content.tours_df['state'] + ' ' + \
data_content.tours_df['pincode'] + ' ' + data_content.tours_df['country']
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.lstrip() if type(x) == str else x)
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.rstrip() if type(x) == str else x)
data_content.tours_df.drop(['city', 'state', 'pincode', 'country'], axis=1, inplace=True)
data_content.tours_df['tour_date'] = data_content.tours_df['tour_date'].apply(
lambda x: datetime(int(x.split('-')[2]), int(x.split('-')[1]), int(x.split('-')[0]), 23, 59))
def process_tour_convoy_df(data_content):
print('Initializing tour_convoy_df...', flush=True)
data_content.tour_convoy_df['total_going'] = 0
data_content.tour_convoy_df['total_not_going'] = 0
data_content.tour_convoy_df['total_maybe'] = 0
data_content.tour_convoy_df['total_invited'] = 0
data_content.tour_convoy_df['fraction_going'] = 0
data_content.tour_convoy_df['fraction_not_going'] = 0
data_content.tour_convoy_df['fraction_maybe'] = 0
known_bikers = set()
lis = ['going', 'not_going', 'maybe', 'invited']
pbar = tqdm(total=data_content.tour_convoy_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 1 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
s = [0, 0, 0]
for j, l in enumerate(lis):
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
data_content.tour_convoy_df.loc[idx, 'total_' + l] = len(biker)
if j != 3:
s[j] = len(biker)
for bik in biker:
known_bikers.add(bik)
if sum(s) != 0:
for j in range(3):
data_content.tour_convoy_df.loc[idx, 'fraction_' + lis[j]] = s[j] / sum(s)
pbar.update(1)
pbar.close()
mean = data_content.tour_convoy_df['total_invited'].mean()
std = data_content.tour_convoy_df['total_invited'].std()
data_content.tour_convoy_df['fraction_invited'] = data_content.tour_convoy_df['total_invited'].apply(
lambda x: (x - mean) / std)
biker_tour_convoy_df = dict()
for biker in list(known_bikers):
biker_tour_convoy_df[biker] = [[], [], [], []]
pbar = tqdm(total=data_content.tour_convoy_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
for l in lis:
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
for bik in biker:
biker_tour_convoy_df[bik][lis.index(l)] += \
[data_content.tour_convoy_df.loc[idx, 'tour_id']]
pbar.update(1)
pbar.close()
for key, _ in biker_tour_convoy_df.items():
for i in range(4):
biker_tour_convoy_df[key][i] = ' '.join(list(set(biker_tour_convoy_df[key][i])))
biker_tour_convoy_df = pd.DataFrame.from_dict(biker_tour_convoy_df, orient='index')
biker_tour_convoy_df.reset_index(inplace=True)
biker_tour_convoy_df.columns = ['biker_id'] + lis
print('tour_convoy_df ready...', flush=True)
return biker_tour_convoy_df
def get_coordinates(locations, data_content):
geolocation_map = {}
locator = Nominatim(user_agent="Kolibri")
for i in tqdm(range(len(locations)),
disable=False,
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# noinspection PyBroadException
try:
location = locator.geocode(locations[i])
geolocation_map[locations[i]] = [location.latitude, location.longitude]
except:
# Called when there is presumably some noise in the Address location
# noinspection PyBroadException
data_content.noise += [locations[i]]
geolocation_map[locations[i]] = [np.nan, np.nan]
location_df = pd.DataFrame({'location': list(locations),
'latitude': np.array(list(geolocation_map.values()))[:, 0],
'longitude': np.array(list(geolocation_map.values()))[:, 1]})
return geolocation_map, location_df
def initialize_locations(data_content):
# noinspection PyBroadException
try:
location_df = pd.read_csv(data_content.base_dir + 'temp/location.csv')
location_from_csv = True
except:
location_df = None
location_from_csv = False
if location_from_csv:
geolocation = {}
print('Initializing Locations from DataFrame...', flush=True)
for i, l in enumerate(location_df['location'].tolist()):
geolocation[l] = [location_df.loc[i, 'latitude'], location_df.loc[i, 'longitude']]
else:
print('Initializing Locations from Nominatim...', flush=True)
biker_location = data_content.bikers_df['area'].dropna().drop_duplicates().tolist()
geolocation, location_df = get_coordinates(biker_location, data_content)
return geolocation, location_df
def impute_location_from_tour_convoy(data_content):
# From tour_convoy
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.convoy_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
groups = ['going', 'not_going', 'maybe', 'invited']
rest_trs = data_content.tours_df[data_content.tours_df['tour_id'].isin(
data_content.tour_convoy_df['tour_id'])]
rest_con = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(org_bik)]
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
cdf = rest_con[rest_con['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if cdf.shape[0] > 0:
tours = []
for g in groups:
tours += cdf[g].tolist()[0].split()
tours = (' '.join(tours)).split()
trs = rest_trs[rest_trs['tour_id'].isin(tours)]
if trs.shape[0] > 0:
m, _ = mode(trs[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = trs[trs['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = trs.loc[index, 'latitude'], trs.loc[index, 'longitude']
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_location_from_tours(data_content):
# From tours_df
    unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import pandas as pd
from numpy import nan, isnan, arange, dtype, zeros
from pandapower.auxiliary import pandapowerNet, get_free_id, _preserve_dtypes
from pandapower.results import reset_results
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower import __version__
def create_empty_network(name="", f_hz=50., sn_kva=1e3):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
**sn_kva** (float, 1e3) - reference apparent power for per unit system
OUTPUT:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
EXAMPLE:
net = create_empty_network()
"""
net = pandapowerNet({
# structure data
"bus": [('name', dtype(object)),
('vn_kv', 'f8'),
('type', dtype(object)),
('zone', dtype(object)),
('in_service', 'bool'), ],
"load": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("const_z_percent", "f8"),
("const_i_percent", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"sgen": [("name", dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"gen": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", dtype(object)),
("type", dtype(object)),
("closed", "bool"),
("name", dtype(object))],
"shunt": [("bus", "u4"),
("name", dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("vn_kv", "f8"),
("step", "u4"),
("max_step", "u4"),
("in_service", "bool")],
"ext_grid": [("name", dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", dtype(object)),
("std_type", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("max_i_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", dtype(object)),
("in_service", 'bool')],
"trafo": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_st_degree", "f8"),
("tp_pos", "i4"),
("parallel", "u4"),
("df", "f8"),
("in_service", 'bool')],
"trafo3w": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("rft_pu", "f8"),
("xft_pu", "f8"),
("rtf_pu", "f8"),
("xtf_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"dcline": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("p_kw", "f8"),
("loss_percent", 'f8'),
("loss_kw", 'f8'),
("vm_from_pu", "f8"),
("vm_to_pu", "f8"),
("max_p_kw", "f8"),
("min_q_from_kvar", "f8"),
("min_q_to_kvar", "f8"),
("max_q_from_kvar", "f8"),
("max_q_to_kvar", "f8"),
("in_service", 'bool')],
"ward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "bool")],
"xward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "bool")],
"measurement": [("name", dtype(object)),
("type", dtype(object)),
("element_type", dtype(object)),
("value", "f8"),
("std_dev", "f8"),
("bus", "u4"),
("element", dtype(object))],
"piecewise_linear_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("p", dtype(object)),
("f", dtype(object))],
"polynomial_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("c", dtype(object))],
# geodata
"line_geodata": [("coords", dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8"),
("vm_pu", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_dcline": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("vm_from_pu", "f8"),
("va_from_degree", "f8"),
("vm_to_pu", "f8"),
("va_to_degree", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"_is_elements": None,
"_pd2ppc_lookups": {"bus": None,
"ext_grid": None,
"gen": None},
"version": float(__version__[:3]),
"converged": False,
"name": name,
"f_hz": f_hz,
"sn_kva": sn_kva
})
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(zeros(0, dtype=net[s]), index=[])
add_basic_std_types(net)
reset_results(net)
net['user_pf_options'] = dict()
return net
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=nan,
min_vm_pu=nan, **kwargs):
"""create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b", \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds one bus in table net["bus"].
Busses are the nodes of the network that all other elements connect to.
INPUT:
**net** (pandapowerNet) - The pandapower network in which the element is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available. If None, the \
index one higher than the highest already existing index is selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxilary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata is not None:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
def create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan):
"""create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None, \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds several buses in table net["bus"] at once.
Busses are the nodal points of the network that all other elements connect to.
Input:
**net** (pandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
        **index** (int, default None) - Force specified IDs if available. If None, the indices \
higher than the highest already existing index are selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxilary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
        **index** (int) - The unique indices of the created elements
    EXAMPLE:
        create_buses(net, 2, vn_kv=20., name="bus")
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] = pd.concat([net["bus"], dd], axis=0).reindex_axis(net["bus"].columns, axis=1)
# and preserve dtypes
# _preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[bid, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
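# Minimal sketch of the intended workflow, mirroring the docstring examples above;
# the voltage levels and load values below are illustrative only.
def _demo_small_net():
    net = create_empty_network(name="demo")
    hv_bus = create_bus(net, vn_kv=20., name="bus1")   # could feed an ext_grid, not shown here
    lv_bus = create_bus(net, vn_kv=0.4, name="bus2")
    create_load(net, bus=lv_bus, p_kw=100., q_kvar=20.)   # consumer convention: positive p_kw
    return net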
def create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan,
name=None, scaling=1., index=None,
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan):
"""create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan, \
name=None, scaling=1., index=None, \
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan, max_q_kvar=nan, \
min_q_kvar=nan, controllable=nan)
Adds one load in table net["load"].
All loads are modelled in the consumer system, meaning load is positive and generation is
negative active power. Please pay attention to the correct signing of the reactive power as
well.
INPUT:
**net** - The net within this load should be created
**bus** (int) - The bus id to which the load is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the load
        - positive value -> load
- negative value -> generation
**q_kvar** (float, default 0) - The reactive power of the load
**const_z_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant impedance load at rated voltage
**const_i_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant current load at rated voltage
**sn_kva** (float, default None) - Nominal power of the load
**name** (string, default None) - The name for this load
**scaling** (float, default 1.) - An OPTIONAL scaling factor to be set customly
**type** (string, None) - type variable to classify the load
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**in_service** (boolean) - True for in_service or False for out of service
**max_p_kw** (float, default NaN) - Maximum active power load - necessary for controllable \
loads in for OPF
**min_p_kw** (float, default NaN) - Minimum active power load - necessary for controllable \
loads in for OPF
**max_q_kvar** (float, default NaN) - Maximum reactive power load - necessary for \
controllable loads in for OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power load - necessary for \
controllable loads in OPF
**controllable** (boolean, default NaN) - States, whether a load is controllable or not. \
Only respected for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_load(net, bus=0, p_kw=10., q_kvar=2.)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["load"])
if index in net["load"].index:
raise UserWarning("A load with the id %s already exists" % id)
# store dtypes
dtypes = net.load.dtypes
net.load.loc[index, ["name", "bus", "p_kw", "const_z_percent", "const_i_percent", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, const_z_percent, const_i_percent, scaling, q_kvar, sn_kva,
bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.load, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.load.columns:
net.load.loc[:, "min_p_kw"] = pd.Series()
net.load.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.load.columns:
net.load.loc[:, "max_p_kw"] = pd.Series()
net.load.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.load.columns:
net.load.loc[:, "min_q_kvar"] = pd.Series()
net.load.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.load.columns:
net.load.loc[:, "max_q_kvar"] = pd.Series()
net.load.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.load.columns:
net.load.loc[:, "controllable"] = | pd.Series() | pandas.Series |
# -*- coding: utf-8 -*-
"""
This module is for running predictions.
Examples:
Example command line executable::
$ python predict.py
"""
import logging
from pathlib import Path
import click
import pandas as pd
from cloudpickle import load
from orbyter_demo.util.config import parse_config
from orbyter_demo.util.logging import setup_logging
logger = logging.getLogger(__name__)
@click.command()
@click.argument("config_file", type=str, default="/mnt/configs/config.yml")
def predict(config_file):
"""
Main function runs predictions.
Args:
None
Returns:
None
"""
config = parse_config(config_file)
# Load model
logger.info(f"Loading model from {config['predict']['model_path']}.")
model_path = Path(config["predict"]["model_path"])
with open(model_path, "rb") as f:
trained_model = load(f)
# Load data
logger.info(f"Loading input data from {config['predict']['data_path']}.")
data_path = Path(config["predict"]["data_path"])
X = pd.read_parquet(data_path)
# Make predictions and persist
logger.info(
f"Make predictions and persist to {config['predict']['predictions_path']}."
)
yhat = trained_model.predict(X)
    yhat = pd.DataFrame(yhat, columns=["MedianHouseValue"])
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = | pd.Categorical(1) | pandas.Categorical |
from django.views.generic import TemplateView, CreateView
import pandas as pd
import numpy as np
### importing the surprise library to implement the recommender systems needed
from surprise import NMF, SVD, SVDpp, KNNBasic, KNNWithMeans, KNNWithZScore, CoClustering
from surprise.model_selection import cross_validate
from surprise import Reader, Dataset
from django.shortcuts import render
import requests
class MovieRatingsView(TemplateView):
template_name = 'main/ratings.html'
class IndexPageView(TemplateView):
template_name = 'main/index.html'
class ChangeLanguageView(TemplateView):
template_name = 'main/change_language.html'
class MovieReccomdationView(TemplateView):
template_name = 'main/recomend.html'
class ResultsView(TemplateView):
template_name = 'main/results.html'
def reccomendation_system(request):
current_user = request.user
user_id = request.user.id
print("User ID:", user_id)
columns = ['user_id', 'item_id', 'rating', 'timestamp']
df = | pd.read_csv('main/ml-100k/u.data', sep='\t', names=columns) | pandas.read_csv |
import unittest
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN, KMeans
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.mixture import GaussianMixture
from dsbox.ml.outliers import CovarianceOutliers, GaussianProcessOutliers
from dsbox.ml.outliers import GMMOutliers, ClusteringOutliers
from dsbox.ml.outliers import KMeansOneClusterOutliers, KMeansIterativeOneClusterOutliers
from dsbox.ml.outliers import MADOutliers, FFTOutliers
class CovarianceOutliersTest(unittest.TestCase):
def test_covarianceoutliers_constructor_should_accept_different_scikit_covariance_estimators(self):
# given
robust_cov = MinCovDet()
emp_cov = EmpiricalCovariance()
# when
cov_outliers_1 = CovarianceOutliers(emp_cov)
cov_outliers_2 = CovarianceOutliers(robust_cov)
# then
self.assertTrue(isinstance(cov_outliers_1, CovarianceOutliers))
self.assertTrue(isinstance(cov_outliers_2, CovarianceOutliers))
def test_covarianceoutliers_predict_proba_gives_biggest_proba_to_biggest_outlier(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
cov_outliers = CovarianceOutliers()
cov_outliers.fit(df)
probas = cov_outliers.predict_proba(df)
outlier_index = np.argmax(probas)
# then
outlier_index_true = 6
self.assertEqual(outlier_index_true, outlier_index)
def test_covarianceoutliers_predict_should_return_correct_values(self):
# given
df = pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1])
# when
cov_outliers = CovarianceOutliers()
cov_outliers.fit(df)
outliers = cov_outliers.predict(df)
# then
outliers_true = [False, False, False, False, False, False, True, True, False, False, False,
False, False, False]
self.assertListEqual(outliers_true, outliers.tolist())
class GaussianProcessOutliersTest(unittest.TestCase):
def test_gpoutliers_predict_should_return_correct_values(self):
# given
data = np.random.random_sample(1000) * 2 - 1
data[300] = 5
data[700] = -6
df = pd.DataFrame(data)
# when
gp_outliers = GaussianProcessOutliers(GaussianProcessRegressor(alpha=0.9, normalize_y=True), n_samples=100)
gp_outliers.fit(df)
outliers = gp_outliers.predict(df, confidence=0.999)
# then
outlier_positions_true = [300, 700]
self.assertTrue(outliers[outlier_positions_true[0]])
self.assertTrue(outliers[outlier_positions_true[1]])
class KMeansOneClusterOutliersTest(unittest.TestCase):
def test_kmeansonecluster_outliers_predict_should_return_correct_values(self):
# given
df = | pd.DataFrame([1, 0, 0, 1, 10, 2, 115, 110, 32, 16, 2, 0, 15, 1]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
Created on Mon Sep 28 16:26:09 2015
@author: r4dat
'''
# ICD9 procs from NHSN definition.
# Diabetes diagnoses from AHRQ version 5 SAS program, CMBFQI32.TXT
# sample string generator print((','.join(map(str, [str(x) for x in range(25040,25094)]))).replace(',','","'))
#
# "25000"-"25033",
# "64800"-"64804" = "DM" /* Diabetes w/o chronic complications*/
# "25000","25001","25002","25003","25004","25005","25006","25007","25008","25009","25010","25011","25012","25013","25014","25015","25016","25017","25018","25019","25020","25021","25022","25023","25024","25025","25026","25027","25028","25029","25030","25031","25032","25033",
# "64800","64801","64802","64803","64804"
#
# "25040"-"25093",
# "7751 " = "DMCX" /* Diabetes w/ chronic complications */
# "25040","25041","25042","25043","25044","25045","25046","25047","25048","25049","25050","25051","25052","25053","25054","25055","25056","25057","25058","25059","25060","25061","25062","25063","25064","25065","25066","25067","25068","25069","25070","25071","25072","25073","25074","25075","25076","25077","25078","25079","25080","25081","25082","25083","25084","25085","25086","25087","25088","25089","25090","25091","25092","25093"
# "7751"
#
import pypyodbc
import pandas as pd
import numpy as np
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
import numpy as np
import pandas as pd
import scipy.integrate
import tqdm
def single_nutrient(params, time, gamma_max, nu_max, precursor_mass_ref, Km,
omega, phi_R, phi_P, num_muts=1, volume=1E-3):
"""
    Defines the system of ordinary differential equations (ODEs) which describe
accumulation of biomass on a single nutrient source.
Parameters
----------
params: list, [M, Mr, Mp, precursors, nutrients]
A list of the parameters whose dynamics are described by the ODEs.
M : positive float
Total protein biomass of the system
Mr : positive float, must be < M
Ribosomal protein biomass of the system
Mp : positive float, must be < M
        Metabolic protein biomass of the system
precursors : positive float
Mass of precursors in the cell. This is normalized to
total protein biomass when calculating the translational
capacity.
nutrients : positive float
Mass of nutrients in the system.
time : float
Evaluated time step of the system.
gamma_max: positive float
The maximum translational capacity in units of inverse time.
nu_max : positive float
The maximum nutritional capacity in units of inverse time.
    precursor_mass_ref : positive float
The dissociation constant of charged tRNA to the elongating ribosome.
Km : positive float
The Monod constant for growth on the specific nutrient source.
This is in units of molar.
omega: positive float
The yield coefficient of the nutrient source in mass of amino acid
produced per mass of nutrient.
phi_R : float, [0, 1]
The fraction of the proteome occupied by ribosomal protein mass
phi_P : float, [0, 1]
The fraction of the proteome occupied by metabolic protein mass
num_muts: int
The number of mutants whose dynamics need to be tracked.
volume: float, default 1 mL
The volume of the system for calculation of concentrations.
Returns
-------
out: list, [dM_dt, dMr_dt, dMp_dt, dprecursors_dt, dnutrients_dt]
A list of the evaluated ODEs at the specified time step.
dM_dt : The dynamics of the total protein biomass.
dMr_dt : The dynamics of the ribosomal protein biomass.
dMp_dt : the dynamics of the metabolic protein biomass.
dprecursors_dt : The dynamics of the precursor/charged-tRNA pool.
dnutrients_dt : The dynamics of the nutrients in the growth medium
"""
# Define constants
AVO = 6.022E23
OD_CONV = 6E17
#TODO: Put in data validation
# Unpack the parameters
if num_muts > 1:
nutrients = params[-1]
M, Mr, Mp, precursors = np.reshape(params[:-1], (4, num_muts))
else:
M, Mr, Mp, precursors, nutrients = params
# Compute the precursor mass fraction and nutrient concentration
precursor_mass_frac = precursors / M
nutrient_conc = nutrients / (AVO * volume)
# Compute the two capacities
gamma = gamma_max * precursor_mass_frac / (precursor_mass_frac + precursor_mass_ref)
nu = nu_max * nutrient_conc / (nutrient_conc + Km)
# ODEs for biomass accumulation
dM_dt = gamma * Mr
dMr_dt = phi_R * dM_dt
dMp_dt = phi_P * dM_dt
# ODE for precursors and nutrients
dprecursors_dt = nu * Mp - dM_dt
dnutrients_dt = -nu * Mp/ omega
_out = [dM_dt, dMr_dt, dMp_dt, dprecursors_dt]
    if num_muts > 1:
        dnutrients_dt = np.sum(dnutrients_dt)
        out = [value for deriv in _out for value in deriv]
        out.append(dnutrients_dt)
    else:
        # single-mutant case: return the scalar derivatives plus the nutrient
        # derivative so the output matches the 5-element parameter vector
        out = _out + [dnutrients_dt]
    return out
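# Minimal usage sketch (illustrative only; every numerical value below is an
# assumption, not taken from a particular experiment): integrate a single
# strain growing on one nutrient with scipy.integrate.odeint.
#
#   time = np.linspace(0, 10, 500)               # time points
#   params = [0.01, 0.002, 0.008, 0.0001, 1E9]   # [M, Mr, Mp, precursors, nutrients]
#   args = (17.1, 2.5, 0.1, 5E-6, 0.3, 0.2, 0.8) # gamma_max, nu_max, precursor_mass_ref,
#                                                # Km, omega, phi_R, phi_P
#   out = scipy.integrate.odeint(single_nutrient, params, time, args=args)
#   total_biomass = out[:, 0]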
def dilution_cycle(time, fun, fun_params, fun_args, nutrient_dict,
target_mass=1, num_dilutions=10,
colnames=None, num_muts=1, **int_kwargs):
"""
Integrates a desired function with periodic dilutions and returns a
dataframe of the complete integration.
Parameters
-----------
time: numpy-array
The time interval to integrate for a single growth cycle. This
time interval will be repeated for each dilution.
fun: function
The function you wish to integrate
fun_params : list
List of parameters to feed into the function
fun_args : dict
Arguments to feed the integration function. Must be a dict as
        some arguments are accessed (in the case of mutants)
nutrient_dict : dict
A dictionary of the indices and values to reset the nutrient conditions
        for each dilution event. The keys correspond to the indices of the
`fun_params` which define the nutrient conditions. The value corresponds
to the desired reset value.
num_dilutions : int
The number of dilution cycles that should be performed
dilution_factor : float or int
The factor by which the parameters should be decreased for each dilution
event. Note that this does not apply to the nutrient parameters which
are reset by `nutrient_dict.`
colnames : list of str, optional
The desired column names of the output. If `None`, columns will be
left arbitrarily named.
**int_kwargs: dict
kwargs to be fed to the ODE solver.
"""
# TODO: Put in type checks.
# Perform the initial integration
out = scipy.integrate.odeint(fun, fun_params, time, args=fun_args,
**int_kwargs)
# Instantiate the dataframes
    if colnames is not None:
initial_df = | pd.DataFrame(out, columns=colnames) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Fatal Police Shooting
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Zg-tic0ZjTQSkN0YXI2CtB3ix9H---Fh
"""
import pandas as pd
df = | pd.read_csv('database.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import numpy as np
import datetime
import math
import os
import osr
import glob
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
import gdal
from joblib import Parallel, delayed
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = 0.001,
epsg=4326, cores=1):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder with geotiff raster data as input, creates a netcdf
    file, and optionally exports the data back to geotiffs.
'''
nc_paths = create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg)
args = [nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor]
print('\tApply HANTS on tiles...')
results = Parallel(n_jobs=cores)(delayed(HANTS_netcdf)(nc_path, args)
for nc_path in nc_paths)
if len(nc_paths) > 1:
Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor)
return nc_path
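# Minimal usage sketch (paths, dates, extents and HANTS settings below are
# illustrative assumptions, not recommendations): smooth a year of daily
# rasters whose file names follow 'NDVI_{yyyy}{mm}{dd}.tif'.
#
#   nc_out = run_HANTS(rasters_path_inp=r'/data/ndvi_tiffs',
#                      name_format='NDVI_{yyyy}{mm}{dd}.tif',
#                      start_date='2017-01-01', end_date='2017-12-31',
#                      latlim=[28.0, 32.0], lonlim=[28.0, 32.0],
#                      cellsize=0.01, nc_path=r'/data/ndvi_hants.nc',
#                      nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                      fet=0.05, dod=1, delta=0.25, cores=4)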
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg=4326):
'''
    This function creates a netcdf file from a folder with geotiff rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
# ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.toordinal() for d in dates_dt]
os.chdir(rasters_path)
ras_ls = glob.glob('*.tif')
# Create tile parts
if (lat_n > 200 or lon_n > 200):
lat_n_amount = np.maximum(1,int(np.floor(lat_n/100)))
lon_n_amount = np.maximum(1,int(np.floor(lon_n/100)))
nc_path_part_names = nc_path.split('.')
nc_path_tiles = []
for lat_n_one in range(0, lat_n_amount):
for lon_n_one in range(0, lon_n_amount):
nc_path_tile = ''.join(nc_path_part_names[0] + "_h%03d_v%03d.nc" %(lon_n_one, lat_n_one))
nc_path_tiles = np.append(nc_path_tiles, nc_path_tile)
else:
nc_path_tiles = nc_path
i = 0
# Loop over the nc_paths
for nc_path_tile in nc_path_tiles:
i += 1
if lat_n_amount > 1:
lat_part = int(nc_path_tile[-6:-3])
lat_start = lat_part * 100
            if int(lat_part) != int(lat_n_amount-1):
lat_end = int((lat_part + 1) * 100)
else:
lat_end = int(lat_n)
else:
lat_start = int(0)
lat_end = int(lat_n)
if lon_n_amount > 1:
lon_part = int(nc_path_tile[-11:-8])
lon_start = int(lon_part * 100)
            if int(lon_part) != int(lon_n_amount-1):
lon_end = int((lon_part + 1) * 100)
else:
lon_end = int(lon_n)
else:
lon_start = int(0)
lon_end = int(lon_n)
        # Define space dimension
lat_range = lat_ls[lat_start:lat_end]
lon_range = lon_ls[lon_start:lon_end]
geo_ex = tuple([lon_range[0] - 0.5*cellsize, cellsize, 0, lat_range[0] + cellsize * 0.5, 0, -cellsize])
# Create netcdf file
print('Creating netCDF file tile %s out of %s...' %(i,len(nc_path_tiles)))
nc_file = netCDF4.Dataset(nc_path_tile, 'w', format="NETCDF4_CLASSIC")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_end - lat_start)
lon_dim = nc_file.createDimension('longitude', lon_end - lon_start)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mapping_name = 'latitude_longitude'
crso.projection = spa_ref
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_ex
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time',))
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
original_var = nc_file.createVariable('original_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
original_var.long_name = 'original_values'
original_var.grid_mapping = 'crs'
original_var.add_offset = 0.00
original_var.scale_factor = Scaling_factor
original_var.set_auto_maskandscale(False)
print('\tVariables created')
        # Fill in time and space dimensions
lat_var[:] = lat_range
lon_var[:] = lon_range
time_var[:] = dates_ls
# Create memory example file
# empty array
empty_vec = pd.np.empty((lat_end - lat_start, lon_end - lon_start))
empty_vec[:] = -9999 * np.float(Scaling_factor)
dest_ex = Save_as_MEM(empty_vec, geo_ex, str(epsg))
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
Date_now = datetime.datetime.fromordinal(dates_ls[tt])
yyyy = str(Date_now.year)
mm = '%02d' %int(Date_now.month)
dd = '%02d' %int(Date_now.day)
# Raster
ras = name_format.format(yyyy=yyyy,mm=mm,dd=dd)
if ras in ras_ls:
data_in = os.path.join(rasters_path, ras)
dest = reproject_dataset_example(data_in, dest_ex)
array_tt = dest.GetRasterBand(1).ReadAsArray()
array_tt[array_tt<-9999] = -9999 * np.float(Scaling_factor)
original_var[tt, :, :] = np.int_(array_tt * 1./np.float(Scaling_factor))
else:
# Store values
original_var[tt, :, :] = np.int_(empty_vec * 1./np.float(Scaling_factor))
# Close file
nc_file.close()
print('NetCDF %s file created' %i)
# Return
return nc_path_tiles
def HANTS_netcdf(nc_path, args):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = args
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+', format="NETCDF4_CLASSIC")
nc_file.set_fill_on()
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[ztime, rows, cols] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((ztime, rows, cols))
outliers_hants = pd.np.empty((ztime, rows, cols))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
#print('Running HANTS...')
for m in range(rows):
for n in range(cols):
#print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[:, m, n])
y[pd.np.isnan(y)] = -9999
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta)
values_hants[:, m, n] = yr
outliers_hants[:, m, n] = outliers
counter = counter + 1
values_hants[values_hants<-9999] = -9999 * np.float(Scaling_factor)
hants_var = nc_file.createVariable('hants_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
hants_var.long_name = 'hants_values'
hants_var.grid_mapping = 'crs'
hants_var.add_offset = 0.00
hants_var.scale_factor = Scaling_factor
hants_var.set_auto_maskandscale(False)
combined_var = nc_file.createVariable('combined_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
combined_var.long_name = 'combined_values'
combined_var.grid_mapping = 'crs'
combined_var.add_offset = 0.00
combined_var.scale_factor = Scaling_factor
combined_var.set_auto_maskandscale(False)
outliers_var = nc_file.createVariable('outliers', 'i4',
('time', 'latitude', 'longitude'),
fill_value=-9999)
outliers_var.long_name = 'outliers'
outliers_var.grid_mapping = 'crs'
hants_var[:,:,:]= np.int_(values_hants * 1./np.float(Scaling_factor))
outliers_var[:,:,:] = outliers_hants
combined_var[:,:,:] = pd.np.where(outliers_hants,
np.int_(values_hants * 1./np.float(Scaling_factor)),
np.int_(original_values * 1./np.float(Scaling_factor)))
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', format="NETCDF4_CLASSIC")
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][:, lat_i, lon_i]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = -9999
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
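# Illustrative call (coordinates and HANTS settings are assumptions): inspect
# the HANTS fit for one pixel of a netCDF produced by create_netcdf/run_HANTS.
#
#   df_point = HANTS_singlepoint(nc_path, point=[30.5, 30.5], nb=365, nf=3,
#                                HiLo='Lo', low=-0.3, high=1.0, fet=0.05,
#                                dod=1, delta=0.25)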
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
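    # Parameter gloss (inferred from the NLR/MATLAB references above; treat as
    # a best-effort summary rather than authoritative documentation):
    #   ni    - number of samples in the time series
    #   nb    - length of the base period (samples per full cycle)
    #   nf    - number of frequencies to fit above the zero frequency
    #   y     - input sample values (invalid samples flagged as -9999)
    #   ts    - sample times, as indices into the base period
    #   HiLo  - 'Hi' or 'Lo': reject high or low outliers, respectively
    #   low   - lowest admissible value; smaller values are rejected
    #   high  - highest admissible value; larger values are rejected
    #   fet   - fit error tolerance used when rejecting points
    #   dod   - degree of overdeterminedness (extra points beyond the minimum)
    #   delta - small damping factor stabilising the least-squares solution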
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in | pd.np.arange(ni) | pandas.np.arange |
# Contributions to SBDF reader functionality provided by PDF Solutions, Inc. (C) 2021
"""
TODOS:
* Return table/column metadata as well as the table data
* Support Decimal type
* Support _ValueArrayEncodingId.RUN_LENGTH array type
* Contemplate making an SBDF writer
"""
from contextlib import ExitStack
from pathlib import Path
from typing import Any, BinaryIO, Dict, Hashable, List, Tuple, Union, cast
import numpy as np
import pandas as pd
try:
from tqdm import tqdm
except ImportError:
tqdm = None
from .array import (
PackedArray,
PackedBitArray,
PackedPlainArray,
next_bytes_as_packed_array,
unpack_bit_array,
unpack_packed_array,
)
from .base import SectionTypeId, ValueTypeId, next_bytes_as_int, next_bytes_as_str
from .metadata import Metadatum, next_bytes_as_column_metadata, next_bytes_as_metadata
def _next_bytes_as_section_id(file: BinaryIO) -> int:
"""Reads section type id from file."""
magic_number = next_bytes_as_int(file)
if magic_number != 0xDF:
raise ValueError("Section magic number 1 not found")
magic_number = next_bytes_as_int(file)
if magic_number != 0x5B:
raise ValueError("Section magic number 2 not found")
section_id = next_bytes_as_int(file)
return section_id
def import_data( # noqa: C901
sbdf_file: Union[str, Path],
strings_as_categories: bool = False,
skip_strings: bool = False,
progress_bar: bool = True,
) -> pd.DataFrame:
"""Import data from an SBDF file and create a pandas DataFrame.
TODO: document keyword arguments
"""
# prevent edge cases for skip_strings option
if skip_strings and strings_as_categories:
raise ValueError("Strings cannot be both skipped and treated as categories")
# establish a master context manager for the duration of reading the file
with ExitStack() as read_context:
# open the SBDF file, managing context using the master context
file = read_context.enter_context(Path(sbdf_file).open("rb"))
# if we have tqdm, create and add progress bar managed by master read context
pbar = None
if tqdm is not None:
pbar = read_context.enter_context(
tqdm(desc="Reading File", unit="row", disable=not progress_bar)
)
# read file header
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.FILEHEADER
version_major = next_bytes_as_int(file)
version_minor = next_bytes_as_int(file)
if (version_major, version_minor) != (1, 0):
v = f"{version_major}.{version_minor}"
msg = f"Only version 1.0 supported, but version {v} encountered."
raise ValueError(msg)
# read table metadata
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.TABLEMETADATA
table_metadata = { # noqa F841
md.name: md.value for md in next_bytes_as_metadata(file)
}
# TODO: parse table metadata into a form that can be returned
# read column metadata
n_columns = next_bytes_as_int(file, n_bytes=4)
column_metadata_fields: Tuple[Metadatum, ...] = next_bytes_as_metadata(
file, skip_values=True
)
column_metadatas: Tuple[Dict[str, Any], ...] = tuple(
{
md.name: md.value
for md in next_bytes_as_column_metadata(file, column_metadata_fields)
}
for _ in range(n_columns)
)
# TODO: parse column metadata into a form that can be returned
column_names: Tuple[Hashable, ...] = tuple(
md_dict["Name"] for md_dict in column_metadatas
)
column_types = tuple(
ValueTypeId(md_dict["DataType"][0]) for md_dict in column_metadatas
)
# read table content as arrays packed into bytes objects
rows_per_slice: List[int] = []
table_slices: List[Dict[Hashable, PackedArray]] = []
table_slice_nulls: List[Dict[Hashable, PackedBitArray]] = []
while True:
current_slice: Dict[Hashable, PackedArray] = dict()
current_slice_nulls: Dict[Hashable, PackedBitArray] = dict()
# read next table slice
section_id = _next_bytes_as_section_id(file)
if section_id == SectionTypeId.TABLEEND:
break
if section_id != SectionTypeId.TABLESLICE:
raise ValueError(f"Expected table slice ID, got {section_id} instead")
slice_n_columns = next_bytes_as_int(file, n_bytes=4)
assert slice_n_columns == n_columns
# read each column slice in the table slice
for column_name in column_names:
section_id = _next_bytes_as_section_id(file)
assert section_id == SectionTypeId.COLUMNSLICE
col_vals = next_bytes_as_packed_array(file)
# handle column properties (ignoring all but IsInvalid)
n_properties = next_bytes_as_int(file, n_bytes=4)
for _ in range(n_properties):
property_name = next_bytes_as_str(file)
property_value = cast(
PackedBitArray, next_bytes_as_packed_array(file)
)
# we only care about the "IsInvalid" property, which defines nulls
if property_name == "IsInvalid":
current_slice_nulls[column_name] = property_value
current_slice[column_name] = col_vals
n_row_in_slice = next(iter(current_slice.values())).n
rows_per_slice.append(n_row_in_slice)
if pbar is not None:
pbar.update(n_row_in_slice)
table_slices.append(current_slice)
table_slice_nulls.append(current_slice_nulls)
# concatenate column slices and missing mask slices into single packed objects
col_name_iter = column_names
if tqdm is not None:
col_name_iter = tqdm(
col_name_iter,
desc="Concatenating Column Slice Data",
unit="col",
disable=not progress_bar,
)
packed_full_columns = {}
packed_missing_masks = {}
for col_name in col_name_iter:
chunks = tuple(ts.pop(col_name) for ts in table_slices)
array_type = type(chunks[0]) if len(chunks) > 0 else PackedPlainArray
packed_full_columns[col_name] = array_type.concatenate(chunks) # type: ignore
packed_missing_masks[col_name] = PackedBitArray.concatenate(
tuple(
tsn.pop(col_name, PackedBitArray.empty(n))
for tsn, n in zip(table_slice_nulls, rows_per_slice)
)
)
# unpack columns from bytes objects into numpy arrays
col_name_type_iter = zip(column_names, column_types)
if tqdm is not None:
col_name_type_iter = tqdm(
col_name_type_iter,
desc="Unpacking Data",
unit="col",
disable=not progress_bar,
total=n_columns,
)
pandas_data = {}
for col_name, col_type in col_name_type_iter:
# skip strings if setting enabled
if skip_strings and col_type == ValueTypeId.STRING:
del packed_full_columns[col_name]
pandas_data[col_name] = pd.Categorical.from_codes(
codes=np.zeros(sum(rows_per_slice), dtype=np.uint8),
categories=["<SKIPPED>"],
)
continue
# unpack column to array otherwise
packed = packed_full_columns.pop(col_name)
if isinstance(packed, PackedPlainArray):
col_array = unpack_packed_array(packed, strings_as_categories)
elif isinstance(packed, PackedBitArray):
col_array = unpack_bit_array(packed)
else:
raise RuntimeError(
"Unable to parse file correctly, we thought we had a packed "
"array, but we didn't!"
)
pandas_data[col_name] = col_array
# unpack and apply missing masks
col_name_type_iter = zip(column_names, column_types)
if tqdm is not None:
col_name_type_iter = tqdm(
col_name_type_iter,
desc="Handling Missing Values",
unit="col",
disable=not progress_bar,
total=n_columns,
)
for col_name, col_type in col_name_type_iter:
missing_mask = unpack_bit_array(packed_missing_masks.pop(col_name))
if missing_mask.any():
col_array = pandas_data[col_name]
missing_value = (
None
if col_type
in (ValueTypeId.BINARY, ValueTypeId.DECIMAL, ValueTypeId.STRING)
else np.nan
)
needs_copy = (
not col_array.flags.writeable if hasattr(col_array, "flags") else False
)
# convert numpy-native binary array to Python object array for nullability
dtype = "O" if col_type == ValueTypeId.BINARY else None
col_array = pd.Series(col_array, copy=needs_copy, dtype=dtype)
col_array.loc[missing_mask] = missing_value
col_array = col_array.values
pandas_data[col_name] = col_array
# create dataframe and return
df = | pd.DataFrame(pandas_data) | pandas.DataFrame |
"""
#
# scikit_optim.py
#
# Copyright (c) 2018 <NAME>. MIT License.
#
"""
import numpy as np
import os
import pandas as pd
import sys
import time
import warnings
import sklearn.metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from tqdm import tqdm
if not sys.warnoptions:
warnings.simplefilter('ignore')
os.environ['PYTHONWARNINGS'] = 'ignore' # For ConvergenceWarning in CVs.
CPU_ALLOC = {24: 8, 12: 4, 8: 2, 4: 3} # 8->2 good for laptops, 4->3 for RPis.
CPU_USE = CPU_ALLOC.get(os.cpu_count(), 1)
def bucket_data(df, bucket_list=None):
'''
Convenience method: this bins features.
'''
if not bucket_list:
bucket_list = [(col, 10) for col in df.columns]
for col, bin_amt in bucket_list:
df[col] = pd.cut(
df[col],
bins=bin_amt,
right=True,
labels=range(1, bin_amt + 1),
)
return df
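# Illustrative example (column names are assumptions): bin two numeric columns
# into a different number of buckets each; remaining columns are left untouched
# because an explicit bucket_list is given.
#
#   df_binned = bucket_data(df, bucket_list=[('age', 5), ('income', 10)])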
class ModelSelector():
def __init__(self, acc_metric='accuracy_score', num_cv=5, **kwargs):
'''
kwargs: *something* must be passed for each type or it will be ignored.
Can have just check, just ignore, just params, or any combination thereof.
kwarg inputs:
bucket: check, ignore, bucket_list.
min_max_scale: check, ignore, feature_range.
one_hot_encode: check, ignore, categories, bucket_list.
raw: check, ignore.
Example:
kwargs = {
'min_max_scale': {
'ignore': ['RandForest'],
'feature_range': (0, 0,5)
},
'one_hot_encode': {
'check': ['GaussNB', 'MultiNB'],
'categories': [
list(range(1, 11)) if c not in ['multiworld'] else [0, 1]
for c in df_train.columns
],
'bucket_list': [(col, 10) for col in df.columns if col not in ['multiworld']]
},
            'raw': {
'check': ['LogRegress']
}
}
'''
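        # Illustrative end-to-end use (X_train and y_train are placeholders):
        #   ms = ModelSelector(acc_metric='accuracy_score', num_cv=5, **kwargs)
        #   ms.fit(X_train, y_train)
        #   print(ms.best_model, ms.best_params)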
# Unpack the data preparation types and params.
self.run_types = {}
for k in kwargs:
self.run_types[k] = kwargs.get(k, None) # Should contain check, ignore, any params needed.
self.acc_metric = acc_metric # scoring param to optimize on in CV.
self.num_cv = num_cv
self.summary_df_cv = None # best CV-search score.
self.models = None # dict of model objects.
self.params = None # dict of all best params for all evaluated models.
self.best_model = None # string, name of best model.
self.best_params = None # dict of best params for the best model.
def fit(self, X_in, y_in, X_val_in=None, y_val_in=None):
# get lists organized and initialize dicts.
check_list = [
'GMM', 'LogRegress', 'DecTree', 'RandForest',
'SupportVC', 'kNN', 'GaussNB', 'MultiNB'
] # default is all models.
# Pull out models to check and run types, drop ignore models, default to all models.
todo_list = [
(mod, k)
for k in self.run_types
for mod in self.run_types[k].get('check', check_list)
if self.run_types[k] and mod not in self.run_types[k].get('ignore', [])
]
summary_dict_cv = {mod_tup: 0 for mod_tup in todo_list} # eventually turn into df.
model_dict = {mod_tup: 0 for mod_tup in todo_list} # dict of model objects.
params = {mod_tup: 0 for mod_tup in todo_list} # stores params for each model.
        # loop over todo_list and score. Inefficient because re-prepping X.
pbar = tqdm(todo_list, desc='Training models', ncols=150)
for model, prep_method in pbar:
pbar.set_description('{} / {}'.format(model, prep_method))
t_0 = time.time()
mod = globals()[model](acc_metric=self.acc_metric, num_cv=self.num_cv) # Instantiate model class.
# Prep data and fit model.
X = self.data_prep(prep_method, X_in)
if X_val_in is not None: # OoS score case.
X_val = self.data_prep(prep_method, X_val_in)
mod_score = round(mod.score(X, y_in, X_val, y_val_in) * 100, 2)
else: # In-sample score case.
mod_score = round(mod.fit(X, y_in).best_score * 100, 2)
# Store results
summary_dict_cv[(model, prep_method)] = {
'time': time.strftime("%H:%M:%S", time.gmtime(time.time()-t_0)),
self.acc_metric: mod.best_score
}
params[(model, prep_method)] = mod.best_params
model_dict[(model, prep_method)] = mod
pbar.close()
# get df and sort based on perf. store bests.
summ_df_cv = | pd.DataFrame.from_dict(summary_dict_cv, orient='index') | pandas.DataFrame.from_dict |
"""
.. _twitter:
Twitter Data API
================
"""
import logging
from functools import wraps
from twython import Twython
import pandas as pd
from pandas.io.json import json_normalize
TWITTER_LOG_FMT = ('%(asctime)s | %(levelname)s | %(filename)s:%(lineno)d '
'| %(funcName)s | %(message)s')
logging.basicConfig(format=TWITTER_LOG_FMT)
# Functions that depend on 'previous_cursor' and 'next_cursor' to
# navigate requests with a lot of data, request pagination basically.
CURSORED_FUNCTIONS = [
'get_followers_ids',
'get_followers_list',
'get_friends_ids',
'get_friends_list',
'get_list_members',
'get_list_memberships',
'get_list_subscribers',
'get_list_subscriptions',
'get_retweeters_ids',
'show_owned_lists',
]
# Responses that contain a special key (and the name of that key)
# containing the required data and need to be extracted through
# that key, as opposed to other responses where you can easily
# call DataFrame on them directly
SPECIAL_KEY_FUNCS = {
'search': 'statuses',
'get_followers_list': 'users',
'get_friends_list': 'users',
'get_list_members': 'users',
'get_list_subscribers': 'users',
'get_list_memberships': 'lists',
'get_list_subscriptions': 'lists',
'show_owned_lists': 'lists',
}
# Functions that contain an embedded ``user`` key, containing
# 40+ attributes of the user tweeting, listed, retweeted, etc.
USER_DATA_EMBEDDED = {
'get_favorites': 'tweet_',
'get_home_timeline': 'tweet_',
'get_list_memberships': 'list_',
'get_list_statuses': 'tweet_',
'get_list_subscriptions': '',
'get_mentions_timeline': 'tweet_',
'get_retweets': 'tweet_',
'get_user_timeline': 'tweet_',
'lookup_status': 'tweet_',
'retweeted_of_me': 'tweet_',
'search': 'tweet_',
'show_lists': 'list_',
'show_owned_lists': 'list_',
}
DEFAULT_COUNTS = {
'get_favorites': 200,
'get_followers_ids': 5000,
'get_followers_list': 200,
'get_friends_ids': 5000,
'get_friends_list': 200,
'get_home_timeline': 200,
'get_list_members': 5000,
'get_list_memberships': 1000,
'get_list_statuses': 100,
'get_list_subscribers': 5000,
'get_list_subscriptions': 1000,
'get_mentions_timeline': 200,
'get_retweeters_ids': 100,
'get_retweets': 100,
'get_user_timeline': 200,
'lookup_status': 100,
'lookup_user': 100,
'retweeted_of_me': 100,
'search': 100,
'search_users': 20,
'show_lists': 100,
'show_owned_lists': 1000
}
def _expand_entities(df):
if 'tweet_entities' in df:
colnames = ['tweet_entities_' + x for x in ['mentions', 'hashtags',
'urls', 'symbols',
'media']]
entities_df = json_normalize(df['tweet_entities'])
mentions = [', '.join(['@' + x['screen_name'] for x in y])
for y in entities_df['user_mentions']]
hashtags = [', '.join(['#' + x['text'] for x in y])
for y in entities_df['hashtags']]
urls = [', '.join([x['expanded_url'] for x in y])
for y in entities_df['urls']]
symbols = [', '.join(['$' + x['text'] for x in y])
for y in entities_df['symbols']]
if 'media' in entities_df:
entities_df['media'] = entities_df['media'].fillna('')
media = [', '.join([x['media_url'] for x in y]) if y != '' else
y for y in entities_df['media']]
entity_cols = [mentions, hashtags, urls, symbols, media]
else:
entity_cols = [mentions, hashtags, urls, symbols]
col_idx = df.columns.get_loc('tweet_entities')
for j, col in enumerate(entity_cols):
df.insert(col_idx+j+1, colnames[j], col)
return df
def _get_counts(number=None, default=None):
"""Split a number into a list of divisors and the remainder.
The divisor is the default count in this case."""
if not number:
number = 1
div = divmod(number, default)
result = [default for x in range(div[0])]
if div[1] != 0:
return result + [div[1]]
return result
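# For example, _get_counts(450, default=200) returns [200, 200, 50]: two full
# requests at the endpoint's default count plus one for the remainder.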
def make_dataframe(func):
@wraps(func)
def wrapper(count=None, max_id=None, *args, **kwargs):
nonlocal func
twtr = Twython(**wrapper.get_auth_params())
fname = func.__name__
func = eval('twtr.' + fname)
if count is None:
count = DEFAULT_COUNTS[fname]
counts = _get_counts(count, DEFAULT_COUNTS[fname])
responses = []
for i, count in enumerate(counts):
if fname == 'search':
if responses and not responses[-1]['statuses']:
break
max_id = (max_id or None) if i == 0 else (responses[-1]['statuses'][-1]['id'] - 1)
if (fname != 'search') and (fname not in CURSORED_FUNCTIONS):
if responses and len(responses[-1]) == 0:
break
max_id = (max_id or None) if i == 0 else (responses[-1][-1]['id'] - 1)
if fname in CURSORED_FUNCTIONS:
cursor = None if i == 0 else responses[-1]['next_cursor']
max_id = None
else:
cursor = None
kwargs_log = ', '.join([k + '=' + str(v) for k, v in kwargs.items()])
args_log = ', '.join(args)
logging.info(msg=fname + ' | ' + 'Requesting: ' +
'count=' + str(count) + ', max_id=' +
str(max_id) + ', ' + kwargs_log + args_log)
resp = func(count=count,
max_id=max_id,
cursor=cursor,
*args, **kwargs)
responses.append(resp)
if '_ids' in fname:
finallist = []
for sublist in responses:
finallist.extend(sublist['ids'])
finaldict = {'previous_cursor': responses[0]['previous_cursor'],
'next_cursor': responses[-1]['next_cursor'],
'ids': finallist}
return finaldict
final_df = pd.DataFrame()
for resp in responses:
if SPECIAL_KEY_FUNCS.get(fname):
resp_df = pd.DataFrame(resp[SPECIAL_KEY_FUNCS.get(fname)])
if fname in USER_DATA_EMBEDDED:
resp_df.columns = [USER_DATA_EMBEDDED[fname] + col for col in resp_df.columns]
user_df = pd.DataFrame([x['user'] for x in resp[SPECIAL_KEY_FUNCS.get(fname)]])
user_df.columns = ['user_' + col for col in user_df.columns]
temp_df = pd.concat([resp_df, user_df], axis=1, sort=False)
else:
temp_df = resp_df
else:
resp_df = pd.DataFrame(resp)
if fname in USER_DATA_EMBEDDED:
resp_df.columns = [USER_DATA_EMBEDDED[fname] + x for x in resp_df.columns]
user_df = pd.DataFrame([x['user'] for x in resp])
user_df.columns = ['user_' + x for x in user_df.columns]
temp_df = pd.concat([resp_df, user_df], axis=1)
else:
temp_df = resp_df
final_df = final_df.append(temp_df, sort=False, ignore_index=True)
for col in final_df:
if 'created_at' in col:
final_df[col] = pd.to_datetime(final_df[col])
for col in final_df:
if 'source' in col:
final_df[col + '_url'] = final_df[col].str.extract('<a href="(.*)" rel=')[0]
final_df[col] = final_df[col].str.extract('nofollow">(.*)</a>')[0]
if 'tweet_entities' in final_df:
return _expand_entities(final_df)
return final_df
return wrapper
def authenticate(func):
"""Used internally, please use set_auth_params for authentication."""
auth_params = {}
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
def set_auth_params(**params):
nonlocal auth_params
auth_params.update(params)
def get_auth_params():
return auth_params
wrapper.set_auth_params = set_auth_params
wrapper.get_auth_params = get_auth_params
return wrapper
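# Typical setup (credential values are placeholders): because @make_dataframe
# uses functools.wraps, each wrapped endpoint exposes set_auth_params and keeps
# its own credentials, e.g.
#
#   get_favorites.set_auth_params(app_key='APP_KEY', app_secret='APP_SECRET',
#                                 oauth_token='TOKEN',
#                                 oauth_token_secret='TOKEN_SECRET')
#   favs_df = get_favorites(screen_name='twitter', count=400)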
@authenticate
def get_application_rate_limit_status(consumed_only=True):
"""
Returns the current rate limits for methods belonging to the
specified resource families.
:param consumed_only: Whether or not to return only items that
have been consumed. Otherwise returns the full list.
https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
"""
twtr = Twython(**get_application_rate_limit_status.get_auth_params())
ratelimit = twtr.get_application_rate_limit_status()
limit_df = pd.DataFrame()
for resource in ratelimit['resources']:
temp_df = pd.DataFrame(ratelimit['resources'][resource]).T
limit_df = limit_df.append(temp_df, sort=False)
limit_df['reset'] = pd.to_datetime(limit_df['reset'], unit='s')
limit_df['resource'] = limit_df.index.str.split('/').str[1]
limit_df.index.name = 'endpoint'
limit_df = limit_df.sort_values(['resource'])
limit_df = limit_df.reset_index()
if consumed_only:
print(' '*12, 'Rate limit as of:',
pd.Timestamp.now(tz='UTC').strftime('%Y-%m-%-d %H:%M:%S'))
return limit_df[limit_df['limit'].ne(limit_df['remaining'])]
return limit_df
@authenticate
def get_available_trends():
"""
Returns the locations that Twitter has trending topic information for.
https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-available
"""
twtr = Twython(**get_available_trends.get_auth_params())
available_trends = twtr.get_available_trends()
trends_df = pd.DataFrame(available_trends)
trends_df['code'] = [x['code'] for x in trends_df['placeType']]
trends_df['place_type'] = [x['name'] for x in trends_df['placeType']]
del trends_df['placeType']
trends_df = trends_df.sort_values(['country', 'place_type', 'name'])
trends_df = trends_df.reset_index(drop=True)
return trends_df
@make_dataframe
@authenticate
def get_favorites(user_id=None, screen_name=None, count=None, since_id=None,
max_id=None, include_entities=None, tweet_mode=None):
"""
Returns the 20 most recent Tweets favorited by the authenticating
or specified user.
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - optional) The screen name of the user for whom
to return results.
:param count: (int - optional) Specifies the number of results to retrieve.
:param since_id: (int - optional) Returns results with an ID greater than
(that is, more recent than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of Tweets has
    occurred since the since_id, the since_id will be forced to the oldest ID
available.
:param max_id: (int - optional) Returns results with an ID less than (that
is, older than) or equal to the specified ID.
:param include_entities: (bool - optional) The entities node will be
omitted when set to False .
:param tweet_mode: (str - optional) Valid request values are compat and
extended, which give compatibility mode and extended mode, respectively for
Tweets that contain over 140 characters
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-favorites-list
"""
pass
@make_dataframe
@authenticate
def get_followers_ids(user_id=None, screen_name=None, cursor=None,
stringify_ids=None, count=None):
"""
Returns a cursored collection of user IDs for every user
following the specified user.
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - optional) The screen name of the user for whom
to return results.
:param cursor: (cursor - semi-optional) Causes the list of connections to
be broken into pages of no more than 5000 IDs at a time. The number of IDs
returned is not guaranteed to be 5000 as suspended users are filtered out after
connections are queried. If no cursor is provided, a value of -1 will be
assumed, which is the first “page.” The response from the API will include a
previous_cursor and next_cursor to allow paging back and forth. See Using
cursors to navigate collections for more information.
:param stringify_ids: (bool - optional) Some programming environments will
not consume Twitter IDs due to their size. Provide this option to have IDs
returned as strings instead. More about Twitter IDs.
:param count: (int - optional) Specifies the number of results to retrieve.
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-ids
"""
pass
@make_dataframe
@authenticate
def get_followers_list(user_id=None, screen_name=None, cursor=None, count=None,
skip_status=None, include_user_entities=None):
"""
Returns a cursored collection of user objects for users
following the specified user.
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - optional) The screen name of the user for whom
to return results.
:param cursor: (cursor - semi-optional) Causes the results to be broken
into pages. If no cursor is provided, a value of -1 will be assumed, which is
the first “page.” The response from the API will include a previous_cursor and
next_cursor to allow paging back and forth. See Using cursors to navigate
collections for more information.
:param count: (int - optional) Specifies the number of results to retrieve.
:param skip_status: (bool - optional) When set to True, statuses will not
be included in the returned user objects. If set to any other value, statuses
will be included.
:param include_user_entities: (bool - optional) The user object entities
node will not be included when set to False.
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-list
"""
pass
@make_dataframe
@authenticate
def get_friends_ids(user_id=None, screen_name=None, cursor=None,
stringify_ids=None, count=None):
"""
Returns a cursored collection of user IDs for every user the
specified user is following (otherwise known as their "friends").
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - optional) The screen name of the user for whom
to return results.
:param cursor: (cursor - semi-optional) Causes the list of connections to
be broken into pages of no more than 5000 IDs at a time. The number of IDs
returned is not guaranteed to be 5000 as suspended users are filtered out after
connections are queried. If no cursor is provided, a value of -1 will be
assumed, which is the first “page.” The response from the API will include a
previous_cursor and next_cursor to allow paging back and forth. See Using
cursors to navigate collections for more information.
:param stringify_ids: (bool - optional) Some programming environments will
not consume Twitter IDs due to their size. Provide this option to have IDs
returned as strings instead. More about Twitter IDs.
:param count: (int - optional) Specifies the number of results to retrieve.
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids
"""
pass
@make_dataframe
@authenticate
def get_friends_list(user_id=None, screen_name=None, cursor=None, count=None,
skip_status=None, include_user_entities=None):
"""
Returns a cursored collection of user objects for every user the
specified user is following (otherwise known as their "friends").
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - optional) The screen name of the user for whom
to return results.
:param cursor: (cursor - semi-optional) Causes the results to be broken
into pages. If no cursor is provided, a value of -1 will be assumed, which is
the first “page.” The response from the API will include a previous_cursor and
next_cursor to allow paging back and forth. See Using cursors to navigate
collections for more information.
:param count: (int - optional) Specifies the number of results to retrieve.
:param skip_status: (bool - optional) When set to True statuses will not be
included in the returned user objects.
:param include_user_entities: (bool - optional) The user object entities
node will not be included when set to False.
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-list
"""
pass
@make_dataframe
@authenticate
def get_home_timeline(count=None, since_id=None, max_id=None, trim_user=None,
exclude_replies=None, include_entities=None, tweet_mode=None):
"""
Returns a collection of the most recent Tweets and retweets
posted by the authenticating user and the users they follow.
:param count: (int - optional) Specifies the number of results to retrieve.
:param since_id: (int - optional) Returns results with an ID greater than
(that is, more recent than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of Tweets has
    occurred since the since_id, the since_id will be forced to the oldest ID
available.
:param max_id: (int - optional) Returns results with an ID less than (that
is, older than) or equal to the specified ID.
:param trim_user: (bool - optional) When set to True, each Tweet returned
in a timeline will include a user object including only the status authors
numerical ID. Omit this parameter to receive the complete user object.
:param exclude_replies: (bool - optional) This parameter will prevent
replies from appearing in the returned timeline. Using exclude_replies with the
count parameter will mean you will receive up-to count Tweets — this is because
the count parameter retrieves that many Tweets before filtering out retweets
and replies.
:param include_entities: (bool - optional) The entities node will not be
included when set to False.
:param tweet_mode: (str - optional) Valid request values are compat and
extended, which give compatibility mode and extended mode, respectively for
Tweets that contain over 140 characters
https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-home_timeline
"""
pass
@make_dataframe
@authenticate
def get_list_members(list_id=None, slug=None, owner_screen_name=None, owner_id=None,
count=None, cursor=None, include_entities=None, skip_status=None):
"""
Returns the members of the specified list.
:param list_id: (str - required) The numerical id of the list.
:param slug: (str - required) You can identify a list by its slug instead
of its numerical id. If you decide to do so, note that you’ll also have to
specify the list owner using the owner_id or owner_screen_name parameters.
:param owner_screen_name: (str - optional) The screen name of the user who
owns the list being requested by a slug.
:param owner_id: (int - optional) The user ID of the user who owns the list
being requested by a slug.
:param count: (int - optional) Specifies the number of results to retrieve.
:param cursor: (cursor - semi-optional) Causes the collection of list
members to be broken into “pages” of consistent sizes (specified by the count
parameter). If no cursor is provided, a value of -1 will be assumed, which is
the first “page.” The response from the API will include a previous_cursor and
next_cursor to allow paging back and forth. See Using cursors to navigate
collections for more information.
:param include_entities: (bool - optional) The entities node will not be
included when set to False.
:param skip_status: (bool - optional) When set to True statuses will not be
included in the returned user objects.
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-members
"""
pass
@make_dataframe
@authenticate
def get_list_memberships(user_id=None, screen_name=None, count=None,
cursor=None, filter_to_owned_lists=None):
"""
Returns the lists the specified user has been added to.
:param user_id: (int - optional) The ID of the user for whom to return
results. Helpful for disambiguating when a valid user ID is also a valid screen
name.
:param screen_name: (str - optional) The screen name of the user for whom
to return results. Helpful for disambiguating when a valid screen name is also
a user ID.
:param count: (int - optional) Specifies the number of results to retrieve.
:param cursor: (cursor - optional) Breaks the results into pages. Provide a
value of -1 to begin paging. Provide values as returned in the response body’s
next_cursor and previous_cursor attributes to page back and forth in the list.
It is recommended to always use cursors when the method supports them. See
Cursoring for more information.
:param filter_to_owned_lists: (bool - optional) When True, will return just
lists the authenticating user owns, and the user represented by user_id or
screen_name is a member of.
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-memberships
"""
pass
@make_dataframe
@authenticate
def get_list_statuses(list_id=None, slug=None, owner_screen_name=None, owner_id=None,
since_id=None, max_id=None, count=None, include_entities=None,
include_rts=None, tweet_mode=None):
"""
Returns a timeline of tweets authored by members of the specified list.
:param list_id: (str - required) The numerical id of the list.
:param slug: (str - required) You can identify a list by its slug instead
of its numerical id. If you decide to do so, note that you’ll also have to
specify the list owner using the owner_id or owner_screen_name parameters.
:param owner_screen_name: (str - optional) The screen name of the user who
owns the list being requested by a slug .
:param owner_id: (int - optional) The user ID of the user who owns the list
being requested by a slug .
:param since_id: (int - optional) Returns results with an ID greater than
(that is, more recent than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of Tweets has
    occurred since the since_id, the since_id will be forced to the oldest ID
available.
:param max_id: (int - optional) Returns results with an ID less than (that
is, older than) or equal to the specified ID.
:param count: (int - optional) Specifies the number of results to retrieve.
:param include_entities: (bool - optional) Entities are ON by default in
API 1.1, each tweet includes a node called “entities”. This node offers a
variety of metadata about the tweet in a discreet structure, including:
user_mentions, urls, and hashtags. You can omit entities from the result by
using include_entities=False
:param include_rts: (bool - optional) When set to True, the list timeline
will contain native retweets (if they exist) in addition to the standard stream
of tweets. The output format of retweeted tweets is identical to the
representation you see in home_timeline.
:param tweet_mode: (str - optional) Valid request values are compat and
extended, which give compatibility mode and extended mode, respectively for
Tweets that contain over 140 characters
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-statuses
"""
pass
@make_dataframe
@authenticate
def get_list_subscribers(list_id=None, slug=None, owner_screen_name=None, owner_id=None,
count=None, cursor=None, include_entities=None, skip_status=None):
"""
Returns the subscribers of the specified list.
:param list_id: (str - required) The numerical id of the list.
:param slug: (str - required) You can identify a list by its slug instead
of its numerical id. If you decide to do so, note that you’ll also have to
specify the list owner using the owner_id or owner_screen_name parameters.
:param owner_screen_name: (str - optional) The screen name of the user who
    owns the list being requested by a slug.
    :param owner_id: (int - optional) The user ID of the user who owns the list
    being requested by a slug.
:param count: (int - optional) Specifies the number of results to retrieve.
:param cursor: (cursor - optional) Breaks the results into pages. A single
page contains 20 lists. Provide a value of -1 to begin paging. Provide values
as returned in the response body’s next_cursor and previous_cursor attributes
to page back and forth in the list. See Using cursors to navigate collections
for more information.
:param include_entities: (bool - optional) When set to True, each tweet
will include a node called “entities”. This node offers a variety of metadata
    about the tweet in a discrete structure, including: user_mentions, urls, and
hashtags. While entities are opt-in on timelines at present, they will be made
a default component of output in the future. See Tweet Entities for more
details.
    :param skip_status: (bool - optional) When set to True, statuses will not be
included in the returned user objects.
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-subscribers
"""
pass
@make_dataframe
@authenticate
def get_list_subscriptions(user_id=None, screen_name=None, count=None,
cursor=None):
"""
Obtain a collection of the lists the specified user is subscribed to.
:param user_id: (int - optional) The ID of the user for whom to return
results. Helpful for disambiguating when a valid user ID is also a valid screen
name.
:param screen_name: (str - optional) The screen name of the user for whom
to return results. Helpful for disambiguating when a valid screen name is also
a user ID.
:param count: (int - optional) Specifies the number of results to retrieve.
:param cursor: (cursor - optional) Breaks the results into pages. Provide a
value of -1 to begin paging. Provide values as returned in the response body’s
next_cursor and previous_cursor attributes to page back and forth in the list.
It is recommended to always use cursors when the method supports them. See
Cursoring for more information.
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-subscriptions
"""
pass
@make_dataframe
@authenticate
def get_mentions_timeline(count=None, since_id=None, max_id=None,
trim_user=None, include_entities=None, tweet_mode=None):
"""
    Returns the 20 most recent mentions (tweets containing a user's
@screen_name) for the authenticating user.
:param count: (int - optional) Specifies the number of results to retrieve.
:param since_id: (int - optional) Returns results with an ID greater than
(that is, more recent than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of Tweets has
    occurred since the since_id, the since_id will be forced to the oldest ID
available.
:param max_id: (int - optional) Returns results with an ID less than (that
is, older than) or equal to the specified ID.
:param trim_user: (bool - optional) When set to True, each tweet returned
in a timeline will include a user object including only the status authors
numerical ID. Omit this parameter to receive the complete user object.
:param include_entities: (bool - optional) The entities node will not be
included when set to False.
:param tweet_mode: (str - optional) Valid request values are compat and
extended, which give compatibility mode and extended mode, respectively for
Tweets that contain over 140 characters
https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-mentions_timeline
"""
pass
@authenticate
def get_place_trends(ids, exclude=None):
"""
Returns the top 10 trending topics for a specific WOEID, if
trending information is available for it.
    :param ids: (int or list of ints - required) The Yahoo! Where On Earth ID(s)
    of the locations to return trending information for. Global information is
    available by using 1 as the WOEID. Run ``get_available_trends()`` for the
    full listing of available locations.
:param exclude: (str - optional) Setting this equal to hashtags will remove
all hashtags from the trends list.
https://developer.twitter.com/en/docs/trends/trends-for-location/api-reference/get-trends-place
"""
twtr = Twython(**get_place_trends.get_auth_params())
trends_df = pd.DataFrame()
if isinstance(ids, int):
ids = [ids]
for place_id in ids:
place_trends = twtr.get_place_trends(id=place_id)
trend_df = | pd.DataFrame(place_trends[0]['trends']) | pandas.DataFrame |
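# --- Illustrative usage sketch (added for clarity; not part of the original
# --- sample). The WOEIDs are examples and credentials are assumed to be
# --- supplied by the @authenticate decorator.
# trends = get_place_trends([1, 44418], exclude='hashtags')  # worldwide + London
# trends.head()   # top trending topics per WOEID, as described in the docstring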
import argparse
import pandas as pd
from forexconnect import ForexConnect, fxcorepy
import common_samples
def parse_args():
parser = argparse.ArgumentParser(description='Process command parameters.')
common_samples.add_main_arguments(parser)
common_samples.add_instrument_timeframe_arguments(parser)
common_samples.add_date_arguments(parser)
common_samples.add_max_bars_arguments(parser)
args = parser.parse_args()
return args
def ma(name, df, n):
column_ma = pd.Series(df[name].rolling(window=n).mean(),
name='MA_' + name + '_' + str(n))
df = df.join(column_ma)
return df
def adx(df, n):
i = 0
upi = []
doi = []
while i + 1 <= df.index[-1]:
up_move = df.at[i + 1, 'High'] - df.at[i, 'High']
do_move = df.at[i, 'Low'] - df.at[i + 1, 'Low']
if up_move > do_move and up_move > 0:
upd = up_move
else:
upd = 0
upi.append(upd)
if do_move > up_move and do_move > 0:
dod = do_move
else:
dod = 0
doi.append(dod)
i = i + 1
i = 0
tr_l = [0]
while i < df.index[-1]:
tr = max(df.at[i + 1, 'High'],
df.at[i, 'Close']) - min(df.at[i + 1, 'Low'],
df.at[i, 'Close'])
tr_l.append(tr)
i = i + 1
tr_s = pd.Series(tr_l)
atr = tr_s.ewm(span=n, min_periods=n).mean()
upi = pd.Series(upi)
doi = pd.Series(doi)
posdi = upi.ewm(span=n, min_periods=n - 1).mean()/atr
negdi = doi.ewm(span=n, min_periods=n - 1).mean()/atr
adx_r = 100 * abs(posdi - negdi) / (posdi + negdi)
rowadx = adx_r.ewm(span=n, min_periods=n - 1)
meanadx = rowadx.mean()
columnadx = meanadx.rename('ADX_' + str(n) + '_' + str(n))
df = df.join(columnadx)
return df
def macd(name, df, n_fast, n_slow, n_signal):
emafast = df[name].ewm(span=n_fast, min_periods=n_slow - 1).mean()
emaslow = df[name].ewm(span=n_slow, min_periods=n_slow - 1).mean()
columnmacd = pd.Series(emafast - emaslow,
name='MACD_' + str(n_fast) + '_' + str(n_slow))
rowmacd = columnmacd.ewm(span=n_signal, min_periods=n_signal - 1)
meanmacd = rowmacd.mean()
macdsign = meanmacd.rename('MACDsign_' + str(n_fast) + '_' + str(n_slow))
macddiff = pd.Series(columnmacd - macdsign,
name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(columnmacd)
df = df.join(macdsign)
df = df.join(macddiff)
return df
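# --- Added illustrative example (not part of the original sample). The helper
# --- below is hypothetical and only shows how ma/adx/macd compose; it assumes a
# --- 'High'/'Low'/'Close' frame with a default RangeIndex, as the code above expects.
def _indicator_demo():
    # Build a small synthetic price history and apply the indicator helpers.
    close = pd.Series(range(200), dtype=float) * 0.1 + 100.0
    demo = pd.DataFrame({'Close': close, 'High': close + 0.5, 'Low': close - 0.5})
    demo = ma('Close', demo, 14)            # adds 'MA_Close_14'
    demo = adx(demo, 14)                    # adds 'ADX_14_14'
    demo = macd('Close', demo, 12, 26, 9)   # adds 'MACD_12_26', 'MACDsign_12_26', 'MACDdiff_12_26'
    return demo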
def rsi(name, df, n):
i = 0
upi = [0]
doi = [0]
while i + 1 <= df.index[-1]:
diff = df.at[i + 1, name] - df.at[i, name]
if diff > 0:
upd = diff
else:
upd = 0
upi.append(upd)
if diff < 0:
dod = -diff
else:
dod = 0
doi.append(dod)
i = i + 1
upi = pd.Series(upi)
doi = | pd.Series(doi) | pandas.Series |
#!/usr/bin/env python
"""Extract subcatchment runoff summary results from SWMM report file.
Reads subcatchment geometries from a GisToSWMM5 generated subcatchment geometry
file (*_subcatchments.wkt) file and subcatchment runoff results from a SWMM
report (by default .rpt) file. The script merges the information and saves it
as a Well-Known-Text (.wkt) file.
Copyright (C) 2018 <NAME>, Aalto University School of Engineering
"""
import os
import sys
import re
import pandas as pd
# Check input parameters
if (len(sys.argv) != 4):
print("Usage:\n"
"./ExtractSubcatchmentResults.py [PATH TO *_subcatchments.wkt FILE] "
"[PATH TO SWMM *.rpt OUTPUT FILE] [PATH TO OUTPUT FILE AS *.wkt]")
sys.exit()
elif (not sys.argv[1].lower().endswith('.wkt')):
print('Error:\n'
'Second argument has to be a [PATH TO *_subcatchments.wkt FILE]')
sys.exit()
elif (not sys.argv[2].lower().endswith('.rpt')):
print('Error:\n'
'Third argument has to be a [PATH TO SWMM *.rpt OUTPUT FILE]')
sys.exit()
elif (not sys.argv[3].lower().endswith('.wkt')):
print('Error:\n'
'Final argument has to be a [PATH TO OUTPUT FILE AS *.wkt]')
sys.exit()
# Read subcatchment spatial information from the wkt file into a dataframe
df1 = pd.read_csv(sys.argv[1], sep=';')
df1.drop(df1.columns[3:], axis=1, inplace=True) # Remove extra columns
# Read subcatchment runoff summary results ...
data = []
with open(sys.argv[2], 'rt') as rpt_file:
for line in rpt_file:
if ' Subcatchment Runoff Summary' in line:
for idx, row in enumerate(rpt_file):
if idx < 3: # Skip extra lines after header
continue
if idx == 3: # Read first line of column headers
subcatchment_headers_1 = row.split()
if idx == 4: # Read second line of column headers
subcatchment_headers_2 = row.split()
if idx == 5: # Read column units
                    subcatchment_units = re.split(r'\s{2,}', row.strip())
if idx < 7: # Skip extra lines after header
continue
if row.isspace(): # Stop looking after empty line
break
if row.startswith(' ---'): # Skip separator lines
break
else: # Save data
data.append(row.split())
# Create attribute names from header info
subcatchment_units.pop(0)
subcatchment_headers = [a + '' + b for a, b in zip(
subcatchment_headers_1, subcatchment_headers_2)]
subcatchment_units.append('-')
subcatchment_headers = [a + '_' + b for a, b in zip(
subcatchment_headers, subcatchment_units)]
subcatchment_headers.insert(0, 'name')
# Create dataframe from data
df2 = pd.DataFrame(data, columns=subcatchment_headers)
df2[subcatchment_headers[1:]] = \
df2[subcatchment_headers[1:]].astype(float, errors='ignore')
# Merge spatial dataframe with data dataframe
df3 = | pd.merge(df1, df2, on='name') | pandas.merge |
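# --- Hedged continuation sketch (the original sample is truncated above) ---
# One plausible final step, matching the docstring's promise to save the merged
# result as a Well-Known-Text file; the separator mirrors the input format:
# df3.to_csv(sys.argv[3], sep=';', index=False)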
import pandas as pd
from utils.constants import *
def parse_devices(filename: str) -> pd.DataFrame:
data = | pd.read_json(filename, orient="records") | pandas.read_json |
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from sklearn.metrics import r2_score
import warnings
from scipy.interpolate import interp1d
import numpy as np
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District Of Columbia': 'DC',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
def get_state_fips():
# Source: US census
# Link: www.census.gov/geographies/reference-files/2017/demo/popest/2017-fips.html
# File: 2017 State, County, Minor Civil Division, and Incorporated Place FIPS Codes
    # Note: .xlsx file header was removed and sheet was exported to csv
fips_data = pd.read_csv("data/geodata/all-geocodes-v2017.csv",encoding = "ISO-8859-1", dtype={'State Code (FIPS)': str, 'County Code (FIPS)': str})
# Map 040 level fips code to state name in dictionary
state_data = fips_data[fips_data['Summary Level'] == 40].copy(deep=True)
state_data['state_abbrev'] = state_data['Area Name (including legal/statistical area description)'].apply(lambda x : us_state_abbrev[x])
state_map = pd.Series(state_data['State Code (FIPS)'].values,index=state_data['state_abbrev']).to_dict()
state_map['AS'] = "60"
state_map['GU'] = "66"
state_map['MP'] = "69"
state_map['PR'] = "72"
state_map['VI'] = "78"
# Get all county fips codes
fips_data = fips_data[fips_data['Summary Level'] == 50]
fips_data.insert(0, 'FIPS', fips_data['State Code (FIPS)'] + fips_data['County Code (FIPS)'])
fips_data = fips_data[['FIPS', 'State Code (FIPS)']]
return state_map, fips_data
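# --- Illustrative usage sketch (added; assumes data/geodata/all-geocodes-v2017.csv
# --- is present as described in the comments above) ---
# state_map, county_fips = get_state_fips()
# state_map['VA']        # 040-level state FIPS code for Virginia, e.g. '51'
# county_fips.head()     # county FIPS codes paired with their state code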
def align_rt(county_rt):
print(" • Loading input Rt, testing, and cases datasets")
#county_rt = pd.read_csv("data/Rt/rt_data.csv", dtype={"FIPS":str})
#county_rt = county_rt[~county_rt['RtIndicator'].isnull()]
#county_rt['state_rt'] = county_rt['state_rt'].fillna(method='ffill')
#print(county_rt)
#print(len(county_rt[county_rt['FIPS'] == "01001"]))
#print(county_rt.groupby("FIPS").tail(1)['date'].unique())
case_data = | pd.read_csv("data/JHU/jhu_data.csv", dtype={"FIPS":str}) | pandas.read_csv |
import logging
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import List
import requests
import numpy as np
import pandas as pd
from catboost import CatBoost, Pool
from metaspace import SMInstance
from sm.engine.annotation.diagnostics import (
get_dataset_diagnostics,
DiagnosticType,
DiagnosticImageFormat,
DiagnosticImageKey,
)
from sm.engine.annotation.fdr import run_fdr_ranking_labeled
from sm.engine.annotation.scoring_model import add_derived_features
logger = logging.getLogger(__name__)
def _unpack_fdr_diagnostics(fdr_diagnostic):
dfs = {
img['key']: pd.read_parquet(BytesIO(requests.get(img['url']).content))
for img in fdr_diagnostic['images']
if img['format'] == DiagnosticImageFormat.PARQUET
}
decoy_map_df = dfs[DiagnosticImageKey.DECOY_MAP_DF]
formula_map_df = dfs[DiagnosticImageKey.FORMULA_MAP_DF]
metrics_df = dfs[DiagnosticImageKey.METRICS_DF]
return decoy_map_df, formula_map_df, metrics_df
def get_fdr_diagnostics_local(dataset_id):
diagnostics = get_dataset_diagnostics(dataset_id)
fdr_diagnostics = [diag for diag in diagnostics if diag['type'] == DiagnosticType.FDR_RESULTS]
assert len(fdr_diagnostics) == 1, 'This code only supports datasets run with a single molDB'
return _unpack_fdr_diagnostics(fdr_diagnostics[0])
def get_fdr_diagnostics_remote(sm: SMInstance, dataset_id: str):
diagnostics = sm.dataset(id=dataset_id).diagnostics(False)
fdr_diagnostics = [diag for diag in diagnostics if diag['type'] == DiagnosticType.FDR_RESULTS]
assert len(fdr_diagnostics) == 1, 'This code only supports datasets run with a single molDB'
return _unpack_fdr_diagnostics(fdr_diagnostics[0])
def get_many_fdr_diagnostics_remote(sm: SMInstance, dataset_ids: List[str]):
errors = []
with ThreadPoolExecutor() as executor:
def _get_ds(i, ds_id):
print(f'Retrieving dataset {i}/{len(dataset_ids)}: {ds_id}')
try:
return ds_id, *get_fdr_diagnostics_remote(sm, ds_id)
except Exception as e:
logger.exception(f'Error retrieving dataset {ds_id}: {e}')
return ds_id, e
for ret in executor.map(_get_ds, range(len(dataset_ids)), dataset_ids):
if not isinstance(ret[1], Exception):
yield ret
else:
errors.append(ret)
print('Errors:', errors)
def get_ranking_data(ds_diags, features):
def _process_ds(args):
i, (ds_id, decoy_map_df, formula_map_df, metrics_df) = args
print(f'Processing dataset {i}: {ds_id}')
_groups = []
rankings = list(decoy_map_df.groupby('tm'))
for target_modifier, map_df in rankings:
targets = map_df[['formula', 'tm']].rename(columns={'tm': 'modifier'}).drop_duplicates()
decoys = map_df[['formula', 'dm']].rename(columns={'dm': 'modifier'})
decoy_sample_size = len(decoys) / len(targets)
# v1 FDR takes the median of many separate rankings for the different decoy groups
decoys['decoy_i'] = np.arange(len(decoys)) % decoy_sample_size
target_df = targets.merge(formula_map_df, on=['formula', 'modifier']).merge(
metrics_df, left_on='formula_i', right_index=True
)
decoy_df = decoys.merge(formula_map_df, on=['formula', 'modifier']).merge(
metrics_df, left_on='formula_i', right_index=True
)
# Remove MSM==0 annotations as they're likely non-detections
target_df = target_df[lambda df: (df.chaos > 0) & (df.spatial > 0) & (df.spectral > 0)]
decoy_df = decoy_df[lambda df: (df.chaos > 0) & (df.spatial > 0) & (df.spectral > 0)]
# Sanity check: Skip this group if there are <10 annotations that would get FDR<=20%
# as it's an indicator that the data is bad for some reason (e.g. this adduct shouldn't
# form at all with this instrument/sample type)
all_df = pd.concat([target_df.assign(target=True), decoy_df.assign(target=False)])
all_df['fdr'] = run_fdr_ranking_labeled(
all_df.chaos * all_df.spatial * all_df.spectral,
all_df.target,
decoy_sample_size,
rule_of_succession=True,
monotonic=True,
)
if np.count_nonzero(all_df.fdr[all_df.target] <= 0.2) < 10:
print(
f'Skipping {ds_id} {target_modifier} as there are less than 10 FDR<=20% targets'
)
continue
if np.count_nonzero(all_df.fdr[~all_df.target] <= 0.5) < 10:
print(
f'Skipping {ds_id} {target_modifier} as there are less than 10 FDR<=50% decoys'
)
continue
# Add FDR metrics
add_derived_features(target_df, decoy_df, decoy_sample_size, features)
group_name = f'{ds_id},{target_modifier}'
merged_df = pd.concat(
[
target_df.assign(target=1.0, group_name=group_name, ds_id=ds_id),
decoy_df.assign(target=0.0, group_name=group_name, ds_id=ds_id),
],
ignore_index=True,
)
_groups.append(merged_df)
return _groups
with ThreadPoolExecutor() as executor:
groups = []
for result in executor.map(_process_ds, enumerate(ds_diags)):
groups.extend(result)
groups_df = pd.concat(groups, ignore_index=True)
groups_df['ds_id'] = groups_df.ds_id.astype('category')
groups_df['group_name'] = groups_df.group_name.astype('category')
return groups_df
def get_cv_splits(ds_ids, n_folds=5, n_shuffles=1):
np.random.seed(123)
splits = []
for i in range(n_shuffles):
ds_ids = np.sort(np.unique(ds_ids))
np.random.shuffle(ds_ids)
ds_bins = np.linspace(0, n_folds, len(ds_ids), endpoint=False).astype('i')
splits.extend((ds_ids[ds_bins != i], ds_ids[ds_bins == i]) for i in range(n_folds))
return splits
def make_pairs(df, n_per_group=10000, max_n=1000000):
np.random.seed(42)
group_names = df.group_name.unique()
grps = df.groupby(['group_name', df.target == 1], observed=True).indices
pair_sets = []
for group_name in group_names:
target_idxs = grps.get((group_name, True), [])
decoy_idxs = grps.get((group_name, False), [])
if len(decoy_idxs) > 0 and len(target_idxs) > 0:
n_candidates = int(
n_per_group * 2
) # Generate more than needed in case some aren't unique
if len(decoy_idxs) * len(target_idxs) > n_candidates:
# More combinations than requested pairs - select randomly
pairs = np.hstack(
[
np.random.choice(target_idxs, n_candidates)[:, np.newaxis],
np.random.choice(decoy_idxs, n_candidates)[:, np.newaxis],
]
)
pairs = np.unique(pairs, axis=0)
else:
# Fewer combinations than requested pairs - select all combinations
pairs = np.hstack(
[
np.repeat(target_idxs, len(decoy_idxs))[:, np.newaxis],
np.tile(decoy_idxs, len(target_idxs))[:, np.newaxis],
]
)
if len(pairs) > n_per_group:
pairs = pairs[np.random.choice(len(pairs), n_per_group, replace=False), :]
pair_sets.append(pairs)
set_counts = np.array([len(p) for p in pair_sets])
max_per_set = np.max(set_counts)
# If there are too many pairs, reduce the maximum allowed size of each pair_set so that
# smaller sets become more fairly represented in the re-balancing
while np.sum(np.minimum(set_counts, max_per_set)) > max_n:
surplus = np.sum(np.minimum(set_counts, max_per_set)) - max_n
amount_to_subtract = max(1, surplus // len(set_counts))
max_per_set -= amount_to_subtract
# pylint: disable=consider-using-enumerate # Would be misleading as pair_sets[i] is reassigned
for i in range(len(pair_sets)):
if len(pair_sets[i]) > max_per_set:
pair_sets[i] = pair_sets[i][
np.random.choice(len(pair_sets[i]), max_per_set, replace=False)
]
pairs = np.vstack(pair_sets)
return pairs
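# --- Illustrative workflow sketch (added; not part of the original module) ---
# The names `sm`, `dataset_ids`, `features`, and `cb_params` are assumptions
# standing in for a configured SMInstance, a list of dataset ids, the feature
# column names, and CatBoost parameters.
#
# ds_diags = list(get_many_fdr_diagnostics_remote(sm, dataset_ids))
# groups_df = get_ranking_data(ds_diags, features)
# splits = get_cv_splits(groups_df.ds_id, n_folds=5)
# pairs = make_pairs(groups_df)          # rows of [target_row_idx, decoy_row_idx]
# results_df = cv_train(groups_df, splits, features, cb_params)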
def cv_train(metrics_df, splits, features, cb_params):
def run_split(i):
train_ds_ids, eval_ds_ids = splits[i]
model = train_catboost_model(metrics_df, train_ds_ids, eval_ds_ids, features, cb_params)
best_score = model.get_best_score()
return {
'best_iteration': model.get_best_iteration(),
'train': next(iter(best_score.get('learn', {}).values()), None) if best_score else None,
'validate': next(iter(best_score.get('validation', {}).values()), None)
if best_score
else None,
'model': model,
}
# CatBoost often only uses 2-3 cores, so run two at once to maximize throughput
with ThreadPoolExecutor(2) as executor:
results = list(executor.map(run_split, range(len(splits))))
return | pd.DataFrame(results) | pandas.DataFrame |
"""
This module provides a helper object to manage an updateable timechart search
through the export API which doesn't support aggregated live searches.
NOTE: If you stumbled upon this, know that this is pretty much just a POC/playground.
"""
import json
from threading import Lock
from snaptime import snap_tz
import tzlocal
try:
import pandas as pd
except ImportError as e:
raise ImportError(f"This feature requires the optional extra `pandas` package to be installed: {str(e)}")
from .utils import parse_ts
import structlog
logger = structlog.getLogger(__name__)
class WindowedTimeseries:
"""
Defines an aggregated search for timeseries data in the specified time
window which may be static or moving (with relative timestamps).
Parameters
----------
api : humioapi.HumioAPI
A Humio API instance for interacting with Humio
query : string
A Humio query string to execute
repos : list
A list of repositories to search against
start : string
A snaptime-token (-<PASSWORD>) or timestring to search after
stop : string
A snaptime-token (@h) or timestring to search before
freq : str
A pandas frequency string to use when calculating missing buckets.
This *must* correspond to the frequency used in the Humio search.
timefield : str, optional
The name of the timestamp field in the search result, by default "_bucket"
datafields : list, optional
A list of all data fields ("columns") in the search result, by default None
which means all fields remaining after groupby are used.
groupby : list, optional
A list of all groupby fields ("series") in the search result, by default None
which means no grouping is performed.
title : str, optional
A title identifying this search - use however you like, by default ""
cutoff_start : str, optional
        An unsigned snaptime-token used to cut off the head of the final DataFrame, by default "0m"
    cutoff_stop : str, optional
        An unsigned snaptime-token used to cut off the tail of the final DataFrame, by default "0m"
trusted_pickle : string, optional
A path to a trusted pickle-file to save/load the DataFrame, by default None
"""
def __init__(
self,
api,
query,
repos,
start,
stop,
freq,
timefield="_bucket",
datafields=None,
groupby=None,
title="",
cutoff_start="0m",
cutoff_stop="0m",
trusted_pickle=None,
tz=None,
):
self.api = api
self.query = query
self.repos = repos
self.start = start
self.stop = stop
self.freq = freq
self.timefield = timefield
self.datafields = datafields
self.groupby = groupby
self.title = title
self.cutoff_start = cutoff_start
self.cutoff_stop = cutoff_stop
        self.tz = tz if tz is not None else tzlocal.get_localzone()
self.data = pd.DataFrame()
self.trusted_pickle = trusted_pickle
self._metadata = {}
self.lock = Lock()
if self.trusted_pickle:
self.load_df()
logger.debug(
"Initialized search object definition", start=self.start, stop=self.stop, event_count=len(self.data)
)
def copyable_attributes(self, ignore=None):
"""
Provides all instance attributes that can be considered copyable
Parameters
----------
ignore : list, optional
A list of attributes to ignore, by default all non-copyable keys
Returns
-------
dict
A dictionary of all copyable keys
"""
if ignore is None:
ignore = ["api", "data", "trusted_pickle", "lock", "_metadata"]
return {k: v for k, v in self.__dict__.items() if k not in ignore}
def sanity_check(self):
# Check that the searchstring span is equal to the pandas freq
pass
def load_df(self):
"""Loads and unpickles a trusted pickled pd.DataFrame"""
try:
with open(self.trusted_pickle + ".meta", "r") as metafile:
meta = json.load(metafile)
for key, value in self.copyable_attributes().items():
if key in meta and value != meta[key]:
logger.info(
"Search has changed since DataFrame was pickled",
parameter=key,
stored_value=meta[key],
current_value=value,
)
self.data = pd.DataFrame()
return
self.data = pd.read_pickle(self.trusted_pickle + ".pkl")
logger.debug(
"Loaded pickled data from file", event_count=len(self.data), pickle=self.trusted_pickle + ".pkl"
)
except FileNotFoundError:
pass
def save_df(self):
"""Saves a pickled `pd.DataFrame` to file"""
with open(self.trusted_pickle + ".meta", "w") as metafile:
json.dump(self.copyable_attributes(), metafile)
self.data.to_pickle(self.trusted_pickle + ".pkl")
logger.debug("Saved pickled data to file", event_count=len(self.data), pickle=self.trusted_pickle + ".pkl")
def current_refresh_window(self):
"""Returns the smallest possible search window required to update missing data
Returns:
Tuple: (`pd.Timestamp`, `pd.Timestamp`)
"""
# Shrink the search window according to the cutoffs and generate all buckets
# that should appear in the current DataFrame
wanted_buckets = pd.date_range(
snap_tz(parse_ts(self.start, stdlib=True), "+" + self.cutoff_start, tz=self.tz),
snap_tz(parse_ts(self.stop, stdlib=True), "-" + self.cutoff_stop, tz=self.tz),
freq=self.freq,
closed="left",
)
missing = wanted_buckets.difference(self.data.index.dropna(how="all").unique())
if missing.empty:
logger.debug(
"Calculated minimum required search range and found no missing buckets",
current_start=self.data.index.min(),
current_stop=self.data.index.max(),
wanted_start=wanted_buckets.min(),
wanted_stop=wanted_buckets.max(),
)
return None, None
# Expand the search window again according to the cutoffs
start = snap_tz(missing.min(), "-" + self.cutoff_start, tz=self.tz)
stop = snap_tz(missing.max() + pd.Timedelta(self.freq), "+" + self.cutoff_stop, tz=self.tz)
logger.debug(
"Calculated minimum required search range",
current_start=self.data.index.min(),
current_stop=self.data.index.max(),
wanted_start=wanted_buckets.min(),
wanted_stop=wanted_buckets.max(),
next_start=start,
next_stop=stop,
)
return start, stop
def update(self):
"""
Find and update missing data in the current `pd.DataFrame` according
to the start and stop timestamps. Optionally load and save a pickled
`pd.DataFrame` to file.
Concurrent calls will return non-blocking until the first call
has completed its update request.
Returns: None
"""
if self.trusted_pickle:
self.load_df()
if self.lock.acquire(blocking=False):
try:
start, stop = self.current_refresh_window()
if all([start, stop]):
new_data = list(self.api.streaming_search(self.query, self.repos, start, stop))
if new_data:
logger.info("Search returned new data", events=len(new_data))
data = humio_to_timeseries(
new_data, timefield=self.timefield, datafields=self.datafields, groupby=self.groupby
)
self.data = data.combine_first(self.data)
else:
logger.warn("Search didnt return any data")
else:
logger.info("Data is already current. Not fetching new data.")
# Clean up data outside the current search window, adjusted with the cutoffs
self.data = self.data[
(
self.data.index
>= str(snap_tz(parse_ts(self.start, stdlib=True), "+" + self.cutoff_start, tz=self.tz))
)
& (
self.data.index
< str(snap_tz(parse_ts(self.stop, stdlib=True), "-" + self.cutoff_stop, tz=self.tz))
)
]
if self.trusted_pickle:
self.save_df()
finally:
self.lock.release()
else:
logger.info("Data update already in progress in another thread", lock=self.lock)
def humio_to_timeseries(events, timefield="_bucket", datafields=None, groupby=None, fill=None, sep="@"):
"""
Convert a list of Humio event dicts to a datetime-indexed pandas dataframe
"""
df = pd.DataFrame.from_records(events)
df = df.apply(pd.to_numeric, errors="coerce")
df[timefield] = pd.to_datetime(df[timefield], unit="ms", utc=True)
df = | pd.pivot_table(df, index=timefield, values=datafields, columns=groupby, fill_value=fill) | pandas.pivot_table |
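# --- Illustrative usage sketch (added; not part of the original module) ---
# The query, repo name and cache path below are placeholders; `api` is assumed
# to be an authenticated humioapi.HumioAPI instance.
#
# ts = WindowedTimeseries(
#     api, query='#type=accesslog | timechart(span=5m)', repos=['myrepo'],
#     start='-24h', stop='@m', freq='5min',
#     trusted_pickle='cache/accesslog_timechart',
# )
# ts.update()   # fetches only the buckets missing from the cached DataFrame
# ts.data       # datetime-indexed pandas DataFrame of the aggregated search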
# fmt: off
import os
import h5py
import torch
import copy
import ipywidgets as ipyw
import scipy
import pandas as pd
import datetime
import time
import itertools
import qgrid
import shutil
import subprocess
from random import shuffle
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import precision_recall_curve
from scipy.ndimage.interpolation import map_coordinates
from scipy.interpolate import RectBivariateSpline
from scipy import interpolate,ndimage
import skimage as sk
import pickle as pkl
import skimage.morphology
import skimage.measure
import skimage.transform
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from .utils import pandas_hdf5_handler,kymo_handle,writedir
from .trcluster import hdf5lock,dask_controller
from .metrics import object_f_scores
from matplotlib import pyplot as plt
class weightmap_generator:
def __init__(self,nndatapath,w0,wm_sigma):
self.nndatapath = nndatapath
self.w0 = w0
self.wm_sigma = wm_sigma
def make_weight_map(self,binary_mask):
ttl_count = binary_mask.size
cell_count = np.sum(binary_mask)
background_count = ttl_count - cell_count
class_weight = np.array([ttl_count/(background_count+1),ttl_count/(cell_count+1)])
class_weight = class_weight/np.sum(class_weight)
labeled = sk.measure.label(binary_mask)
labels = np.unique(labeled)[1:]
dist_maps = []
borders = []
num_labels = len(labels)
if num_labels == 0:
weight = np.ones(binary_mask.shape)*class_weight[0]
elif num_labels == 1:
cell = labeled==1
# dilated = sk.morphology.binary_dilation(cell)
eroded = sk.morphology.binary_dilation(cell)
border = eroded^cell
weight = np.ones(binary_mask.shape)*class_weight[0]
weight[binary_mask] += class_weight[1]
# weight[border] = 0.
else:
for i in labels:
cell = labeled==i
# dilated = sk.morphology.binary_dilation(cell)
eroded = sk.morphology.binary_dilation(cell)
border = eroded^cell
borders.append(border)
dist_map = scipy.ndimage.morphology.distance_transform_edt(~border)
dist_maps.append(dist_map)
dist_maps = np.array(dist_maps)
borders = np.array(borders)
borders = np.max(borders,axis=0)
dist_maps = np.sort(dist_maps,axis=0)
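            # Border-emphasis term from the U-Net paper (Ronneberger et al., 2015):
            # w(x) = w0 * exp(-(d1(x) + d2(x))^2 / (2 * sigma^2)), where d1 and d2
            # are the distances to the two nearest cell borders (dist_maps[0] and
            # dist_maps[1] after the sort above); class weights are added below.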
weight = self.w0*np.exp(-((dist_maps[0] + dist_maps[1])**2)/(2*(self.wm_sigma**2)))
weight[binary_mask] += class_weight[1]
weight[~binary_mask] += class_weight[0]
# weight[borders] = 0.
return weight
def make_weightmaps(self,seg_arr):
num_indices = seg_arr.shape[0]
weightmap_arr = []
for t in range(0,num_indices):
working_seg_arr = seg_arr[t,0].astype(bool)
weightmap = self.make_weight_map(working_seg_arr).astype("float32")
weightmap_arr.append(weightmap)
weightmap_arr = np.array(weightmap_arr)[:,np.newaxis,:,:]
return weightmap_arr
class data_augmentation:
def __init__(self,p_flip=0.5,max_rot=10,min_padding=20):
self.p_flip = p_flip
self.max_rot = max_rot
self.min_padding = min_padding
# def make_chunked_kymograph(self,img_arr,chunksize=10):
# pad = (chunksize - (img_arr.shape[2]%chunksize))*img_arr.shape[1]
# chunked_arr = np.swapaxes(img_arr,1,2)
# chunked_arr = chunked_arr.reshape(chunked_arr.shape[0],-1)
# chunked_arr = np.pad(chunked_arr,((0,0),(0,pad)),'constant',constant_values=0)
# chunked_arr = chunked_arr.reshape(chunked_arr.shape[0],-1,img_arr.shape[1]*chunksize)
# chunked_arr = np.swapaxes(chunked_arr,1,2)
# return chunked_arr
def random_crop(self,img_arr,seg_arr):
false_arr = np.zeros(img_arr.shape[2:4],dtype=bool)
random_crop_len_y = np.random.uniform(low=0.1,high=1.,size=(1,img_arr.shape[0]))
random_crop_len_x = np.random.uniform(low=0.4,high=1.,size=(1,img_arr.shape[0]))
random_crop_len = np.concatenate([random_crop_len_y,random_crop_len_x],axis=0)
random_crop_remainder = 1.-random_crop_len
random_crop_start = (np.random.uniform(low=0.,high=1.,size=(2,img_arr.shape[0])))*random_crop_remainder
low_crop = np.floor(random_crop_start*np.array(img_arr.shape[2:4])[:,np.newaxis]).astype('int32')
high_crop = np.floor(low_crop+(random_crop_len*np.array(img_arr.shape[2:4])[:,np.newaxis])).astype('int32')
# random_low_samples = np.random.uniform(low=0.,high=0.5,size=(2,img_arr.shape[0]))
# low_crop = (random_low_samples*np.array(img_arr.shape[2:4])[:,np.newaxis]).astype('int32')
# remainder = np.array(img_arr.shape[2:4])[:,np.newaxis]-low_crop
# random_high_samples = np.random.uniform(low=0.5,high=1.,size=(2,img_arr.shape[0]))
# high_crop = np.floor(random_high_samples*remainder).astype('int32')+low_crop
out_arr = []
out_seg_arr = []
center = (img_arr.shape[2]//2,img_arr.shape[3]//2)
for t in range(img_arr.shape[0]):
mask = copy.copy(false_arr)
working_arr = copy.copy(img_arr[t,0,:,:])
working_seg_arr = copy.copy(seg_arr[t,0,:,:])
dim_0_range = (high_crop[0,t] - low_crop[0,t])
dim_1_range = high_crop[1,t] - low_crop[1,t]
top_left = (center[0]-dim_0_range//2,center[1]-dim_1_range//2)
dim_0_maxscale = img_arr.shape[2]/dim_0_range
dim_1_maxscale = img_arr.shape[3]/dim_1_range
dim_0_scale = np.clip(np.random.normal(loc=1.0,scale=0.1),0.8,dim_0_maxscale)
dim_1_scale = np.clip(np.random.normal(loc=1.0,scale=0.1),0.8,dim_1_maxscale)
rescaled_img = sk.transform.rescale(working_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]],(dim_0_scale,dim_1_scale),preserve_range=True).astype(int)
rescaled_seg = (sk.transform.rescale(working_seg_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]]==1,(dim_0_scale,dim_1_scale))>0.5).astype("int8")
# rescaled_border = (sk.transform.rescale(working_seg_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]]==2,(dim_0_scale,dim_1_scale))>0.5)
# rescaled_seg[rescaled_border] = 2
top_left = (center[0]-rescaled_img.shape[0]//2,center[1]-rescaled_img.shape[1]//2)
working_arr[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = rescaled_img
working_seg_arr[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = rescaled_seg
mask[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = True
working_arr[~mask] = 0
working_seg_arr[~mask] = False
out_arr.append(working_arr)
out_seg_arr.append(working_seg_arr)
out_arr = np.expand_dims(np.array(out_arr),1)
out_seg_arr = np.expand_dims(np.array(out_seg_arr),1)
return out_arr,out_seg_arr
def random_x_flip(self,img_arr,seg_arr,p=0.5):
choices = np.random.choice(np.array([True,False]),size=img_arr.shape[0],p=np.array([p,1.-p]))
out_img_arr = copy.copy(img_arr)
out_seg_arr = copy.copy(seg_arr)
out_img_arr[choices,0,:,:] = np.flip(img_arr[choices,0,:,:],axis=1)
out_seg_arr[choices,0,:,:] = np.flip(seg_arr[choices,0,:,:],axis=1)
return out_img_arr,out_seg_arr
def random_y_flip(self,img_arr,seg_arr,p=0.5):
choices = np.random.choice(np.array([True,False]),size=img_arr.shape[0],p=np.array([p,1.-p]))
out_img_arr = copy.copy(img_arr)
out_seg_arr = copy.copy(seg_arr)
out_img_arr[choices,0,:,:] = np.flip(img_arr[choices,0,:,:],axis=2)
out_seg_arr[choices,0,:,:] = np.flip(seg_arr[choices,0,:,:],axis=2)
return out_img_arr,out_seg_arr
def change_brightness(self,img_arr,num_control_points=3):
out_img_arr = copy.copy(img_arr)
for t in range(img_arr.shape[0]):
control_points = (np.add.accumulate(np.ones(num_control_points+2))-1.)/(num_control_points+1)
control_point_locations = (control_points*65535).astype(int)
orig_locations = copy.copy(control_point_locations)
random_points = np.random.uniform(low=0,high=65535,size=num_control_points).astype(int)
sorted_points = np.sort(random_points)
control_point_locations[1:-1] = sorted_points
mapping = interpolate.PchipInterpolator(orig_locations, control_point_locations)
out_img_arr[t,0,:,:] = mapping(img_arr[t,0,:,:])
return out_img_arr
def add_padding(self,img_arr,seg_arr,max_rot=20,min_padding=20):
hyp_length = np.ceil((img_arr.shape[2]**2+img_arr.shape[3]**2)**(1/2)).astype(int)
max_rads = ((90-max_rot)/360)*(2*np.pi)
min_rads = (90/360)*(2*np.pi)
max_y = np.maximum(np.ceil(hyp_length*np.sin(max_rads)),np.ceil(hyp_length*np.sin(min_rads))).astype(int)
max_x = np.maximum(np.ceil(hyp_length*np.cos(max_rads)),np.ceil(hyp_length*np.cos(min_rads))).astype(int)
delta_y = max_y-img_arr.shape[2]
delta_x = max_x-img_arr.shape[3]
if delta_x % 2 == 1:
delta_x+=1
if delta_y % 2 == 1:
delta_y+=1
delta_y = np.maximum(delta_y,2*min_padding)
delta_x = np.maximum(delta_x,2*min_padding)
padded_img_arr = np.pad(img_arr, ((0,0),(0,0),(delta_y//2,delta_y//2),(delta_x//2,delta_x//2)), 'constant', constant_values=0)
padded_seg_arr = np.pad(seg_arr, ((0,0),(0,0),(delta_y//2,delta_y//2),(delta_x//2,delta_x//2)), 'constant', constant_values=0)
return padded_img_arr,padded_seg_arr
def translate(self,pad_img_arr,pad_seg_arr,img_arr,seg_arr):
trans_img_arr = copy.copy(pad_img_arr)
trans_seg_arr = copy.copy(pad_seg_arr)
delta_y = pad_img_arr.shape[2] - img_arr.shape[2]
delta_x = pad_img_arr.shape[3] - img_arr.shape[3]
for t in range(pad_img_arr.shape[0]):
trans_y = np.random.randint(-(delta_y//2),high=delta_y//2)
trans_x = np.random.randint(-(delta_x//2),high=delta_x//2)
trans_img_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[2],delta_x//2:delta_x//2+img_arr.shape[3]] = 0
trans_seg_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[2],delta_x//2:delta_x//2+img_arr.shape[3]] = 0
trans_img_arr[t,0,delta_y//2+trans_y:delta_y//2+img_arr.shape[2]+trans_y,delta_x//2+trans_x:delta_x//2+img_arr.shape[3]+trans_x] =\
pad_img_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[2],delta_x//2:delta_x//2+img_arr.shape[3]]
trans_seg_arr[t,0,delta_y//2+trans_y:delta_y//2+img_arr.shape[2]+trans_y,delta_x//2+trans_x:delta_x//2+img_arr.shape[3]+trans_x] =\
pad_seg_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[2],delta_x//2:delta_x//2+img_arr.shape[3]]
return trans_img_arr,trans_seg_arr
def rotate(self,img_arr,seg_arr,max_rot=20):
rot_img_arr = copy.copy(img_arr)
rot_seg_arr = copy.copy(seg_arr)
for t in range(img_arr.shape[0]):
r = np.random.uniform(low=-max_rot,high=max_rot)
rot_img_arr[t,0,:,:] = sk.transform.rotate(img_arr[t,0,:,:],r,preserve_range=True).astype("int32")
rot_seg = (sk.transform.rotate(seg_arr[t,0,:,:]==1,r)>0.5).astype("int8")
# rot_border = sk.transform.rotate(seg_arr[t,:,:]==2,r)>0.5
# rot_seg[rot_border] = 2
rot_seg_arr[t,0,:,:] = rot_seg
return rot_img_arr,rot_seg_arr
def deform_img_arr(self,img_arr,seg_arr):
def_img_arr = copy.copy(img_arr)
def_seg_arr = copy.copy(seg_arr)
for t in range(img_arr.shape[0]):
y_steps = np.linspace(0.,4.,num=img_arr.shape[2])
x_steps = np.linspace(0.,4.,num=img_arr.shape[3])
grid = np.random.normal(scale=1.,size=(2,4,4))
dx = RectBivariateSpline(np.arange(4),np.arange(4),grid[0]).ev(y_steps[:,np.newaxis],x_steps[np.newaxis,:])
dy = RectBivariateSpline(np.arange(4),np.arange(4),grid[1]).ev(y_steps[:,np.newaxis],x_steps[np.newaxis,:])
y,x = np.meshgrid(np.arange(img_arr.shape[2]), np.arange(img_arr.shape[3]), indexing='ij')
indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
elastic_img = map_coordinates(img_arr[t,0,:,:], indices, order=1).reshape(img_arr.shape[2:4])
def_img_arr[t,0,:,:] = elastic_img
elastic_cell = (map_coordinates(seg_arr[t,0,:,:]==1, indices, order=1).reshape(seg_arr.shape[2:4])>0.5)
elastic_cell = sk.morphology.binary_closing(elastic_cell)
# elastic_border = (map_coordinates(seg_arr[t,:,:]==2, indices, order=1).reshape(seg_arr.shape[1:3])>0.5)
def_seg_arr[t,0,elastic_cell] = 1
# def_seg_arr[t,elastic_border] = 2
return def_img_arr,def_seg_arr
def get_augmented_data(self,img_arr,seg_arr):
img_arr,seg_arr = self.random_crop(img_arr,seg_arr)
img_arr,seg_arr = self.random_x_flip(img_arr,seg_arr,p=self.p_flip)
img_arr,seg_arr = self.random_y_flip(img_arr,seg_arr,p=self.p_flip)
img_arr = self.change_brightness(img_arr)
pad_img_arr,pad_seg_arr = self.add_padding(img_arr,seg_arr,max_rot=self.max_rot+5)
img_arr,seg_arr = self.translate(pad_img_arr,pad_seg_arr,img_arr,seg_arr)
del pad_img_arr
del pad_seg_arr
img_arr,seg_arr = self.rotate(img_arr,seg_arr,max_rot=self.max_rot)
img_arr,seg_arr = self.deform_img_arr(img_arr,seg_arr)
img_arr,seg_arr = (img_arr.astype("int32"),seg_arr.astype("int8"))
return img_arr,seg_arr
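    # --- Illustrative note (added; not in the original class) ---
    # Typical call, assuming kymograph image and segmentation arrays shaped
    # (N, 1, H, W) with dtypes int32 / int8 as produced upstream:
    #
    #   augmenter = data_augmentation(p_flip=0.5, max_rot=10, min_padding=20)
    #   aug_imgs, aug_segs = augmenter.get_augmented_data(img_arr, seg_arr)
    #
    # The outputs keep those dtypes; height and width grow to accommodate the
    # padding added before translation and rotation.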
class UNet_Training_DataLoader:
def __init__(self,nndatapath="",experimentname="",trainpath="",testpath="",valpath="",augment=False):
self.nndatapath = nndatapath
self.experimentname = experimentname
self.trainpath = trainpath
self.testpath = testpath
self.valpath = valpath
self.trainname = self.trainpath.split("/")[-1]
self.testname = self.testpath.split("/")[-1]
self.valname = self.valpath.split("/")[-1]
self.metapath = self.nndatapath + "/metadata.hdf5"
def get_metadata(self,headpath):
meta_handle = pandas_hdf5_handler(headpath + "/metadata.hdf5")
global_handle = meta_handle.read_df("global",read_metadata=True)
kymo_handle = meta_handle.read_df("kymograph",read_metadata=True)
fovdf = kymo_handle.reset_index(inplace=False)
fovdf = fovdf.set_index(["fov","row","trench"], drop=True, append=False, inplace=False)
fovdf = fovdf.sort_index()
channel_list = global_handle.metadata["channels"]
fov_list = kymo_handle['fov'].unique().tolist()
t_len = len(kymo_handle.index.get_level_values("timepoints").unique())
trench_dict = {fov:len(fovdf.loc[fov]["trenchid"].unique()) for fov in fov_list}
shape_y = kymo_handle.metadata["kymograph_params"]["ttl_len_y"]
shape_x = kymo_handle.metadata["kymograph_params"]["trench_width_x"]
kymograph_img_shape = tuple((shape_y,shape_x))
return channel_list,fov_list,t_len,trench_dict,kymograph_img_shape
def get_selection(self,channel,trench_dict,fov_list,t_subsample_step,t_range,max_trenches,kymograph_img_shape,selectionname):
fov_list = list(fov_list)
ttl_trench_count = np.sum(np.array([trench_dict[fov] for fov in fov_list]))
ttl_trench_count = min(ttl_trench_count,max_trenches)
num_t = len(range(t_range[0],t_range[1]+1,t_subsample_step))
ttl_imgs = ttl_trench_count*num_t
print("Total Number of Trenches: " + str(ttl_trench_count))
print("Total Number of Timepoints: " + str(num_t))
print("Total Number of Images: " + str(ttl_imgs))
selection = tuple((channel,fov_list,t_subsample_step,t_range,max_trenches,ttl_imgs,kymograph_img_shape))
setattr(self, selectionname + "_selection", selection)
def inter_get_selection(self,headpath,selectionname):
channel_list,fov_list,t_len,trench_dict,kymograph_img_shape = self.get_metadata(headpath)
selection = ipyw.interactive(self.get_selection, {"manual":True}, channel=ipyw.Dropdown(options=channel_list,value=channel_list[0],description='Feature Channel:',disabled=False),\
trench_dict=ipyw.fixed(trench_dict),fov_list=ipyw.SelectMultiple(options=fov_list),\
t_subsample_step=ipyw.IntSlider(value=1, min=1, max=50, step=1),\
t_range=ipyw.IntRangeSlider(value=[0, t_len-1],min=0,max=t_len-1,step=1,disabled=False,continuous_update=False),\
max_trenches=ipyw.IntText(value=1,description='Maximum Trenches per FOV: ',disabled=False),\
kymograph_img_shape=ipyw.fixed(kymograph_img_shape),\
selectionname=ipyw.fixed(selectionname));
display(selection)
def export_chunk(self,selectionname,file_idx,augment,file_trench_indices,weight_grid_list):
selection = getattr(self,selectionname + "_selection")
datapath = getattr(self,selectionname + "path")
dataname = getattr(self,selectionname + "name")
img_path = datapath + "/kymograph/kymograph_" + str(file_idx) + ".hdf5"
seg_path = datapath + "/fluorsegmentation/segmentation_" + str(file_idx) + ".hdf5"
nndatapath = self.nndatapath + "/" + selectionname + "_" + str(file_idx) + ".hdf5"
with h5py.File(img_path,"r") as imgfile:
img_arr = imgfile[selection[0]][file_trench_indices,selection[3][0]:selection[3][1]+1:selection[2]]
img_arr = img_arr.reshape(img_arr.shape[0]*img_arr.shape[1],img_arr.shape[2],img_arr.shape[3])
img_arr = img_arr[:,np.newaxis,:,:]
img_arr = img_arr.astype('int32')
with h5py.File(seg_path,"r") as segfile:
seg_arr = segfile["data"][file_trench_indices,selection[3][0]:selection[3][1]+1:selection[2]]
seg_arr = seg_arr.reshape(seg_arr.shape[0]*seg_arr.shape[1],seg_arr.shape[2],seg_arr.shape[3])
seg_arr = seg_arr[:,np.newaxis,:,:]
seg_arr = seg_arr.astype('int8')
if augment:
img_arr,seg_arr = self.data_augmentation.get_augmented_data(img_arr,seg_arr)
chunk_shape = (1,1,img_arr.shape[2],img_arr.shape[3])
with h5py.File(nndatapath,"w") as outfile:
img_handle = outfile.create_dataset("img",data=img_arr,chunks=chunk_shape,dtype='int32')
seg_handle = outfile.create_dataset("seg",data=seg_arr,chunks=chunk_shape,dtype='int8')
for item in weight_grid_list:
w0,wm_sigma = item
weightmap_gen = weightmap_generator(self.nndatapath,w0,wm_sigma)
weightmap_arr = weightmap_gen.make_weightmaps(seg_arr)
with h5py.File(nndatapath,"a") as outfile:
                weightmap_handle = outfile.create_dataset("weight_" + str(item),data=weightmap_arr,chunks=chunk_shape,dtype='float32')
return file_idx
def gather_chunks(self,outputdf,output_metadata,selectionname,file_idx_list,weight_grid_list):
nnoutputpath = self.nndatapath + "/" + selectionname + ".hdf5"
tempdatapath = self.nndatapath + "/" + selectionname + "_" + str(file_idx_list[0]) + ".hdf5"
with h5py.File(tempdatapath,"r") as infile:
output_shape = (len(outputdf.index),1,infile["img"].shape[2],infile["img"].shape[3])
chunk_shape = (1,1,infile["img"].shape[2],infile["img"].shape[3])
with h5py.File(nnoutputpath,"w") as outfile:
img_handle = outfile.create_dataset("img",output_shape,chunks=chunk_shape,dtype='int32')
seg_handle = outfile.create_dataset("seg",output_shape,chunks=chunk_shape,dtype='int8')
for item in weight_grid_list:
                weightmap_handle = outfile.create_dataset("weight_" + str(item),output_shape,chunks=chunk_shape,dtype='float32')
current_idx = 0
for file_idx in file_idx_list:
nndatapath = self.nndatapath + "/" + selectionname + "_" + str(file_idx) + ".hdf5"
with h5py.File(nndatapath,"r") as infile:
img_arr = infile["img"][:]
seg_arr = infile["seg"][:]
weight_arr_list = []
for item in weight_grid_list:
weight_arr_list.append(infile["weight_" + str(item)][:])
num_indices = img_arr.shape[0]
with h5py.File(nnoutputpath,"a") as outfile:
outfile["img"][current_idx:current_idx+num_indices] = img_arr
outfile["seg"][current_idx:current_idx+num_indices] = seg_arr
for i,item in enumerate(weight_grid_list):
outfile["weight_" + str(item)][current_idx:current_idx+num_indices] = weight_arr_list[i]
current_idx += num_indices
os.remove(nndatapath)
def export_data(self,selectionname,dask_controller,weight_grid_list,augment=False):
dask_controller.futures = {}
selection = getattr(self,selectionname + "_selection")
datapath = getattr(self,selectionname + "path")
dataname = getattr(self,selectionname + "name")
input_meta_handle = pandas_hdf5_handler(datapath + "/metadata.hdf5")
output_meta_handle = pandas_hdf5_handler(self.metapath)
trenchdf_list = []
kymodf = input_meta_handle.read_df("kymograph",read_metadata=True)
fovdf = kymodf.reset_index(inplace=False)
fovdf = fovdf.set_index(["fov","row","trench"], drop=True, append=False, inplace=False)
fovdf = fovdf.sort_index()
trenchdf = fovdf.loc[selection[1]]
trenchdf = trenchdf.reset_index(inplace=False)
trenchdf = trenchdf.set_index(["trenchid","timepoints"], drop=True, append=False, inplace=False)
trenchdf = trenchdf.sort_index()
trenches = trenchdf.index.get_level_values("trenchid").unique().tolist()
shuffle(trenches)
trenches = np.sort(trenches[:selection[4]])
filedf = trenchdf.loc[pd.IndexSlice[trenches, selection[3][0]:selection[3][1]+1:selection[2]], :]
filedf = filedf.reset_index(inplace=False)
filedf = filedf.set_index(["File Index","File Trench Index"], drop=True, append=False, inplace=False)
filedf = filedf.sort_index()
filelist = filedf.index.get_level_values("File Index").unique().tolist()
for file_idx in filelist:
file_trenchdf = filedf.loc[file_idx]
file_trench_indices = file_trenchdf.index.get_level_values("File Trench Index").unique().tolist()
future = dask_controller.daskclient.submit(self.export_chunk,selectionname,file_idx,augment,file_trench_indices,weight_grid_list,retries=1)
dask_controller.futures["File Number: " + str(file_idx)] = future
outputdf = filedf.reset_index(inplace=False)
outputdf = outputdf.set_index(["trenchid","timepoints"], drop=True, append=False, inplace=False)
outputdf = outputdf.sort_index()
del outputdf["File Index"]
del outputdf["File Trench Index"]
selection_keys = ["channel", "fov_list", "t_subsample_step", "t_range", "max_trenches", "ttl_imgs", "kymograph_img_shape"]
selection = {selection_keys[i]:item for i,item in enumerate(selection)}
selection["experiment_name"],selection["data_name"] = (self.experimentname, dataname)
selection["W0 List"], selection["Wm Sigma List"] = (self.grid_dict['W0 (Border Region Weight):'],self.grid_dict['Wm Sigma (Border Region Spread):'])
output_metadata = {"nndataset" : selection}
segparampath = datapath + "/fluorescent_segmentation.par"
with open(segparampath, 'rb') as infile:
seg_param_dict = pkl.load(infile)
output_metadata["segmentation"] = seg_param_dict
input_meta_handle = pandas_hdf5_handler(datapath + "/metadata.hdf5")
for item in ["global","kymograph"]:
indf = input_meta_handle.read_df(item,read_metadata=True)
output_metadata[item] = indf.metadata
output_meta_handle.write_df(selectionname,outputdf,metadata=output_metadata)
file_idx_list = dask_controller.daskclient.gather([dask_controller.futures["File Number: " + str(file_idx)] for file_idx in filelist])
self.gather_chunks(outputdf,output_metadata,selectionname,file_idx_list,weight_grid_list)
def display_grid(self):
tab_dict = {'W0 (Border Region Weight):':[1., 3., 5., 10.],'Wm Sigma (Border Region Spread):':[1., 2., 3., 4., 5.]}
children = [ipyw.SelectMultiple(options=val,value=(val[1],),description=key,disabled=False) for key,val in tab_dict.items()]
self.tab = ipyw.Tab()
self.tab.children = children
for i,key in enumerate(tab_dict.keys()):
self.tab.set_title(i, key[:-1])
return self.tab
def get_grid_params(self):
if hasattr(self,'tab'):
self.grid_dict = {child.description:child.value for child in self.tab.children}
delattr(self, 'tab')
elif hasattr(self,'grid_dict'):
pass
else:
raise "No selection defined."
print("======== Grid Params ========")
for key,val in self.grid_dict.items():
print(key + " " + str(val))
def export_all_data(self,n_workers=20,memory='4GB'):
writedir(self.nndatapath,overwrite=True)
grid_keys = self.grid_dict.keys()
grid_combinations = list(itertools.product(*list(self.grid_dict.values())))
self.data_augmentation = data_augmentation()
dask_cont = dask_controller(walltime='01:00:00',local=False,n_workers=n_workers,memory=memory)
dask_cont.startdask()
# dask_cont.daskcluster.start_workers()
dask_cont.displaydashboard()
try:
for selectionname in ["train","test","val"]:
if selectionname == "train":
self.export_data(selectionname,dask_cont,grid_combinations,augment=True)
else:
self.export_data(selectionname,dask_cont,grid_combinations,augment=False)
dask_cont.shutdown()
except:
dask_cont.shutdown()
raise
class GridSearch:
def __init__(self,nndatapath,numepochs=50):
self.nndatapath = nndatapath
self.numepochs = numepochs
def display_grid(self):
meta_handle = pandas_hdf5_handler(self.nndatapath + "/metadata.hdf5")
trainmeta = meta_handle.read_df("train",read_metadata=True).metadata["nndataset"]
w0_list,wm_sigma_list = trainmeta["W0 List"],trainmeta["Wm Sigma List"]
self.tab_dict = {'Batch Size:':[5, 10, 25],'Layers:':[2, 3, 4],\
'Hidden Size:':[16, 32, 64],'Learning Rate:':[0.001, 0.005, 0.01, 0.05],\
'Momentum:':[0.9, 0.95, 0.99],'Weight Decay:':[0.0001,0.0005, 0.001],\
                         'Dropout:':[0., 0.3, 0.5, 0.7], 'w0:':w0_list, 'wm sigma:':wm_sigma_list}
children = [ipyw.SelectMultiple(options=val,value=(val[1],),description=key,disabled=False) for key,val in self.tab_dict.items()]
self.tab = ipyw.Tab()
self.tab.children = children
for i,key in enumerate(self.tab_dict.keys()):
self.tab.set_title(i, key[:-1])
return self.tab
def get_grid_params(self):
self.grid_dict = {child.description:child.value for child in self.tab.children}
print("======== Grid Params ========")
for key,val in self.grid_dict.items():
print(key + " " + str(val))
def generate_pyscript(self,run_idx,grid_params):
import_line = "import trenchripper as tr"
trainer_line = "nntrainer = tr.unet.UNet_Trainer(\"" + self.nndatapath + "\"," + str(run_idx) + \
",gpuon=True,numepochs=" + str(self.numepochs) + ",batch_size=" + str(grid_params[0])+",layers=" + \
str(grid_params[1])+",hidden_size=" + str(grid_params[2]) + ",lr=" + str(grid_params[3]) + \
",momentum=" + str(grid_params[4]) + ",weight_decay=" + str(grid_params[5])+",dropout="+str(grid_params[6]) + \
",w0=" + str(grid_params[7]) + ",wm_sigma=" + str(grid_params[8]) + ")"
train_line = "nntrainer.train_model()"
pyscript = "\n".join([import_line,trainer_line,train_line])
with open(self.nndatapath + "/models/scripts/" + str(run_idx) + ".py", "w") as scriptfile:
scriptfile.write(pyscript)
def generate_sbatchscript(self,run_idx,hours,cores,mem,gres):
shebang = "#!/bin/bash"
core_line = "#SBATCH -c " + str(cores)
hour_line = "#SBATCH -t " + str(hours) + ":00:00"
gpu_lines = "#SBATCH -p gpu\n#SBATCH --gres=" + gres
mem_line = "#SBATCH --mem=" + mem
report_lines = "#SBATCH -o " + self.nndatapath + "/models/scripts/" + str(run_idx) +\
".out\n#SBATCH -e " + self.nndatapath + "/models/scripts/" + str(run_idx) + ".err\n"
run_line = "python -u " + self.nndatapath + "/models/scripts/" + str(run_idx) + ".py"
sbatchscript = "\n".join([shebang,core_line,hour_line,gpu_lines,mem_line,report_lines,run_line])
with open(self.nndatapath + "/models/scripts/" + str(run_idx) + ".sh", "w") as scriptfile:
scriptfile.write(sbatchscript)
def run_sbatchscript(self,run_idx):
cmd = ["sbatch",self.nndatapath + "/models/scripts/" + str(run_idx) + ".sh"]
subprocess.run(cmd)
def run_grid_search(self,hours=12,cores=2,mem="8G",gres="gpu:1"):
grid_keys = self.grid_dict.keys()
grid_combinations = list(itertools.product(*list(self.grid_dict.values())))
writedir(self.nndatapath + "/models",overwrite=True)
writedir(self.nndatapath + "/models/scripts",overwrite=True)
self.run_indices = []
for run_idx,grid_params in enumerate(grid_combinations):
self.generate_pyscript(run_idx,grid_params)
self.generate_sbatchscript(run_idx,hours,cores,mem,gres)
self.run_sbatchscript(run_idx)
self.run_indices.append(run_idx)
def cancel_all_runs(self,username):
for run_idx in self.run_indices:
cmd = ["scancel","-p","gpu","--user=" + username]
            subprocess.Popen(cmd,stdin=None,stdout=None,stderr=None,close_fds=True)
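# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of the GridSearch workflow on a SLURM cluster: choose hyperparameter
# values in the widget, then write one python script and one sbatch script per grid
# combination and submit them. The data path is a placeholder, and display_grid assumes
# the exporter has already written metadata.hdf5 under that path.
def _example_grid_search():
    gridsearch = GridSearch("/path/to/nndata",numepochs=50)
    gridsearch.display_grid()     # choose hyperparameter values in the ipywidgets tab
    gridsearch.get_grid_params()  # copy the widget selections into gridsearch.grid_dict
    gridsearch.run_grid_search(hours=12,cores=2,mem="8G",gres="gpu:1")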
class SegmentationDataset(Dataset):
def __init__(self,filepath,weightchannel="",training=False):
self.filepath = filepath
self.weightchannel = weightchannel
self.training = training
self.chunksize = 1000
with h5py.File(self.filepath,"r") as infile:
self.shape = infile["img"].shape
self.current_chunk = 0
self.load_chunk(self.current_chunk)
def load_chunk(self,chunk_idx):
with h5py.File(self.filepath,"r") as infile:
self.img_data = infile["img"][chunk_idx*self.chunksize:(chunk_idx+1)*self.chunksize]
if self.training:
self.seg_data = infile["seg"][chunk_idx*self.chunksize:(chunk_idx+1)*self.chunksize]
self.weight_data = infile[self.weightchannel][chunk_idx*self.chunksize:(chunk_idx+1)*self.chunksize]
self.current_chunk = chunk_idx
def __len__(self):
with h5py.File(self.filepath,"r") as infile:
out_len = infile["img"].shape[0]
return out_len
def __getitem__(self,idx):
idx_chunk = idx//self.chunksize
subidx = idx%self.chunksize
if idx_chunk != self.current_chunk:
self.load_chunk(idx_chunk)
if self.training:
sample = {'img': self.img_data[subidx], 'seg': self.seg_data[subidx], self.weightchannel: self.weight_data[subidx]}
else:
sample = {'img': self.img_data[subidx]}
return sample
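# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of wrapping SegmentationDataset in a torch DataLoader, mirroring how
# UNet_Trainer.train_model uses it below. DataLoader is assumed to be imported at the top
# of the original module (the trainer below relies on it). The hdf5 path is a placeholder
# and the file is assumed to contain "img", "seg", and the matching weight dataset; the
# weight-channel key follows the 'weight_' + str((w0, wm_sigma)) convention used by the trainer.
def _example_segmentation_loader():
    dataset = SegmentationDataset("/path/to/train.hdf5",weightchannel="weight_(5.0, 3.0)",training=True)
    loader = DataLoader(dataset,batch_size=10,shuffle=True)
    batch = next(iter(loader))
    return batch['img'].shape  # e.g. (batch, 1, H, W), depending on how the hdf5 was written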
class double_conv(nn.Module):
"""(Conv => BatchNorm =>ReLU) twice."""
def __init__(self,in_ch,out_ch):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self,x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super().__init__()
self.downconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch))
def forward(self, x):
x = self.downconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=False):
super().__init__()
# would be a nice idea if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,
diffY // 2, diffY - diffY//2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class UNet(nn.Module):
def __init__(self,n_channels,n_classes,layers=3,hidden_size=64,dropout=0.,withsoftmax=False):
super().__init__()
self.inc = inconv(n_channels, hidden_size)
self.downlist = nn.ModuleList([down(hidden_size*(2**i), hidden_size*(2**(i+1))) for i in range(0,layers-1)] + [down(hidden_size*(2**(layers-1)), hidden_size*(2**(layers-1)))])
self.uplist = nn.ModuleList([up(hidden_size*(2**i), hidden_size*(2**(i-2))) for i in reversed(range(2,layers+1))] + [up(hidden_size*2, hidden_size)])
self.outc = outconv(hidden_size, n_classes)
self.drop = nn.Dropout(p=dropout)
self.withsoftmax = withsoftmax
def uniforminit(self):
for param in self.named_parameters():
param[1].data.uniform_(-0.05,0.05)
def forward(self, x):
xlist = [self.inc(x)]
for item in self.downlist:
xlist.append(item(xlist[-1]))
x = xlist[-1]
x = self.drop(x)
for i,item in enumerate(self.uplist):
x = item(x, xlist[-(i+2)])
x = self.outc(x)
if self.withsoftmax:
x = F.softmax(x,dim=1)
return x
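# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of one forward pass through the UNet above on a random single-channel
# batch; torch is already imported by the surrounding module, and the spatial size only
# needs to survive `layers` rounds of 2x max-pooling. The shapes here are assumptions for
# illustration, not values taken from the training data.
def _example_unet_forward():
    net = UNet(1,2,layers=3,hidden_size=64,dropout=0.,withsoftmax=True)
    x = torch.randn(4,1,128,32)  # (batch, channel, height, width)
    probs = net(x)               # (4, 2, 128, 32); softmax over the class dimension
    return probs.shape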
class UNet_Trainer:
def __init__(self,nndatapath,model_number,numepochs=10,batch_size=100,layers=3,hidden_size=64,lr=0.005,momentum=0.95,weight_decay=0.0005,dropout=0.,\
w0=5.,wm_sigma=3.,gpuon=False):
self.nndatapath = nndatapath
self.model_number = model_number
self.numepochs = numepochs
self.batch_size = batch_size
self.gpuon = gpuon
self.layers = layers
self.hidden_size = hidden_size
self.dropout = dropout
self.lr = lr
self.momentum = momentum
self.weight_decay = weight_decay
self.w0 = w0
self.wm_sigma = wm_sigma
self.model = UNet(1,2,layers=layers,hidden_size=hidden_size,dropout=dropout,withsoftmax=True)
self.model.uniforminit()
if gpuon:
self.model = self.model.cuda()
self.optimizer = optim.SGD(self.model.parameters(), lr = self.lr,momentum=self.momentum,weight_decay=self.weight_decay)
def removefile(self,path):
if os.path.exists(path):
os.remove(path)
def load_model(self,paramspath):
if self.gpuon:
device = torch.device("cuda")
self.model.load_state_dict(torch.load(paramspath))
else:
device = torch.device('cpu')
self.model.load_state_dict(torch.load(paramspath, map_location=device))
def train(self,x,y,weightmaps):
self.optimizer.zero_grad()
fx = self.model.forward(x)
fx = torch.log(fx)
nll = F.nll_loss(fx,y,reduction='none')*weightmaps
mean_nll = torch.mean(nll)
mean_nll.backward()
self.optimizer.step()
nll = torch.sum(nll)
return nll
def test(self,x,y,weightmaps):
fx = self.model.forward(x)
fx = torch.log(fx)
nll = F.nll_loss(fx,y,reduction='none')*weightmaps
nll = torch.sum(nll)
return nll
def perepoch(self,e,train_iter,test_iter,val_iter,train_data_shape,test_data_shape,val_data_shape):
now = datetime.datetime.now()
print('=======epoch ' + str(e) + '=======')
self.model.train()
total_train_nll = 0.
num_train_batches = len(train_iter)
for i,b in enumerate(train_iter):
img_arr,seg_arr,weightmaps = (b['img'].numpy(),b['seg'].numpy(),b['weight_' + str(tuple([self.w0,self.wm_sigma]))].numpy())
seg_arr,weightmaps = seg_arr[:,0],weightmaps[:,0]
x = torch.Tensor(img_arr)
y = torch.LongTensor(seg_arr)
weightmaps = torch.Tensor(weightmaps)
if self.gpuon:
x = x.cuda()
y = y.cuda()
weightmaps = weightmaps.cuda()
# weights = weights.cuda()
nll = self.train(x,y,weightmaps)
total_train_nll += nll.detach().cpu().numpy()
# if (i%100 == 0) and self.saveparams:
# torch.save(self.model.state_dict(), self.nnpath + "/model_layers=" + str(self.layers) + "_hidden_size=" + str(self.hidden_size) +\
# "_dropout=" + str(self.dropout) + '_lr=' + str(self.lr) + '_momentum=' + str(self.momentum) + "_epoch_" + str(e) + "_step_" + str(i) +".pt")
del x
del y
del weightmaps
del nll
torch.cuda.empty_cache()
avgtrainnll = total_train_nll/(np.prod(np.array(train_data_shape)))
print('Mean Train NLL: ' + str(avgtrainnll))
self.model.eval()
total_val_nll = 0.
for i,b in enumerate(val_iter):
img_arr,seg_arr,weightmaps = (b['img'].numpy(),b['seg'].numpy(),b['weight_' + str(tuple([self.w0,self.wm_sigma]))].numpy())
seg_arr,weightmaps = seg_arr[:,0],weightmaps[:,0]
x = torch.Tensor(img_arr)
y = torch.LongTensor(seg_arr)
weightmaps = torch.Tensor(weightmaps)
if self.gpuon:
x = x.cuda()
y = y.cuda()
weightmaps = weightmaps.cuda()
# weights = weights.cuda()
nll = self.test(x,y,weightmaps)
total_val_nll += nll.detach().cpu().numpy()
del x
del y
del weightmaps
del nll
torch.cuda.empty_cache()
avgvalnll = total_val_nll/(np.prod(np.array(val_data_shape)))
print('Mean Val NLL: ' + str(avgvalnll))
total_test_nll = 0.
for i,b in enumerate(test_iter):
img_arr,seg_arr,weightmaps = (b['img'].numpy(),b['seg'].numpy(),b['weight_' + str(tuple([self.w0,self.wm_sigma]))].numpy())
seg_arr,weightmaps = seg_arr[:,0],weightmaps[:,0]
x = torch.Tensor(img_arr)
y = torch.LongTensor(seg_arr)
weightmaps = torch.Tensor(weightmaps)
if self.gpuon:
x = x.cuda()
y = y.cuda()
weightmaps = weightmaps.cuda()
# weights = weights.cuda()
nll = self.test(x,y,weightmaps)
total_test_nll += nll.detach().cpu().numpy()
del x
del y
del weightmaps
del nll
torch.cuda.empty_cache()
avgtestnll = total_test_nll/(np.prod(np.array(test_data_shape)))
print('Mean Test NLL: ' + str(avgtestnll))
entry = [[self.model_number,self.batch_size,self.layers,self.hidden_size,self.lr,self.momentum,self.weight_decay,\
self.dropout,self.w0,self.wm_sigma,e,avgtrainnll,avgvalnll,avgtestnll,str(now)]]
df_out = pd.DataFrame(data=entry,columns=['Model #','Batch Size','Layers','Hidden Size','Learning Rate','Momentum','Weight Decay',\
'Dropout',"W0 Weight","Wm Sigma",'Epoch','Train Loss','Val Loss','Test Loss','Date/Time'])
df_out = df_out.set_index(['Model #','Epoch'], drop=True, append=False, inplace=False)
df_out = df_out.sort_index()
return df_out
def write_metadata(self,filepath,iomode,df_out):
meta_handle = pandas_hdf5_handler(filepath)
if os.path.exists(filepath):
ind = df_out.index[0]
df_in = meta_handle.read_df("data")
df_mask = ~df_in.index.isin([ind])
df_in = df_in[df_mask]
df_out = pd.concat([df_in, df_out])
meta_handle.write_df("data",df_out)
def get_fscore(self,iterator,data_shape):
y_true = []
y_scores = []
for i,b in enumerate(iterator):
img_arr,y = (b['img'].numpy(),b['seg'].numpy())
x = torch.Tensor(img_arr)
if self.gpuon:
x = x.cuda()
fx = self.model.forward(x).detach().cpu().numpy()
# y_true.append(y.flatten())
# y_scores.append(fx[:,1].flatten())
y_true.append(y[:,0])
y_scores.append(fx[:,1])
del x
del y
torch.cuda.empty_cache()
y_true = np.concatenate(y_true,axis=0)
y_scores = np.concatenate(y_scores,axis=0)
precisions, recalls, thresholds = precision_recall_curve(y_true.flatten(), y_scores.flatten())
fscores = 2*((precisions*recalls)/(precisions+recalls))
best_idx = np.nanargmax(fscores)
precision, recall, fscore, threshold = (precisions[best_idx], recalls[best_idx], fscores[best_idx], thresholds[best_idx])
y_true = y_true.astype(bool)
y_pred = y_scores>threshold
all_f_scores = []
for i in range(y_true.shape[0]):
_,_,f_score = object_f_scores(y_true[i],y_pred[i])
all_f_scores += f_score.tolist()
all_f_scores = np.array(all_f_scores)
all_f_scores = all_f_scores[~np.isnan(all_f_scores)]
return precision, recall, fscore, threshold, all_f_scores
def train_model(self):
timestamp = datetime.datetime.now()
start = time.time()
writedir(self.nndatapath + "/models", overwrite=False)
self.removefile(self.nndatapath + "/models/training_metadata_" + str(self.model_number) + ".hdf5")
train_data = SegmentationDataset(self.nndatapath + "/train.hdf5",weightchannel='weight_' + str(tuple([self.w0,self.wm_sigma])),training=True)
test_data = SegmentationDataset(self.nndatapath + "/test.hdf5",weightchannel='weight_' + str(tuple([self.w0,self.wm_sigma])),training=True)
val_data = SegmentationDataset(self.nndatapath + "/val.hdf5",weightchannel='weight_' + str(tuple([self.w0,self.wm_sigma])),training=True)
train_data_shape = train_data.shape
test_data_shape = test_data.shape
val_data_shape = val_data.shape
for e in range(0,self.numepochs):
train_iter = DataLoader(train_data,batch_size=self.batch_size,shuffle=True)
test_iter = DataLoader(test_data,batch_size=self.batch_size,shuffle=True)
val_iter = DataLoader(val_data,batch_size=self.batch_size,shuffle=True)
df_out = self.perepoch(e,train_iter,test_iter,val_iter,train_data_shape,test_data_shape,val_data_shape)
self.write_metadata(self.nndatapath + "/models/training_metadata_" + str(self.model_number) + ".hdf5","w",df_out)
end = time.time()
time_elapsed = (end-start)/60.
torch.save(self.model.state_dict(), self.nndatapath + "/models/" + str(self.model_number) + ".pt")
val_p, val_r, val_f, val_t, all_val_f = self.get_fscore(val_iter,val_data_shape)
test_p, test_r, test_f, test_t, all_test_f = self.get_fscore(test_iter,test_data_shape)
meta_handle = pandas_hdf5_handler(self.nndatapath + "/metadata.hdf5")
trainmeta = meta_handle.read_df("train",read_metadata=True).metadata
valmeta = meta_handle.read_df("val",read_metadata=True).metadata
testmeta = meta_handle.read_df("test",read_metadata=True).metadata
experiment_name = trainmeta["nndataset"]["experiment_name"]
train_dataname,train_org,train_micro,train_ttl_img = (trainmeta["nndataset"]["data_name"],trainmeta["global"]["Organism"],\
trainmeta["global"]["Microscope"],trainmeta["nndataset"]["ttl_imgs"])
val_dataname,val_org,val_micro,val_ttl_img = (valmeta["nndataset"]["data_name"],valmeta["global"]["Organism"],\
valmeta["global"]["Microscope"],valmeta["nndataset"]["ttl_imgs"])
test_dataname,test_org,test_micro,test_ttl_img = (testmeta["nndataset"]["data_name"],testmeta["global"]["Organism"],\
testmeta["global"]["Microscope"],testmeta["nndataset"]["ttl_imgs"])
train_loss,val_loss,test_loss = df_out['Train Loss'].tolist()[0],df_out['Val Loss'].tolist()[0],df_out['Test Loss'].tolist()[0]
entry = [[experiment_name,self.model_number,train_dataname,train_org,train_micro,train_ttl_img,val_dataname,val_org,val_micro,val_ttl_img,\
test_dataname,test_org,test_micro,test_ttl_img,self.batch_size,self.layers,self.hidden_size,self.lr,self.momentum,\
self.weight_decay,self.dropout,self.w0,self.wm_sigma,train_loss,val_loss,val_p,val_r,val_f,val_t,all_val_f,test_loss,test_p,test_r,\
test_f,test_t,all_test_f,str(timestamp),self.numepochs,time_elapsed]]
df_out = pd.DataFrame(data=entry,columns=['Experiment Name','Model #','Train Dataset','Train Organism','Train Microscope','Train # Images',\
'Val Dataset','Val Organism','Val Microscope','Val # Images',\
'Test Dataset','Test Organism','Test Microscope','Test # Images',\
'Batch Size','Layers','Hidden Size','Learning Rate','Momentum','Weight Decay',\
'Dropout',"W0 Weight","Wm Sigma",'Train Loss','Val Loss','Val Precision','Val Recall','Val F1 Score',\
'Val Threshold','Val F1 Cell Scores','Test Loss','Test Precision','Test Recall','Test F1 Score',\
'Test Threshold','Test F1 Cell Scores','Date/Time','# Epochs','Training Time (mins)'])
df_out = df_out.set_index(['Experiment Name','Model #'], drop=True, append=False, inplace=False)
df_out = df_out.sort_index()
metalock = hdf5lock(self.nndatapath + "/model_metadata.hdf5",updateperiod=5.)
metalock.lockedfn(self.write_metadata,"w",df_out)
class TrainingVisualizer:
def __init__(self,trainpath,modeldbpath):
self.trainpath = trainpath
self.modelpath = trainpath + "/models"
self.modeldfpath = trainpath + "/model_metadata.hdf5"
self.modeldbpath = modeldbpath
self.paramdbpath = modeldbpath+"/Parameters"
self.update_dfs()
if os.path.exists(self.modeldfpath):
self.models_widget = qgrid.show_grid(self.model_df.sort_index())
def update_dfs(self):
df_idx_list = []
for path in os.listdir(self.modelpath):
if "training_metadata" in path:
df_idx = int(path.split("_")[-1][:-5])
df_idx_list.append(df_idx)
df_list = []
for df_idx in df_idx_list:
dfpath = self.modelpath + "/training_metadata_" + str(df_idx) + ".hdf5"
df_handle = pandas_hdf5_handler(dfpath)
df = df_handle.read_df("data")
df_list.append(copy.deepcopy(df))
del df
self.train_df = | pd.concat(df_list) | pandas.concat |
import pandas as pd
import numpy as np
import os
# acquire
from pydataset import data
from datetime import date
from scipy import stats
# turn off pink warning boxes
import warnings
warnings.filterwarnings("ignore")
import sklearn
from sklearn.model_selection import train_test_split
# Train/Split the data~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def split(df, stratify_by= None):
"""
Crude train, validate, test split
To stratify, send in a column name
"""
if stratify_by == None:
train, test = train_test_split(df, test_size=.2, random_state=123)
train, validate = train_test_split(train, test_size=.3, random_state=123)
else:
train, test = train_test_split(df, test_size=.2, random_state=123, stratify=df[stratify_by])
train, validate = train_test_split(train, test_size=.3, random_state=123, stratify=train[stratify_by])
return train, validate, test
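# A quick illustrative example of the splitter above (the column name is a placeholder);
# stratifying keeps class proportions consistent across the roughly 56%/24%/20%
# train/validate/test partitions produced by the two nested splits.
def _example_split(df):
    train, validate, test = split(df, stratify_by="target_class")
    return len(train), len(validate), len(test)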
# Create X_train, y_train, etc...~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seperate_y(train, validate, test):
'''
    This function will take the train, validate, and test dataframes and separate the target variable into its
    own pandas Series.
'''
X_train = train.drop(columns=[''])
y_train = train.logerror
X_validate = validate.drop(columns=[''])
y_validate = validate.logerror
X_test = test.drop(columns=[''])
y_test = test.logerror
return X_train, y_train, X_validate, y_validate, X_test, y_test
# Scale the data~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def scale_data(X_train, X_validate, X_test):
'''
This function will scale numeric data using Min Max transform after
it has already been split into train, validate, and test.
'''
obj_col = []
num_train = X_train.drop(columns = obj_col)
num_validate = X_validate.drop(columns = obj_col)
num_test = X_test.drop(columns = obj_col)
    # Make the MinMax scaler
scaler = sklearn.preprocessing.MinMaxScaler()
# we only .fit on the training data
scaler.fit(num_train)
train_scaled = scaler.transform(num_train)
validate_scaled = scaler.transform(num_validate)
test_scaled = scaler.transform(num_test)
# turn the numpy arrays into dataframes
train_scaled = pd.DataFrame(train_scaled, columns=num_train.columns)
validate_scaled = pd.DataFrame(validate_scaled, columns=num_train.columns)
test_scaled = pd.DataFrame(test_scaled, columns=num_train.columns)
return train_scaled, validate_scaled, test_scaled
# Combo Train & Scale Function~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def split_seperate_scale(df, stratify_by= None):
'''
    This function will take in a dataframe,
    separate the dataframe into train, validate, and test dataframes,
    separate the target variable from train, validate, and test,
    then scale the numeric variables in train, validate, and test,
    and finally return all dataframes individually.
'''
# split data into train, validate, test
    train, validate, test = split(df, stratify_by=stratify_by)
# seperate target variable
X_train, y_train, X_validate, y_validate, X_test, y_test = seperate_y(train, validate, test)
# scale numeric variable
train_scaled, validate_scaled, test_scaled = scale_data(X_train, X_validate, X_test)
return train, validate, test, X_train, y_train, X_validate, y_validate, X_test, y_test, train_scaled, validate_scaled, test_scaled
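# A quick illustrative example of the combined helper above. Note that seperate_y drops a
# column named '' (a placeholder left in the source), so the real target column name must
# be filled in before this runs on actual data; the dataframe argument here is hypothetical.
def _example_split_seperate_scale(df):
    (train, validate, test,
     X_train, y_train, X_validate, y_validate, X_test, y_test,
     train_scaled, validate_scaled, test_scaled) = split_seperate_scale(df)
    return train_scaled.shape, validate_scaled.shape, test_scaled.shape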
# Classification Train & Scale Function~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def train_validate_test_split(df, seed=123):
df = clean_city(df)
train_and_validate, test = train_test_split(
df, test_size=0.2, random_state=seed, stratify=df.gender
)
train, validate = train_test_split(
train_and_validate,
test_size=0.3,
random_state=seed,
stratify=train_and_validate.gender,
)
return train, validate, test
# Miscellaneous Prep Functions~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ''''''''''''''''''''
# '                  '
# ' Helper Functions '
# '                  '
# ''''''''''''''''''''
def missing_values_table(df):
    '''This function will look at any dataset and report back on zeros and nulls for every column, along with their percentages of the total values
    and the column data types. The printed message also reports the shape of the dataframe and how many columns have nulls.'''
zero_val = (df == 0.00).astype(int).sum(axis=0)
null_count = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mz_table = | pd.concat([zero_val, null_count, mis_val_percent], axis=1) | pandas.concat |
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from datetime import datetime, timedelta
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_actions
import definitions
from charts import (line_trace, area_trace, candlestick_trace,
colored_bar_trace, accumulation_trace, cci_trace,
roc_trace, stoc_trace, mom_trace, moving_average_trace,
e_moving_average_trace, bollinger_trace, pp_trace)
app = dash.Dash(__name__, meta_tags=[{"name": "viewport",
"content": "width=device-width"}])
# ------------------------------------------------------------------------------
# Import and clean data (importing csv into pandas)
df = pd.read_csv("assets/msft_prices.csv")
# df.set_index("date", inplace=True)
df["date"] = | pd.to_datetime(df["date"], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
#!/usr/bin/python
import unittest
import cv2
import numpy as np
import os
import pandas as pd
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from PIE import track_colonies
# load in a test timecourse colony property dataframe
# NB: this case is quite pathological, preliminary analysis on bad
# images, with poor tracking, but this is probably better for testing
timecourse_colony_prop_df = \
pd.read_csv(os.path.join('tests','test_ims',
'SL_170619_2_GR_small_xy0001_phase_colony_data_tracked.csv'),
index_col = 0)
satellite_prop_df = \
pd.read_csv(os.path.join('tests','test_ims',
'test_sat_data.csv'),
index_col = 0)
class TestGetOverlap(unittest.TestCase):
'''
Tests getting overlap of colonies between current and next timepoint
'''
def setUp(self):
self.colony_tracker = \
track_colonies.ColonyTracker()
self.colony_tracker.perform_registration = True
def test_get_overlap_t5t6(self):
'''
Tests finding overlap between timepoints 5 and 6 of
timecourse_colony_prop_df
Checked against results of previous matlab code (and manual
confirmation of most rows)
'''
tp_5_data = \
timecourse_colony_prop_df[
timecourse_colony_prop_df.timepoint == 5]
# get colony properties at next timepoint
tp_6_data = \
timecourse_colony_prop_df[
timecourse_colony_prop_df.timepoint == 6]
expected_overlap_df = pd.DataFrame(
np.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
dtype = bool),
index = tp_5_data.index, columns = tp_6_data.index)
test_overlap_df = \
self.colony_tracker._get_overlap(tp_5_data, tp_6_data)
assert_frame_equal(expected_overlap_df, test_overlap_df)
class TestFindCentroidTransform(unittest.TestCase):
'''
Tests finding rigid-body affine transformation matrix that moves
centroids from one timepoint to centroids from another timepoint
'''
@classmethod
def setUpClass(self):
self.colony_tracker = \
track_colonies.ColonyTracker()
self.t1_data = timecourse_colony_prop_df[
timecourse_colony_prop_df['timepoint'] == 1]
def _make_affine_mat(self, angle_in_degrees, x_displacement, y_displacement):
'''
Creates affine transformation matrix to rotate image by
angle_in_degrees and move it by x_displacement and
y_displacement
'''
angle = angle_in_degrees*np.pi/180
affine_mat = np.array([
[np.cos(angle), -np.sin(angle), x_displacement],
[np.sin(angle), np.cos(angle), y_displacement]])
return(affine_mat)
def _warp_centroids(self, timepoint_df, warp_mat):
'''
Warps positions of centroids in timepoint_df by warp_mat and
returns dataframe with warped centroids
'''
centroids = np.float32(timepoint_df[['cX', 'cY']].to_numpy())
warped_centroids = \
np.squeeze(np.float32(cv2.transform(centroids[np.newaxis],
warp_mat)))
warped_df = timepoint_df.copy()
warped_df[['cX', 'cY']] = warped_centroids
return(warped_df)
def test_simple_coord_transform(self):
'''
test whether correct matrix is found when self.t1_data is
rotated and shifted
'''
expected_warp_mat = self._make_affine_mat(15, 40, -25)
# this is a pretty big displacement
warped_t1_data = self._warp_centroids(self.t1_data, expected_warp_mat)
test_warp_mat = self.colony_tracker._find_centroid_transform(self.t1_data,
warped_t1_data)
assert_allclose(expected_warp_mat, test_warp_mat, rtol = 1e-4)
def test_coord_transform_missing_data(self):
'''
test whether correct matrix is found when self.t1_data is
rotated and shifted, and rows are missing from both original and
warped matrix
'''
expected_warp_mat = self._make_affine_mat(15, 40, -25)
# this is a pretty big displacement
warped_t1_data = self._warp_centroids(self.t1_data, expected_warp_mat)
test_warp_mat = self.colony_tracker._find_centroid_transform(
self.t1_data.drop([2,3,11]), warped_t1_data.drop([5,8,9]))
assert_allclose(expected_warp_mat, test_warp_mat, rtol = 1e-4)
def test_coord_transform_missing_data_and_outlier(self):
'''
test whether correct matrix is found when self.t1_data is
rotated and shifted, rows are missing from both original and
warped matrix, and one of the datapoints is changed to be an
outlier
'''
expected_warp_mat = self._make_affine_mat(15, 40, -25)
# this is a pretty big displacement
warped_t1_data = self._warp_centroids(self.t1_data, expected_warp_mat)
warped_t1_data.loc[warped_t1_data.index[4], ['cX', 'cY']] = \
warped_t1_data.loc[warped_t1_data.index[4], ['cX', 'cY']] + \
np.array([200, 350])
test_warp_mat = self.colony_tracker._find_centroid_transform(
self.t1_data.drop([2,3,11]), warped_t1_data.drop([5,8,9]))
assert_allclose(expected_warp_mat, test_warp_mat, rtol = 1e-4)
def test_too_few_points(self):
'''
test whether non-warp matrix is found when self.t1_data is
too short to produce affine matrix
'''
expected_warp_mat = self._make_affine_mat(0, 0, 0)
warped_t1_data = self.t1_data.iloc[0:2]
test_warp_mat = self.colony_tracker._find_centroid_transform(
warped_t1_data, warped_t1_data)
assert_allclose(expected_warp_mat, test_warp_mat, rtol = 1e-4)
class TestFindSatellitesByDist(unittest.TestCase):
'''
Tests finding satellites based on distance cutoff
'''
@classmethod
def setUpClass(self):
self.colony_tracker = \
track_colonies.ColonyTracker()
def test_find_sat_by_dist(self):
parent_candidate_df = pd.DataFrame({
'cX': [11, 40, 55.4, 80, 101.3],
'cY': [21.5, 21.5, 30, 100, 20],
'major_axis_length': [30, 18, 18, 9, 21]},
index = [3, 2, 1, 15, 16])
# first colony should match both first and second parent
# second and fifth colony match no parent colony
# third colony matches only 5th parent colony
# fourth and sixth colonies match only 3rd parent colony
sat_candidate_df = pd.DataFrame({
'cX': [30, 20, 95.5, 51.5, 85, 59],
'cY': [21.5, 100, 12, 34, 50, 19],
'major_axis_length': [2, 4, 3, 2, 5, 4]
},
index = [21, 32, 43, 54, 11, 103])
expected_parent_sat_df = pd.DataFrame({
'satellite_idx': [43, 54, 103],
'parent_idx': [16, 1, 1]
})
test_parent_sat_df = \
self.colony_tracker._find_satellites_by_dist(
parent_candidate_df, sat_candidate_df)
assert_frame_equal(expected_parent_sat_df, test_parent_sat_df)
def test_find_sat_by_dist_no_match(self):
parent_candidate_df = pd.DataFrame({
'cX': [11, 40, 55.4, 80, 101.3],
'cY': [21.5, 21.5, 30, 100, 20],
'major_axis_length': [30, 18, 18, 9, 21]},
index = [3, 2, 1, 15, 16])
# first colony should match both first and second parent
# second and fifth colony match no parent colony
# third colony matches only 5th parent colony
# fourth and sixth colonies match only 3rd parent colony
sat_candidate_df = pd.DataFrame({
'cX': [20, 85],
'cY': [100, 50],
'major_axis_length': [4, 5]
},
index = [32, 11])
expected_parent_sat_df = pd.DataFrame({
'satellite_idx': [],
'parent_idx': []
})
test_parent_sat_df = \
self.colony_tracker._find_satellites_by_dist(
parent_candidate_df, sat_candidate_df)
assert_frame_equal(expected_parent_sat_df, test_parent_sat_df,
check_dtype=False)
def test_find_sat_by_dist_no_sat(self):
parent_candidate_df = pd.DataFrame({
'cX': [11, 40],
'cY': [21.5, 21.5],
'major_axis_length': [30, 18]},
index = [3, 2])
sat_candidate_df = pd.DataFrame({
'cX': [],
'cY': [],
'major_axis_length': []
},
index = [])
expected_parent_sat_df = pd.DataFrame({
'satellite_idx': [],
'parent_idx': []
})
test_parent_sat_df = \
self.colony_tracker._find_satellites_by_dist(
parent_candidate_df, sat_candidate_df)
assert_frame_equal(expected_parent_sat_df, test_parent_sat_df,
check_dtype=False)
def test_find_sat_by_dist_no_parent(self):
parent_candidate_df = pd.DataFrame({
'cX': [],
'cY': [],
'major_axis_length': []},
index = [])
sat_candidate_df = pd.DataFrame({
'cX': [30, 20],
'cY': [21.5, 100],
'major_axis_length': [2, 4]
},
index = [21, 32])
expected_parent_sat_df = pd.DataFrame({
'satellite_idx': [],
'parent_idx': []
})
test_parent_sat_df = \
self.colony_tracker._find_satellites_by_dist(
parent_candidate_df, sat_candidate_df)
assert_frame_equal(expected_parent_sat_df, test_parent_sat_df,
check_dtype=False)
def test_find_sat_by_dist_real_data(self):
parent_candidate_df = satellite_prop_df.loc[[31,32,35,36,37]]
sat_candidate_df = satellite_prop_df.loc[[33,34]]
expected_parent_sat_df = pd.DataFrame({
'satellite_idx': [34],
'parent_idx': [35]
})
test_parent_sat_df = \
self.colony_tracker._find_satellites_by_dist(
parent_candidate_df, sat_candidate_df)
assert_frame_equal(expected_parent_sat_df, test_parent_sat_df)
class TestAggregateByParent(unittest.TestCase):
'''
Tests aggregation of colony_tracker.active_property_df by
parent_colony
'''
def setUp(self):
self.colony_tracker = \
track_colonies.ColonyTracker()
def test_aggregation(self):
self.colony_tracker.tracking_col_name = 'time_tracking_id'
self.colony_tracker.active_col_prop_df = pd.DataFrame({
'phase_num': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2],
'timepoint': [1, 1, 1, 2, 2, 2, 1, 1, 2, 2, 3, 3, 1, 2, 1, 2, 3],
'parent_colony': [
'a', 'b', 'c', 'a', 'a', 'b', 'a', 'b', 'b', 'b', 'b', 'f',
'x', 'x', 'x', 'x', 'y'
],
'xy_pos_idx': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
'label': np.arange(1, 18).astype(str),
'area': [
1, 2.1, 3.21, 5.4321, 4.321, 6.09, 7.1, 8.19, 9.13, 10,
11.5, 12.43, 13.67, 14.85, 15.69, 16.9, 17
],
'perimeter': np.arange(1,18)*3+.4,
'time_tracking_id': [
'a', 'b', 'c', 'a', 'd', 'b', 'a', 'b', 'e', 'b', 'b', 'f',
'x', 'x', 'x', 'x', 'y'
]
}, index = range(100,117))
expected_property_df = pd.DataFrame({
'phase_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2],
'timepoint': [1, 1, 1, 2, 2, 1, 1, 2, 3, 3, 1, 2, 1, 2, 3],
'xy_pos_idx': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
'perimeter':
np.array(
[1, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17]
)*3+.4,
'time_tracking_id': [
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'b', 'b', 'f',
'x', 'x', 'x', 'x', 'y'
],
'label': [
'1', '2', '3', '4;5', '6', '7', '8', '9;10', '11', '12','13',
'14', '15', '16', '17'
],
'area': [
1, 2.1, 3.21, 4.321+5.4321, 6.09, 7.1, 8.19, 9.13+10,
11.5, 12.43, 13.67, 14.85, 15.69, 16.9, 17
]
},
index = [
100, 101, 102, 103, 105, 106, 107, 109, 110, 111, 112, 113,
114, 115, 116
])
test_property_df = self.colony_tracker._aggregate_by_parent()
| assert_frame_equal(expected_property_df, test_property_df,
check_index_type = False) | pandas.testing.assert_frame_equal |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame, frame_or_series):
# TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.tshift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.tshift(1)
expected = dtobj.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, inferred_ts)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame, frame_or_series):
# GH#11631
dtobj = tm.get_obj(datetime_frame, frame_or_series)
with tm.assert_produces_warning(FutureWarning):
dtobj.tshift()
def test_period_index_frame_shift_with_freq(self, frame_or_series):
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame, frame_or_series):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
| tm.assert_equal(dtobj, unshifted) | pandas._testing.assert_equal |
# coding: utf-8
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from copy import copy as make_copy
from .classifier import *
import time
import warnings
class StackingClassifier():
"""A Stacking classifier is a classifier that uses the predictions of several first layer estimators (generated with a cross validation method)
for a second layer estimator.
Parameters
----------
base_estimators : list
List of estimators to fit in the first level using a cross validation.
level_estimator : object, optional (default=LogisticRegression())
The estimator used in second and last level.
n_folds : int, optional (default=5)
Number of folds used to generate the meta features for the training set.
copy : boolean, optional (default=False)
If true, meta features are added to the original dataset
    drop_first : boolean, optional (default=True)
        If True, each estimator outputs n_classes-1 probabilities
random_state : None, int or RandomState (default=1)
Pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling.
verbose : boolean, optional (default=True)
Verbose mode.
"""
def __init__(self, base_estimators = [Classifier(strategy="XGBoost"),Classifier(strategy="RandomForest"),Classifier(strategy="ExtraTrees")], level_estimator = LogisticRegression(n_jobs=-1), n_folds = 5, copy = False, drop_first = True, random_state = 1, verbose = True):
self.base_estimators = base_estimators
if(type(self.base_estimators)!=list):
raise ValueError("base_estimators must be a list")
else:
for i, est in enumerate(self.base_estimators):
self.base_estimators[i] = make_copy(est)
self.level_estimator = level_estimator
self.n_folds = n_folds
if(type(self.n_folds)!=int):
raise ValueError("n_folds must be an integer")
self.copy = copy
if(type(self.copy)!=bool):
raise ValueError("copy must be a boolean")
self.drop_first = drop_first
if(type(self.drop_first)!=bool):
raise ValueError("drop_first must be a boolean")
self.random_state = random_state
if((type(self.random_state)!=int)&(self.random_state!=None)):
raise ValueError("random_state must be either None or an integer")
self.verbose = verbose
if(type(self.verbose)!=bool):
raise ValueError("verbose must be a boolean")
self.__fitOK = False
self.__fittransformOK = False
def get_params(self, deep = True):
return {'level_estimator': self.level_estimator,
'base_estimators' : self.base_estimators,
'n_folds' : self.n_folds,
'copy' : self.copy,
'drop_first' : self.drop_first,
'random_state' : self.random_state,
'verbose' : self.verbose}
def set_params(self,**params):
self.__fitOK = False
self.__fittransformOK = False
for k,v in params.items():
if k not in self.get_params():
warnings.warn("Invalid parameter a for stacking_classifier StackingClassifier. Parameter IGNORED. Check the list of available parameters with `stacking_classifier.get_params().keys()`")
else:
setattr(self,k,v)
def __cross_val_predict_proba(self, estimator, X, y, cv):
"""Evaluate the target by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
cv : a STRATIFIED cross-validation generator
Returns
-------
y_pred : array-like of shape = [n_samples, n_classes]
The predicted class probabilities for X.
"""
classes = y.value_counts()
classes_to_drop = classes[classes<2].index
indexes_to_drop = y[y.apply(lambda x: x in classes_to_drop)].index
y_pred = np.zeros((len(y), len(classes)-len(classes_to_drop)))
for train_index, test_index in cv.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index] #defining train et validation sets for each fold
y_train = y.iloc[train_index]
try:
X_train = X_train.drop(indexes_to_drop)
y_train = y_train.drop(indexes_to_drop)
except:
pass
estimator.fit(X_train, y_train) #learning the model
y_pred[test_index] = estimator.predict_proba(X_test)[:,] #predicting the probability
return y_pred
def fit_transform(self, X, y):
"""Create meta-features for the training dataset.
Parameters
----------
X : DataFrame, shape = [n_samples, n_features]
The training dataset.
y : pandas series of shape = [n_samples, ]
The target.
Returns
-------
X_transform : DataFrame, shape = [n_samples, n_features*int(copy)+n_metafeatures]
Returns the transformed training dataset.
"""
### sanity checks
if((type(X)!=pd.SparseDataFrame)&(type(X)!=pd.DataFrame)):
raise ValueError("X must be a DataFrame")
if(type(y)!=pd.core.series.Series):
raise ValueError("y must be a Series")
cv = StratifiedKFold(n_splits = self.n_folds,shuffle=True,random_state=self.random_state) #stratified k fold
preds = pd.DataFrame([], index=y.index)
classes = y.value_counts()
classes_to_drop = classes[classes<2].index
indexes_to_drop = y[y.apply(lambda x: x in classes_to_drop)].index
if(self.verbose):
print("")
print("[=============================================================================] LAYER [===================================================================================]")
print("")
for c, clf in enumerate(self.base_estimators):
if(self.verbose):
print("> fitting estimator n°"+ str(c+1) + " : "+ str(clf.get_params())+" ...")
print("")
y_pred = self.__cross_val_predict_proba(clf, X, y, cv) #for each base estimator, we create the meta feature on train set
for i in range(0, y_pred.shape[1]-int(self.drop_first)):
preds["est"+str(c+1)+"_class"+str(i)] = y_pred[:,i]
clf.fit(X.drop(indexes_to_drop), y.drop(indexes_to_drop)) # and we refit the base estimator on entire train set
layer = 1
while(len(np.intersect1d(X.columns, ["layer"+str(layer)+"_"+s for s in preds.columns]))>0):
layer = layer + 1
preds.columns = ["layer"+str(layer)+"_"+s for s in preds.columns]
self.__fittransformOK = True
if(self.copy==True):
return | pd.concat([X, preds], axis=1) | pandas.concat |
#!/usr/bin/env python
import torch
from torch.utils.data import DataLoader
import pickle
from rdkit import Chem
from rdkit import rdBase
from tqdm import tqdm
from rdkit.Chem import AllChem
from data_structs import MolData, Vocabulary
from model import RNN
from utils import Variable, decrease_learning_rate, unique
import torch.nn as nn
import argparse
import pandas as pd
rdBase.DisableLog('rdApp.error')
def cano_smi_file(fname, outfn):
"""
canonicalize smile file
Args:
fname: location of file containing the SMILES structures
outfn: Filename for output Canolized SMILES
Returns: None
"""
out = open(outfn, 'w')
with open (fname) as f:
for line in f:
smi = line.rstrip()
can_smi = Chem.MolToSmiles(Chem.MolFromSmiles(smi))
out.write(can_smi + '\n')
out.close()
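# --- Illustrative usage sketch (not part of the original script) ---
# cano_smi_file rewrites every SMILES line in canonical form so that duplicate structures
# collapse to a single representation before transfer learning; the file names below are
# placeholders.
def _example_canonicalize():
    cano_smi_file('raw_smiles.smi', 'canonical_smiles.smi')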
def train_model(voc_dir, smi_dir, prior_dir, tf_dir,tf_process_dir,freeze=False):
"""
Transfer learning on target molecules using the SMILES structures
Args:
voc_dir: location of the vocabulary
smi_dir: location of the SMILES file used for transfer learning
prior_dir: location of prior trained model to initialize transfer learning
tf_dir: location to save the transfer learning model
tf_process_dir: location to save the SMILES sampled while doing transfer learning
freeze: Bool. If true, all parameters in the RNN will be frozen except for the last linear layer during
transfer learning.
Returns: None
"""
voc = Vocabulary(init_from_file=voc_dir)
#cano_smi_file('all_smi_refined.csv', 'all_smi_refined_cano.csv')
moldata = MolData(smi_dir, voc)
# Monomers 67 and 180 were removed because of the unseen [C-] in voc
# DAs containing [C] removed: 43 molecules in 5356; Ge removed: 154 in 5356; [c] removed 4 in 5356
# [S] 1 molecule in 5356
data = DataLoader(moldata, batch_size=64, shuffle=True, drop_last=False,
collate_fn=MolData.collate_fn)
transfer_model = RNN(voc)
# if freeze=True, freeze all parameters except those in the linear layer
if freeze:
for param in transfer_model.rnn.parameters():
param.requires_grad = False
transfer_model.rnn.linear = nn.Linear(512, voc.vocab_size)
if torch.cuda.is_available():
transfer_model.rnn.load_state_dict(torch.load(prior_dir))
else:
transfer_model.rnn.load_state_dict(torch.load(prior_dir,
map_location=lambda storage, loc: storage))
optimizer = torch.optim.Adam(transfer_model.rnn.parameters(), lr=0.0005)
    smi_lst = []
    epoch_lst = []
for epoch in range(1, 11):
for step, batch in tqdm(enumerate(data), total=len(data)):
seqs = batch.long()
log_p, _ = transfer_model.likelihood(seqs)
loss = -log_p.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 80 == 0 and step != 0:
decrease_learning_rate(optimizer, decrease_by=0.03)
tqdm.write('*'*50)
tqdm.write("Epoch {:3d} step {:3d} loss: {:5.2f}\n".format(epoch, step, loss.data[0]))
seqs, likelihood, _ = transfer_model.sample(128)
valid = 0
for i, seq in enumerate(seqs.cpu().numpy()):
smile = voc.decode(seq)
if Chem.MolFromSmiles(smile):
valid += 1
if i < 5:
tqdm.write(smile)
tqdm.write("\n{:>4.1f}% valid SMILES".format(100*valid/len(seqs)))
tqdm.write("*"*50 + '\n')
torch.save(transfer_model.rnn.state_dict(), tf_dir)
seqs, likelihood, _ = transfer_model.sample(1024)
valid = 0
#valid_smis = []
for i, seq in enumerate(seqs.cpu().numpy()):
smile = voc.decode(seq)
if Chem.MolFromSmiles(smile):
try:
AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(smile), 2, 1024)
valid += 1
smi_lst.append(smile)
epoch_lst.append(epoch)
except:
continue
torch.save(transfer_model.rnn.state_dict(), tf_dir)
transfer_process_df = pd.DataFrame(columns=['SMILES', 'Epoch'])
transfer_process_df['SMILES'] = pd.Series(data=smi_lst)
transfer_process_df['Epoch'] = | pd.Series(data=epoch_lst) | pandas.Series |
import pandas as pd
from upgrade_model import k8s_releases_loader
k8s_releases = k8s_releases_loader.load()
def compute(id, start_date, end_date, first_version, upgrade_every):
days = pd.date_range(start=start_date, end=end_date, freq='D')
environment_ids = [id]
environment_state = pd.DataFrame(
index= | pd.MultiIndex.from_product([environment_ids,days],names=['environment_id','at_date']) | pandas.MultiIndex.from_product |
import pandas as pd
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import time
import multiprocessing as mp
start_time=time.time()
def svm(location1,location2):
data=pd.read_csv(location1)
data_columns=data.columns
xtrain = data[data_columns[data_columns != 'typeoffraud']]
ytrain=data['typeoffraud']
data1=pd.read_csv(location2)
data1_columns=data1.columns
xtest = data1[data1_columns[data1_columns != 'typeoffraud']]
from sklearn import svm
clf=svm.SVC(kernel='rbf')
clf.fit(xtrain,ytrain)
ypredict=clf.predict(xtest)
rel=list(zip(ypredict))
pp=pd.DataFrame(data=rel,columns=['label'])
pp.to_csv('label.csv',index=False)
###########################################################################################################
def maketags(location2,location3):
e=pd.read_csv(location2)
tags=[]
ids=[]
tags1=[]
ids1=[]
for i,l in enumerate(e['typeoffraud']):
if l==1 or l==2 or l==3:
ids.append(e.iloc[i,1])
tags.append(e.iloc[i,4])
if l==4 or l==5 or l==6:
ids1.append(e.iloc[i,1])
tags1.append(e.iloc[i,4])
rel=list(zip(ids,tags))
pp=pd.DataFrame(data=rel,columns=['ids','tags'])
pp.to_csv('labelofhead.csv',index=False)
rel1=list(zip(ids1,tags1))
pp1=pd.DataFrame(data=rel1,columns=['ids','tags'])
pp1.to_csv('labelofcol.csv',index=False)
e1=pd.read_csv(location3)
x=list(e['ids'])
y=list(e1['label'])
rel=list(zip(x,y))
pp=pd.DataFrame(data=rel,columns=['ids','tags'])
pp.to_csv('labelofmethod.csv',index=False)
locationnew="labelofmethod.csv"
e=pd.read_csv(locationnew)
idof=[]
tag=[]
idof1=[]
tag1=[]
for i,l in enumerate(e['tags']):
if l==1 or l==2 or l==3:
idof.append(e.iloc[i,0])
tag.append(e.iloc[i,1])
if l==4 or l==5 or l==6:
idof1.append(e.iloc[i,0])
tag1.append(e.iloc[i,1])
rel=list(zip(idof,tag))
pp=pd.DataFrame(data=rel,columns=['ids','tags'])
pp.to_csv('labelofheadM.csv',index=False)
rel1=list(zip(idof1,tag1))
pp1=pd.DataFrame(data=rel1,columns=['ids','tags'])
pp1.to_csv('labelofcolM.csv',index=False)
#############################################################################################################
def evalofhead(location4,location5):
e=pd.read_csv(location4)
e1= | pd.read_csv(location5) | pandas.read_csv |
import os
import json
import pandas as pd
statements = []
evidences = []
adjective_frequencies = {'sub': {}, 'obj': {}}
_adjective_frequencies = {'sub': {}, 'obj': {}}
adjective_names = {'sub': {}, 'obj': {}}
_adjective_names = {'sub': {}, 'obj': {}}
adjective_pairs = {}
_adjective_pairs = {}
with open('../../data/causemos_indra_statements/CauseMos_indra_statements.json', 'r') as f:
lines = f.readlines()
for idx, line in enumerate(lines, 1):
statement = json.loads(line)
#print(json.dumps(statement, indent=4, sort_keys=True))
belief = statement["_source"]["belief"]
evidence = statement["_source"]["evidence"]
for evid_idx, evid in enumerate(evidence, 1):
text = evid["evidence_context"]["text"]
_adjectives = []
for key in ["subj_adjectives", "obj_adjectives"]:
_adj = evid["evidence_context"][key]
_adj = _adj if _adj else []
_adjectives.append(_adj)
_polarities = []
for key in ["subj_polarity", "obj_polarity"]:
_pol = evid["evidence_context"][key]
_pol = _pol if _pol else 0
_polarities.append(_pol)
evidences.append({
'Statement #': idx,
'Evidence #': evid_idx,
'_Sub Adj': ', '.join(_adjectives[0]),
'_Obj Adj': ', '.join(_adjectives[1]),
'_Sub Pol': _polarities[0],
'_Obj Pol': _polarities[1],
'# _Sub Adj': len(_adjectives[0]),
'# _Obj Adj': len(_adjectives[1]),
'Text': text
})
for idx2, key in enumerate(['sub', 'obj']):
if len(_adjectives[idx2]) in _adjective_frequencies[key].keys():
_adjective_frequencies[key][len(_adjectives[idx2])] += 1
else:
_adjective_frequencies[key][len(_adjectives[idx2])] = 1
_adjectives[0] = ['None'] if len(_adjectives[0]) == 0 else _adjectives[0]
_adjectives[1] = ['None'] if len(_adjectives[1]) == 0 else _adjectives[1]
for adj in _adjectives[0]:
if adj in _adjective_names['sub'].keys():
_adjective_names['sub'][adj] += 1
else:
_adjective_names['sub'][adj] = 1
for adj in _adjectives[1]:
if adj in _adjective_names['obj'].keys():
_adjective_names['obj'][adj] += 1
else:
_adjective_names['obj'][adj] = 1
for sub in _adjectives[0]:
for obj in _adjectives[1]:
adj_pair = (sub, obj)
if adj_pair in _adjective_pairs.keys():
_adjective_pairs[adj_pair] += 1
else:
_adjective_pairs[adj_pair] = 1
# print(len(evidence))
# print(json.dumps(statement, indent=4, sort_keys=True))
# exit()
#
# continue
text = evidence[0]["evidence_context"]["text"]
_adjectives = []
for key in ["subj_adjectives", "obj_adjectives"]:
_adj = evidence[0]["evidence_context"][key]
_adj = _adj if _adj else []
_adjectives.append(_adj)
_polarities = []
for key in ["subj_polarity", "obj_polarity"]:
_pol = evidence[0]["evidence_context"][key]
_pol = _pol if _pol else 0
_polarities.append(_pol)
concepts = []
for key in ["subj", "obj"]:
con = statement["_source"][key]["concept"]
concepts.append(con)
adjectives = []
for key in ["subj", "obj"]:
adj = statement["_source"][key]["adjectives"]
adjectives.append(adj)
polarities = []
for key in ["subj", "obj"]:
pol = statement["_source"][key]["polarity"]
polarities.append(pol)
statements.append({
'Statement #': idx,
'Belief': belief,
'Subject': concepts[0],
'Object': concepts[1],
'Sub Adj': ', '.join(adjectives[0]),
'Obj Adj': ', '.join(adjectives[1]),
'Sub Pol': polarities[0],
'Obj Pol': polarities[1],
'_Sub Adj': ', '.join(_adjectives[0]),
'_Obj Adj': ', '.join(_adjectives[1]),
'_Sub Pol': _polarities[0],
'_Obj Pol': _polarities[1],
'# Sub Adj': len(adjectives[0]),
'# Obj Adj': len(adjectives[1]),
'# _Sub Adj': len(_adjectives[0]),
'# _Obj Adj': len(_adjectives[1]),
'# _Evidence': len(evidence),
'Text': text
})
if len(adjectives[0]) > 1 or len(adjectives[1]) > 1:
with open(f'../../data/causemos_indra_statements/multi_adjective/{idx}.json', 'w') as out:
out.write(json.dumps(statement, indent=4, sort_keys=True))
for idx2, key in enumerate(['sub', 'obj']):
if len(adjectives[idx2]) in adjective_frequencies[key].keys():
adjective_frequencies[key][len(adjectives[idx2])] += 1
else:
adjective_frequencies[key][len(adjectives[idx2])] = 1
adjectives[0] = ['None'] if len(adjectives[0]) == 0 else adjectives[0]
adjectives[1] = ['None'] if len(adjectives[1]) == 0 else adjectives[1]
for adj in adjectives[0]:
if adj in adjective_names['sub'].keys():
adjective_names['sub'][adj] += 1
else:
adjective_names['sub'][adj] = 1
for adj in adjectives[1]:
if adj in adjective_names['obj'].keys():
adjective_names['obj'][adj] += 1
else:
adjective_names['obj'][adj] = 1
for sub in adjectives[0]:
for obj in adjectives[1]:
adj_pair = (sub, obj)
if adj_pair in adjective_pairs.keys():
adjective_pairs[adj_pair] += 1
else:
adjective_pairs[adj_pair] = 1
# print(belief)
# print(text)
# print(_adjectives)
# print(_polarities)
# print(adjectives)
# print(_polarities)
# print(concepts)
df_statements = pd.DataFrame(statements)
df_evidences = pd.DataFrame(evidences)
df_statements.to_csv('../../data/causemos_indra_statements/statements.csv', index=False,
columns=['Statement #', 'Sub Adj', '_Sub Adj', 'Sub Pol', '_Sub Pol', 'Subject', 'Obj Adj',
'_Obj Adj', 'Obj Pol', '_Obj Pol', '# Sub Adj', '# _Sub Adj', '# Obj Adj', '# _Obj Adj',
'# _Evidence', 'Text'])
df_evidences.to_csv('../../data/causemos_indra_statements/evidence.csv', index=False,
columns=['Statement #', 'Evidence #', '_Sub Adj', '_Sub Pol', '_Obj Adj', '_Obj Pol', '# _Sub Adj',
'# _Obj Adj', 'Text'])
# df_sub_adj_counts = df_statements.groupby(by='# Sub Adj').count()
# df_obj_adj_counts = df_statements.groupby(by='# Obj Adj').count()
#
# _df_sub_adj_counts = df_statements.groupby(by='# _Sub Adj').count()
# _df_obj_adj_counts = df_statements.groupby(by='# _Obj Adj').count()
#
# df_sub_adj_counts.to_csv('../../data/causemos_indra_statements/sub_adj_counts.csv', index=False)
# df_obj_adj_counts.to_csv('../../data/causemos_indra_statements/obj_adj_counts.csv', index=False)
#
# _df_sub_adj_counts.to_csv('../../data/causemos_indra_statements/_sub_adj_counts.csv', index=False)
# _df_obj_adj_counts.to_csv('../../data/causemos_indra_statements/_obj_adj_counts.csv', index=False)
for idx2, key in enumerate(['sub', 'obj']):
multiplicity = []
frequency = []
for mult, freq in adjective_frequencies[key].items():
multiplicity.append(mult)
frequency.append(freq)
df_freq = pd.DataFrame({'# Adjectives': multiplicity, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/{key}_adj_counts.csv', index=False)
multiplicity = []
frequency = []
for mult, freq in _adjective_frequencies[key].items():
multiplicity.append(mult)
frequency.append(freq)
df_freq = pd.DataFrame({'# Adjectives': multiplicity, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/_{key}_adj_counts.csv', index=False)
adjective = []
frequency = []
for adj, freq in adjective_names[key].items():
adjective.append(adj)
frequency.append(freq)
df_freq = pd.DataFrame({'Adjective': adjective, 'frequency': frequency})
df_freq.to_csv(f'../../data/causemos_indra_statements/{key}_adjectives.csv', index=False)
adjective = []
frequency = []
for adj, freq in _adjective_names[key].items():
adjective.append(adj)
frequency.append(freq)
df_freq = | pd.DataFrame({'Adjective': adjective, 'frequency': frequency}) | pandas.DataFrame |
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- fix non-positive semidefinite matrices
- general risk matrix function, allowing you to run any risk model from one function.
- sample covariance
- semicovariance
- exponentially weighted covariance
- minimum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
import warnings
import numpy as np
import pandas as pd
from .expected_returns import returns_from_prices
def _is_positive_semidefinite(matrix):
"""
Helper function to check if a given matrix is positive semidefinite.
Any method that requires inverting the covariance matrix will struggle
with a non-positive semidefinite matrix
:param matrix: (covariance) matrix to test
:type matrix: np.ndarray, pd.DataFrame
:return: whether matrix is positive semidefinite
:rtype: bool
"""
try:
# Significantly more efficient than checking eigenvalues (stackoverflow.com/questions/16266720)
np.linalg.cholesky(matrix + 1e-16 * np.eye(len(matrix)))
return True
except np.linalg.LinAlgError:
return False
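# Quick illustration of the check above (assumption: toy numpy matrices are enough here).
def _example_is_positive_semidefinite():
    import numpy as np
    psd = np.array([[2.0, 0.5], [0.5, 1.0]])      # a valid covariance matrix
    not_psd = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1
    print(_is_positive_semidefinite(psd))      # True
    print(_is_positive_semidefinite(not_psd))  # False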
def fix_nonpositive_semidefinite(matrix, fix_method="spectral"):
"""
Check if a covariance matrix is positive semidefinite, and if not, fix it
with the chosen method.
The ``spectral`` method sets negative eigenvalues to zero then rebuilds the matrix,
while the ``diag`` method adds a small positive value to the diagonal.
:param matrix: raw covariance matrix (may not be PSD)
:type matrix: pd.DataFrame
:param fix_method: {"spectral", "diag"}, defaults to "spectral"
:type fix_method: str, optional
:raises NotImplementedError: if a method is passed that isn't implemented
:return: positive semidefinite covariance matrix
:rtype: pd.DataFrame
"""
if _is_positive_semidefinite(matrix):
return matrix
else:
warnings.warn(
"The covariance matrix is non positive semidefinite. Amending eigenvalues."
)
# Eigendecomposition
q, V = np.linalg.eigh(matrix)
if fix_method == "spectral":
# Remove negative eigenvalues
q = np.where(q > 0, q, 0)
# Reconstruct matrix
fixed_matrix = V @ np.diag(q) @ V.T
elif fix_method == "diag":
min_eig = np.min(q)
if min_eig < 0:
fixed_matrix = matrix - 1.1 * min_eig * np.eye(len(matrix))
else:
raise NotImplementedError("Method {} not implemented".format(fix_method))
if not _is_positive_semidefinite(fixed_matrix):
warnings.warn("Could not fix matrix. Please try a different risk model.")
# Rebuild labels if provided
if isinstance(matrix, pd.DataFrame):
tickers = matrix.index
return pd.DataFrame(fixed_matrix, index=tickers, columns=tickers)
else:
return fixed_matrix
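# A short sketch of fix_nonpositive_semidefinite on a deliberately broken "covariance"
# matrix; the 'A'/'B' labels are made up for illustration.
def _example_fix_nonpositive_semidefinite():
    import numpy as np
    import pandas as pd
    broken = pd.DataFrame([[1.0, 2.0], [2.0, 1.0]],
                          index=["A", "B"], columns=["A", "B"])  # eigenvalues 3 and -1
    fixed = fix_nonpositive_semidefinite(broken, fix_method="spectral")
    print(np.linalg.eigvalsh(fixed.values))  # negative eigenvalue clipped to ~0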
def risk_matrix(prices, method="sample_cov", **kwargs):
"""
Compute a covariance matrix, using the risk model supplied in the ``method``
parameter.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param method: the risk model to use. Should be one of:
- ``sample_cov``
- ``semicovariance``
- ``exp_cov``
- ``min_cov_determinant``
- ``ledoit_wolf``
- ``ledoit_wolf_constant_variance``
- ``ledoit_wolf_single_factor``
- ``ledoit_wolf_constant_correlation``
- ``oracle_approximating``
:type method: str, optional
:raises NotImplementedError: if the supplied method is not recognised
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if method == "sample_cov":
return sample_cov(prices, **kwargs)
elif method == "semicovariance":
return semicovariance(prices, **kwargs)
elif method == "exp_cov":
return exp_cov(prices, **kwargs)
elif method == "min_cov_determinant":
return min_cov_determinant(prices, **kwargs)
elif method == "ledoit_wolf" or method == "ledoit_wolf_constant_variance":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf()
elif method == "ledoit_wolf_single_factor":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf(
shrinkage_target="single_factor"
)
elif method == "ledoit_wolf_constant_correlation":
return CovarianceShrinkage(prices, **kwargs).ledoit_wolf(
shrinkage_target="constant_correlation"
)
elif method == "oracle_approximating":
return CovarianceShrinkage(prices, **kwargs).oracle_approximating()
else:
raise NotImplementedError("Risk model {} not implemented".format(method))
def sample_cov(prices, returns_data=False, frequency=252, **kwargs):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices)
return fix_nonpositive_semidefinite(
returns.cov() * frequency, kwargs.get("fix_method", "spectral")
)
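# Hedged usage sketch for sample_cov with synthetic random-walk prices; the ticker
# names and dates are invented for the example.
def _example_sample_cov():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    dates = pd.date_range("2020-01-01", periods=250, freq="B")
    prices = pd.DataFrame(
        100 * np.exp(np.cumsum(rng.normal(0, 0.01, size=(250, 3)), axis=0)),
        index=dates, columns=["AAA", "BBB", "CCC"])
    S = sample_cov(prices)  # annualised 3x3 covariance matrix (frequency=252)
    print(S.round(4))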
def semicovariance(
prices, returns_data=False, benchmark=0.000079, frequency=252, **kwargs
):
"""
Estimate the semicovariance matrix, i.e the covariance given that
the returns are less than the benchmark.
.. semicov = E([min(r_i - B, 0)] . [min(r_j - B, 0)])
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices)
drops = np.fmin(returns - benchmark, 0)
return fix_nonpositive_semidefinite(
drops.cov() * frequency, kwargs.get("fix_method", "spectral")
)
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean().iloc[-1]
def exp_cov(prices, returns_data=False, span=180, frequency=252, **kwargs):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
if returns_data:
returns = prices
else:
returns = returns_from_prices(prices)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
returns.iloc[:, i], returns.iloc[:, j], span
)
cov = pd.DataFrame(S * frequency, columns=assets, index=assets)
return fix_nonpositive_semidefinite(cov, kwargs.get("fix_method", "spectral"))
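# Sketch comparing the exponentially-weighted estimate with the plain sample
# covariance on the same synthetic prices (illustrative data only).
def _example_exp_cov():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(1)
    dates = pd.date_range("2020-01-01", periods=250, freq="B")
    prices = pd.DataFrame(
        100 * np.exp(np.cumsum(rng.normal(0, 0.01, size=(250, 2)), axis=0)),
        index=dates, columns=["X", "Y"])
    print(exp_cov(prices, span=60).round(4))  # recent observations weighted more
    print(sample_cov(prices).round(4))        # equal-weighted benchmark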
def min_cov_determinant(
prices, returns_data=False, frequency=252, random_state=None, **kwargs
):
"""
Calculate the minimum covariance determinant, an estimator of the covariance matrix
that is more robust to noise.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param random_state: random seed to make results reproducible, defaults to None
:type random_state: int, optional
:return: annualised estimate of covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
# Extra dependency
try:
import sklearn.covariance
except (ModuleNotFoundError, ImportError):
raise ImportError("Please install scikit-learn via pip or poetry")
assets = prices.columns
if returns_data:
X = prices.dropna(how="all")
else:
X = prices.pct_change().dropna(how="all")
X = np.nan_to_num(X.values)
raw_cov_array = sklearn.covariance.fast_mcd(X, random_state=random_state)[1]
cov = pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
return fix_nonpositive_semidefinite(cov, kwargs.get("fix_method", "spectral"))
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
cov_matrix = pd.DataFrame(cov_matrix)
Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))
return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)
def corr_to_cov(corr_matrix, stdevs):
"""
Convert a correlation matrix to a covariance matrix
:param corr_matrix: correlation matrix
:type corr_matrix: pd.DataFrame
:param stdevs: vector of standard deviations
:type stdevs: array-like
:return: covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(corr_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
corr_matrix = pd.DataFrame(corr_matrix)
return corr_matrix * np.outer(stdevs, stdevs)
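# Round-trip sketch: covariance -> correlation -> covariance should recover the
# original matrix (toy numbers only).
def _example_cov_corr_roundtrip():
    import numpy as np
    import pandas as pd
    cov = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]],
                       index=["A", "B"], columns=["A", "B"])
    corr = cov_to_corr(cov)                     # unit diagonal, off-diagonal 0.1
    recovered = corr_to_cov(corr, np.sqrt(np.diag(cov)))
    print(np.allclose(recovered.values, cov.values))  # True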
class CovarianceShrinkage:
"""
Provide methods for computing shrinkage estimates of the covariance matrix, using the
sample covariance matrix and choosing the structured estimator to be an identity matrix
multiplied by the average sample variance. The shrinkage constant can be input manually,
    though there exist methods (notably Ledoit and Wolf) to estimate the optimal value.
Instance variables:
- ``X`` - pd.DataFrame (returns)
- ``S`` - np.ndarray (sample covariance matrix)
- ``delta`` - float (shrinkage constant)
- ``frequency`` - int
"""
def __init__(self, prices, returns_data=False, frequency=252):
"""
:param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
:type prices: pd.DataFrame
:param returns_data: if true, the first argument is returns instead of prices.
:type returns_data: bool, defaults to False.
:param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)
:type frequency: int, optional
"""
# Optional import
try:
from sklearn import covariance
self.covariance = covariance
except (ModuleNotFoundError, ImportError):
raise ImportError("Please install scikit-learn via pip or poetry")
if not isinstance(prices, pd.DataFrame):
warnings.warn("data is not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
self.frequency = frequency
if returns_data:
self.X = prices.dropna(how="all")
else:
self.X = prices.pct_change().dropna(how="all")
self.S = self.X.cov().values
self.delta = None # shrinkage constant
def _format_and_annualize(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
cov = | pd.DataFrame(raw_cov_array, index=assets, columns=assets) | pandas.DataFrame |
#!/usr/bin/env python3
# various functions and mixins for downstream genomic and epigenomic anlyses
import os
import glob
import re
import random
from datetime import datetime
import time
from pybedtools import BedTool
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook, tqdm
# Get Current Git Commit Hash for version
path = [x.replace(' ', r'\ ') for x in os.popen('echo $PYTHONPATH').read().split(':') if 'dkfunctions' in x.split('/')]
if len(path) > 0:
version = os.popen(f'cd {path[0]}; git rev-parse HEAD').read()[:-1]
__version__ = f'v0.1, Git SHA1: {version}'
else:
__version__ = f'v0.1, {datetime.now():%Y-%m-%d}'
def val_folder(folder):
folder = folder if folder.endswith('/') else f'{folder}/'
folder = f'{os.getcwd()}/' if folder == '/' else folder
os.makedirs(folder, exist_ok=True)
return folder
def image_display(file):
from IPython.display import Image, display
display(Image(file))
def rplot(plot_func, filename, filetype, *args, **kwargs):
from rpy2.robjects.packages import importr
grdevices = importr('grDevices')
filetype = filetype.lower()
plot_types = {'png': grdevices.png,
'svg': grdevices.svg,
'pdf': grdevices.pdf
}
plot_types[filetype](f'{filename}.{filetype}')
return_object = plot_func(*args, **kwargs)
grdevices.dev_off()
if filetype == 'png':
image_display(f'{filename}.{filetype}')
return return_object
def read_pd(file, *args, **kwargs):
if (file.split('.')[-1] == 'txt') or (file.split('.')[-1] == 'tab'):
return pd.read_table(file, header=0, index_col=0, *args, **kwargs)
elif (file.split('.')[-1] == 'xls') or (file.split('.')[-1] == 'xlsx'):
return pd.read_excel(file, *args, **kwargs)
else:
raise IOError("Cannot parse count matrix. Make sure it is .txt, .xls, or .xlsx")
def rout_write(x):
'''
function for setting r_out to print to file instead of jupyter
rpy2.rinterface.set_writeconsole_regular(rout_write)
rpy2.rinterface.set_writeconsole_warnerror(rout_write)
'''
print(x, file=open(f'{os.getcwd()}/R_out_{datetime.now():%Y-%m-%d}.txt', 'a'))
def alert_me(text):
'''
Send me a pop up alert to macosx.
'''
os.system(f'''osascript -e 'tell Application "System Events" to display dialog "{text}"' ''')
def tq_type():
    environ = os.environ
    jupyter = False
    if '_' in environ.keys():
        jupyter = environ['_'].endswith('jupyter')
    elif 'MPLBACKEND' in environ.keys():
        jupyter = 'ipykernel' in environ['MPLBACKEND']
    return tqdm_notebook if jupyter else tqdm
def peak_overlap_MC(df_dict, background, permutations=1000, seed=42, notebook=True):
'''
Monte Carlo simulation of peak overlaps in a given background
    p-value calculated as the likelihood of the observed overlap against an empirical random background of shuffled peaks per chromosome.
    Inputs
    ------
    df_dict: dictionary of dataframes in bed format
    background: pybedtool bed of the background genome space
permutations: number of permutations
seed: random seed
Returns
-------
pvalue
'''
np.random.seed(seed)
tq = tq_type()
# generate probability of chosing a chromosome region based on its size
bregions = background.to_dataframe()
bregions.index = range(len(bregions))
bregions['Size'] = bregions.iloc[:, 2] - bregions.iloc[:, 1]
total_size = bregions.Size.sum()
bregions['fraction'] = bregions.Size / total_size
bed_dict = {name: df.copy() for name, df in df_dict.items()}
# determine length of each peak region
for df in bed_dict.values():
df['Length'] = df.iloc[:, 2] - df.iloc[:, 1]
# determine baseline overlap intersect count of preshuffled peaks.
A, B = bed_dict.values()
overlap = len(BedTool.from_dataframe(A).sort().merge() + BedTool.from_dataframe(B).sort().merge())
results = []
for permutation in tq(range(permutations)):
for df in bed_dict.values():
# randomly pick a region in the background based on size distribution of the regions
index_list = bregions.index.tolist()
df_size = len(df)
bregions_fraction = bregions.fraction
first_pick = np.random.choice(index_list, size=df_size, p=bregions_fraction)
lengths = df.Length.tolist()
alternatives = np.random.choice(index_list, size=df_size, p=bregions_fraction)
# repick regions if the peak length is larger than the region size (this part can be optimized)
regions = []
new_pick = 0
for reg, length in zip(first_pick, lengths):
reg_length = bregions.iloc[reg, 2] - bregions.iloc[reg, 1]
if reg_length > length:
regions.append(reg)
else:
while reg_length <= length:
new_reg = alternatives[new_pick]
reg_length = bregions.iloc[new_reg, 2] - bregions.iloc[new_reg, 1]
new_pick += 1
regions.append(new_reg)
# assign the chromosome
df.iloc[:, 0] = [bregions.iloc[x, 0] for x in regions]
# randomly pick a start within the selected background region within the peak size constraints
df.iloc[:, 1] = [np.random.randint(bregions.iloc[reg, 1], bregions.iloc[reg, 2] - length) for length, reg in zip(lengths, regions)]
# assign end based on peak length
df.iloc[:, 2] = df.iloc[:, 1] + df.Length
new_overlap = len(BedTool.from_dataframe(A).sort().merge() + BedTool.from_dataframe(B).sort().merge())
results.append(1 if new_overlap >= overlap else 0)
p = (sum(results) + 1) / (len(results) + 1)
A_name, B_name = df_dict.keys()
print(f'Number of intersected peaks of {A_name} and {B_name}: {overlap}')
print(f'Number of times simulated intersections exceeded or equaled the actual overlap: {sum(results)}')
print(f'Monte Carlo p-value estimate: {p}')
return p
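# Hedged usage sketch for peak_overlap_MC: requires the bedtools binary on the PATH
# (pybedtools shells out to it); intervals and names below are invented toy data.
def _example_peak_overlap_MC():
    import pandas as pd
    from pybedtools import BedTool
    peaks_a = pd.DataFrame({'chr': ['chr1'] * 3, 'start': [100, 5000, 9000],
                            'end': [400, 5400, 9300]})
    peaks_b = pd.DataFrame({'chr': ['chr1'] * 3, 'start': [150, 7000, 9100],
                            'end': [450, 7300, 9500]})
    background = BedTool.from_dataframe(
        pd.DataFrame({'chr': ['chr1'], 'start': [0], 'end': [20000]}))
    p = peak_overlap_MC({'A_peaks': peaks_a, 'B_peaks': peaks_b}, background,
                        permutations=100)
    print(p)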
'''
Implementation of an Enrichr API with graphs
Author: <NAME>
'''
def post_genes(gene_list, description):
'''
posts gene list to Enricr
Returns
-------
dictionary: userListId, shortId
'''
import json
import requests
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'
genes_str = '\n'.join([str(x) for x in gene_list])
payload = {'list': (None, genes_str),
'description': (None, description)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
return json.loads(response.text)
def enrich(userListId, filename, gene_set_library):
'''
Returns
-------
Text file of enrichment results
'''
import requests
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export'
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
url = ENRICHR_URL + query_string % (userListId, filename, gene_set_library)
response = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return response
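# Hedged sketch of the two Enrichr helpers above: this performs live HTTP requests to
# the Enrichr service, so it needs network access. The gene list and output filename
# are illustrative.
def _example_enrichr_api():
    genes = ['TP53', 'BRCA1', 'EGFR', 'MYC', 'PTEN']
    post = post_genes(genes, 'example gene list')
    response = enrich(post['userListId'], 'example_KEGG_enrichment.txt', 'KEGG_2016')
    print(response.ok)  # True if the results file downloaded successfully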
def enrichr_barplot(filename, gene_library, out_dir, description, max_n=20,
q_thresh=0.05, color='slategray', display_image=True):
'''
Saves barplot from Enrichr results
    Parameters
----------
filename: enrichr response file
gene_library: gene set library to test
out_dir: result output folder
description: sample or gene set source name
max_n: max number of significant to display
q_thresh: qvalue threshold
color: plot color
    display_image: bool
Return
------
None
'''
import seaborn as sns
import matplotlib.pyplot as plt
e_df = pd.read_csv(filename, header=0, sep="\t").sort_values(by=['Adjusted P-value']).head(max_n)
e_df['Clean_term'] = e_df.Term.apply(lambda x: x.split("_")[0])
e_df['log_q'] = -np.log10(e_df['Adjusted P-value'])
plt.clf()
sns.set(context='paper', font='Arial', font_scale=1.2, style='white',
rc={'figure.dpi': 300, 'figure.figsize': (8, 6)}
)
fig, ax = plt.subplots()
fig.suptitle(f'{description} {gene_library.replace("_", " ")} enrichment\n(q<{q_thresh}, max {max_n})')
sig = e_df[e_df['Adjusted P-value'] <= q_thresh].copy()
if len(sig) > 0:
g = sns.barplot(data=sig, x='log_q', y='Clean_term', color=color, ax=ax)
plt.xlabel('q-value (-log$_{10}$)')
plt.ylabel('Enrichment Term')
ymin, ymax = g.get_ylim()
g.vlines(x=-np.log10(q_thresh), ymin=ymin, ymax=ymax, colors='k',
linestyles='dashed', label=f'q = {q_thresh}')
g.legend()
sns.despine()
else:
ax.text(0.5, 0.5, 'No Significant Enrichments.',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes
)
try:
plt.tight_layout(h_pad=1, w_pad=1)
except ValueError:
pass
plt.subplots_adjust(top=0.88)
file = f'{out_dir}{description}_{gene_library}_enrichr.barplot.png'
fig.savefig(file, dpi=300)
plt.close()
image_display(file)
def enrichr(dict_of_genelists, out_dir, dict_of_genelibraries=None, display=True,
q_thresh=0.05, plot_color='slategray', max_n=20 ):
'''
Runs enrichment analysis through Enrichr and plots results
    Parameters
----------
dict_of_genelists: dictionary of description to genelists
dict_of_genelibraries: dictionary of enrichr gene libraries to test against
If None, will use default libraries
display: bool whether to display inline
q_thresh: qvalue threshold
plot_color:
max_n:
'''
out_dir = out_dir if out_dir.endswith('/') else f'{out_dir}/'
gene_libraries ={'KEGG': 'KEGG_2016',
'GO_Biological_Process': 'GO_Biological_Process_2018',
'ChIP-X_Consensus_TFs': 'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',
'ChEA': 'ChEA_2016',
'OMIM_Disease': 'OMIM_Disease'
}
libraries = gene_libraries if dict_of_genelibraries is None else dict_of_genelibraries
generator = ((d,g,l,gl) for d,g in dict_of_genelists.items()
for l, gl in libraries.items()
)
for description, genes, library, gene_library in generator:
filename=f'{out_dir}{description}_{library}.enrichr.txt'
post = post_genes(genes, description)
get = enrich(post['userListId'], filename, gene_library)
if get.ok:
enrichr_barplot(filename=filename, gene_library=library, out_dir=out_dir, description=description,
max_n=max_n,q_thresh=q_thresh, color=plot_color, display_image=display)
else:
print(f'Enrichr error: {library}, {description}')
'''
end enrichr
'''
def gsea_dotplot(df_dict, title='', qthresh=0.05, top_term=None, gene_sets=[], dotsize_factor=4, figsize=(4, 10), out_dir='.'):
'''
Makes a dotplot of GSEA results with the dot size as the percent of genes in the leading edge and the color the NES.
Plots only significant dots at given fdr theshold
Inputs
------
df_dict: dictionary of named GSEA results for the analysis. pandas df of gsea_report.xls (use pd.concat to combine pos and neg enrichments)
    title: name used for the plot title and output filename
    qthresh: q-value threshold for inclusion
    gene_sets: list of gene sets to plot. If empty, will plot all with FDR q-value < 0.05
    top_term: integer specifying the top number of sets to plot (by q-value). None plots all.
    dotsize_factor: scale to increase dot size for leading edge %
out_dir: output directory
Returns
-------
Gene_Sets used for plotting
'''
import matplotlib.pyplot as plt
import seaborn as sns
out_dir = val_folder(out_dir)
index = []
# get leading edge percentages
for df in df_dict.values():
if 'NAME' in df.columns.tolist():
df.index = df.NAME
df['le_tags'] = df['LEADING EDGE'].apply(lambda x: x.split('%')[0].split('=')[-1])
df.sort_values(by='NES', ascending=False, inplace=True)
index += df[df['FDR q-val'] < 0.05].index.tolist()
index = list(set(index))
# use gene_sets if provided
if len(gene_sets) > 0:
index = gene_sets
# make master df
data_df = pd.DataFrame()
for name, df in df_dict.items():
df['sample_name'] = name
data_df = pd.concat([data_df, df.loc[index]])
# extra filters
data_df = data_df[data_df.sample_name.notna()]
if top_term:
index = list(set(data_df.sort_values(by='FDR q-val').head(top_term).index.tolist()))
# reindex
data_df['GS_NAME'] = data_df.index
data_df.index = range(len(data_df))
# make x coordinate
samples = data_df.sample_name.unique()
sample_number = len(samples)
sample_x = {name: (x + .5) for name, x in zip(samples, range(sample_number))}
data_df['x'] = data_df.sample_name.map(sample_x)
# make y coordinate
gene_set = list(index[::-1])
gene_set_number = len(gene_set)
sample_y = {name: y for name, y in zip(gene_set, range(gene_set_number))}
data_df['y'] = data_df.GS_NAME.map(sample_y)
# filter for significance and make dot size from leading edge percentage
data_df['sig_tags'] = data_df[['FDR q-val', 'le_tags']].apply(lambda x: 0 if float(x[0]) > qthresh else float(x[1]), axis=1)
data_df['area'] = data_df['sig_tags'] * dotsize_factor
plot_df = data_df[data_df.GS_NAME.isin(index)].copy()
# plot
plt.clf()
sns.set(context='paper', style='white', font='Arial', rc={'figure.dpi': 300})
fig, ax = plt.subplots(figsize=figsize)
sc = ax.scatter(x=plot_df.x, y=plot_df.y, s=plot_df.area, edgecolors='face', c=plot_df.NES, cmap='RdBu_r')
# format y axis
ax.yaxis.set_major_locator(plt.FixedLocator(plot_df.y))
ax.yaxis.set_major_formatter(plt.FixedFormatter(plot_df.GS_NAME))
ax.set_yticklabels(plot_df.GS_NAME.apply(lambda x: x.replace('_', ' ')), fontsize=16)
# format x axis
ax.set_xlim(0, sample_number)
ax.xaxis.set_major_locator(plt.FixedLocator(plot_df.x))
ax.xaxis.set_major_formatter(plt.FixedFormatter(plot_df.sample_name))
ax.set_xticklabels(plot_df.sample_name, fontsize=16, rotation=45)
# add colorbar
cax = fig.add_axes([0.95, 0.20, 0.03, 0.22])
cbar = fig.colorbar(sc, cax=cax,)
cbar.ax.tick_params(right=True)
cbar.ax.set_title('NES', loc='left', fontsize=12)
cbar.ax.tick_params(labelsize=10)
# add legend
markers = []
min_value = plot_df[plot_df.sig_tags > 0].sig_tags.min()
max_value = plot_df.sig_tags.max()
rounded_min = int(10 * round((min_value - 5) / 10))
rounded_max = int(10 * round((max_value + 5) / 10)) # rounds up to nearest ten (ie 61 --> 70)
sizes = [x for x in range(rounded_min, rounded_max + 1, 10)]
for size in sizes:
markers.append(ax.scatter([], [], s=size * dotsize_factor, c='k'))
legend = ax.legend(markers, sizes, prop={'size': 12})
legend.set_title('Leading Edge (%)', prop={'size': 12})
# offset legend
    bb = legend.get_bbox_to_anchor().transformed(ax.transAxes.inverted())
xOffset = .6
yOffset = 0
bb.x0 += xOffset
bb.x1 += xOffset
bb.y0 += yOffset
bb.y1 += yOffset
legend.set_bbox_to_anchor(bb, transform=ax.transAxes)
# set title
ax.set_title(title.replace('_', ' '), fontsize=20)
sns.despine()
fig.savefig(f'{out_dir}{title.replace(" ", "_")}.png', bbox_inches='tight')
fig.savefig(f'{out_dir}{title.replace(" ", "_")}.svg', bbox_inches='tight')
return plot_df
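# Hedged sketch of building the df_dict that gsea_dotplot expects: GSEA writes
# separate positive/negative reports per comparison, concatenated here as the
# docstring suggests. The report paths are placeholders for real gsea_report_*.xls
# outputs (typically tab-separated text despite the extension).
def _example_gsea_dotplot():
    import pandas as pd
    def load_reports(pos_path, neg_path):
        return pd.concat([pd.read_table(pos_path), pd.read_table(neg_path)])
    df_dict = {'TreatmentA': load_reports('A_gsea_report_pos.xls', 'A_gsea_report_neg.xls'),
               'TreatmentB': load_reports('B_gsea_report_pos.xls', 'B_gsea_report_neg.xls')}
    gsea_dotplot(df_dict, title='Example GSEA dotplot', top_term=15, out_dir='gsea_plots')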
def annotate_peaks(dict_of_dfs, folder, genome, db='UCSC', check=False, TSS=[-3000,3000], clean=False):
'''
Annotate a dictionary of dataframes from bed files to the genome using ChIPseeker and Ensembl annotations.
Inputs
------
dict_of_beds: dictionary of bed files
folder: output folder
genome: hg38, hg19, mm10
db: default UCSC, but can also accept Ensembl
TSS: list of regions around TSS to annotate as promoter
check: bool. checks whether annotation file already exists
Returns
-------
dictionary of annotated bed files as dataframe
'''
import rpy2.robjects as ro
import rpy2.rinterface as ri
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
tq = tq_type()
ri.set_writeconsole_regular(rout_write)
ri.set_writeconsole_warnerror(rout_write)
chipseeker = importr('ChIPseeker')
genomicFeatures = importr('GenomicFeatures')
makeGR = ro.r("makeGRangesFromDataFrame")
as_df = ro.r("as.data.frame")
check_df = {key: os.path.isfile(f'{folder}{key.replace(" ","_")}_annotated.txt') for key in dict_of_dfs.keys()}
return_bool = False not in set(check_df.values())
if return_bool & check:
        return {f'{key}_annotated': pd.read_csv(f'{folder}{key.replace(" ","_")}_annotated.txt', index_col=0, header=0, sep="\t") for key in dict_of_dfs.keys()}
species = ('Mmusculus' if genome.lower() == 'mm10' else 'Hsapiens')
if db.lower() == 'ucsc':
TxDb = importr(f'TxDb.{species}.UCSC.{genome.lower()}.knownGene')
txdb = ro.r(f'txdb <- TxDb.{species}.UCSC.{genome.lower()}.knownGene')
elif db.lower() == 'ensembl':
TxDb = importr(f'TxDb.{species}.UCSC.{genome.lower()}.ensGene')
txdb = ro.r(f'txdb <- TxDb.{species}.UCSC.{genome.lower()}.ensGene')
else:
raise ValueError('UCSC or Ensembl only.')
os.makedirs(folder, exist_ok=True)
if genome.lower() == 'mm10':
annoDb = importr('org.Mm.eg.db')
anno = 'org.Mm.eg.db'
elif genome.lower() == 'hg38' or genome.lower() == 'hg19':
annoDb = importr('org.Hs.eg.db')
anno = 'org.Hs.eg.db'
return_dict = {}
print('Annotating Peaks...')
for key, df in tq(dict_of_dfs.items()):
if check & check_df[key]:
            return_dict[f'{key}_annotated'] = pd.read_csv(f'{folder}{key.replace(" ","_")}_annotated.txt', index_col=0, header=0, sep="\t")
else:
col_len = len(df.columns)
df.columns = ["chr", "start", "end"] + list(range(col_len - 3))
GR = makeGR(df)
GR_anno = chipseeker.annotatePeak(GR, overlap='TSS', TxDb=txdb, annoDb=anno, tssRegion=ro.IntVector(TSS)) #switched to TSS on 10/02/2019
return_dict[f'{key}_annotated'] = ro.pandas2ri.ri2py(chipseeker.as_data_frame_csAnno(GR_anno))
return_dict[f'{key}_annotated'].to_excel(f'{folder}{key.replace(" ","_")}_annotated.xlsx')
if clean:
for k,df in return_dict.items():
df['Anno'] = df.annotation.apply(lambda x: 'Promoter' if x.split(' ')[0] == 'Promoter' else x)
df['Anno'] = df.Anno.apply(lambda x: 'Intergenic' if x.split(' ')[0] in ['Downstream', 'Distal'] else x)
df['Anno'] = df.Anno.apply(lambda x: x.split(' ')[0] if x.split(' ')[0] in ['Intron', 'Exon'] else x)
return return_dict
def plot_venn2(Series, string_name_of_overlap, folder):
'''
Series with with overlaps 10,01,11
Plots a 2 way venn.
Saves to file.
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn2, venn2_circles
folder = f'{folder}venn2/' if folder.endswith('/') else f'{folder}/venn2/'
os.makedirs(folder, exist_ok=True)
plt.figure(figsize=(7, 7))
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 16,
}
plt.rc('font', **font)
# make venn
sns.set(style='white', font='Arial')
venn_plot = venn2(subsets=(Series.iloc[0], Series.iloc[1], Series.iloc[2]), set_labels=[name.replace('_', ' ') for name in Series.index.tolist()])
patch = ['10', '01', '11']
colors = ['green', 'blue', 'teal']
for patch, color in zip(patch, colors):
venn_plot.get_patch_by_id(patch).set_color('none')
venn_plot.get_patch_by_id(patch).set_alpha(.4)
venn_plot.get_patch_by_id(patch).set_edgecolor('none')
c = venn2_circles(subsets=(Series.iloc[0], Series.iloc[1], Series.iloc[2]))
colors_test = ['green', 'blue']
for circle, color in zip(c, colors_test):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(string_name_of_overlap.replace('_', ' ') + " overlaps")
plt.tight_layout()
name = string_name_of_overlap.replace('_', ' ').replace('\n', '_')
plt.savefig(f"{folder}{name}-overlap.svg")
plt.savefig(f"{folder}{name}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{name}-overlap.png")
def plot_venn2_set(dict_of_sets, string_name_of_overlap, folder, pvalue=False, total_genes=None):
'''
Plots a 2 way venn from a dictionary of sets
Saves to file.
Inputs
------
dict_of_sets: dictionary of sets to overlap
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn2, venn2_circles
from scipy import stats
folder = f'{folder}venn2/' if folder.endswith('/') else f'{folder}/venn2/'
os.makedirs(folder, exist_ok=True)
plt.figure(figsize=(7, 7))
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 16,
}
plt.rc('font', **font)
set_list = []
set_names = []
for name, setlist in dict_of_sets.items():
set_list.append(setlist)
set_names.append(name.replace('_', ' '))
# make venn
sns.set(style='white', font='Arial')
venn_plot = venn2(subsets=set_list, set_labels=set_names)
patch = ['10', '01', '11']
colors = ['green', 'blue', 'teal']
for patch, color in zip(patch, colors):
venn_plot.get_patch_by_id(patch).set_color('none')
venn_plot.get_patch_by_id(patch).set_alpha(.4)
venn_plot.get_patch_by_id(patch).set_edgecolor('none')
c = venn2_circles(subsets=set_list)
colors_test = ['green', 'blue']
for circle, color in zip(c, colors_test):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
if None not in [pvalue, total_genes]:
intersection_N = len(set_list[0] & set_list[1])
pvalue = stats.hypergeom.sf(intersection_N, total_genes, len(set_list[0]), len(set_list[1]))
pvalue_string = f'= {pvalue:.03g}' if pvalue > 1e-5 else '< 1e-5'
plt.text(0, -.05, f'p-value {pvalue_string}', fontsize=10, transform=c[1].axes.transAxes)
plt.title(string_name_of_overlap.replace('_', ' ') + " overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png")
def plot_venn3_set(dict_of_sets, string_name_of_overlap, folder):
'''
Makes 3 way venn from 3 sets.
Saves to file.
Inputs
------
dict_of_sets: dictionary of sets to overlap
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn3, venn3_circles
folder = f'{folder}venn3/' if folder.endswith('/') else f'{folder}/venn3/'
os.makedirs(folder, exist_ok=True)
plt.clf()
sns.set(style='white', context='paper', font_scale=2, rc={'figure.figsize': (7, 7)})
# font = {'family': 'sans-serif',
# 'weight': 'normal',
# 'size': 16,
# }
# plt.rc('font', **font)
set_list = []
set_names = []
for name, setlist in dict_of_sets.items():
set_list.append(setlist)
set_names.append(name.replace('_', ' '))
# make venn
venn_plot = venn3(subsets=set_list, set_labels=set_names)
patch = ['100', '110', '101', '010', '011', '001', '111']
for p in patch:
if venn_plot.get_patch_by_id(p):
venn_plot.get_patch_by_id(p).set_color('none')
venn_plot.get_patch_by_id(p).set_alpha(.4)
venn_plot.get_patch_by_id(p).set_edgecolor('none')
# make
c = venn3_circles(subsets=set_list)
colors_list = ['green', 'blue', 'grey']
for circle, color in zip(c, colors_list):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(f"{string_name_of_overlap.replace('_', ' ')} Overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.png")
def plot_venn3_counts(element_list, set_labels, string_name_of_overlap, folder):
'''
Plot three way venn based on counts of specific overlaping numbers.
Saves to file.
Inputs
------
    element_list: tuple with counts of the overlaps from (Abc, aBc, ABc, abC, AbC, aBC, ABC)
set_labels: list or tuple with names of the overlaps ('A','B','C')
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn3, venn3_circles
folder = f'{folder}venn3/' if folder.endswith('/') else f'{folder}/venn3/'
os.makedirs(folder, exist_ok=True)
plt.clf()
sns.set(style='white', context='paper', font_scale=1, rc={'figure.figsize': (7, 7)})
# font = {'family': 'sans-serif',
# 'weight': 'normal',
# 'size': 16,
# }
# plt.rc('font', **font)
# make venn
venn_plot = venn3(subsets=element_list, set_labels=[name.replace('_', ' ') for name in set_labels])
patch = ['100', '110', '101', '010', '011', '001', '111']
for p in patch:
if venn_plot.get_patch_by_id(p):
venn_plot.get_patch_by_id(p).set_color('none')
venn_plot.get_patch_by_id(p).set_alpha(.4)
venn_plot.get_patch_by_id(p).set_edgecolor('none')
# make
c = venn3_circles(subsets=element_list)
colors_list = ['green', 'blue', 'grey']
for circle, color in zip(c, colors_list):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(f"{string_name_of_overlap.replace('_', ' ')} Overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png")
def overlap_two(bed_dict, genome=None):
'''
Takes a dictionary of two bed-like format files.
Merges all overlapping peaks for each bed into a master file.
Intersects beds to merged master file.
Performs annotations with ChIPseeker if genome is specified.
Plots venn diagrams of peak overlaps
If genome is specified, also plots venn diagrams of annotated gene sets.
Inputs
------
bed_dict: dictionary of BedTool files
genome: 'hg38','hg19','mm10'
Returns
-------
Returns a dictionary of dataframes from unique and overlap peaks.
If genome is specified, includes a dictionary of annotated peaks.
'''
names = list(bed_dict.keys())
Folder = f'{os.getcwd()}/'
subfolder = f"{names[0].replace(' ', '_')}_{names[1].replace(' ', '_')}_overlap/"
out = f'{Folder}{subfolder}'
os.makedirs(out, exist_ok=True)
print(f'Output files are found in {out}')
masterfile = bed_dict[names[0]].cat(bed_dict[names[1]]).sort().merge()
sorted_dict = {key: bed.sort().merge() for key, bed in bed_dict.items()}
overlap_dict = {'overlap': masterfile.intersect(sorted_dict[names[0]]).intersect(sorted_dict[names[1]])}
for key, bed in sorted_dict.items():
other = {other_key: other_bed for other_key, other_bed in sorted_dict.items() if other_key != key}
overlap_dict['{}_unique_peak'.format(key)] = masterfile.intersect(sorted_dict[key]).intersect(list(other.values())[0], v=True)
for key, bed in overlap_dict.items():
bed.to_dataframe().to_csv('{}{}{}-unique-peaks-from-mergedPeaks.bed'.format(Folder, subfolder, key.replace(' ', '_')),
header=None, index=None, sep="\t")
overlap_numbers = pd.Series({names[0]: len(overlap_dict['{}_unique_peak'.format(names[0])]),
names[1]: len(overlap_dict['{}_unique_peak'.format(names[1])]),
'overlap': len(overlap_dict['overlap'])
},
index=[names[0], names[1], 'overlap']
)
# Venn
plot_venn2(overlap_numbers,
'{} and\n{} peak'.format(names[0], names[1]),
'{}{}'.format(Folder, subfolder)
)
if bool(genome):
        print('Annotating overlapping peaks...')
# Annotate with ChIPseeker
unikey = '{}_unique'
unianno = '{}_unique_annotated'
return_dict = annotate_peaks({unikey.format(key): bed.to_dataframe() for key, bed in overlap_dict.items()}, '{}{}'.format(Folder, subfolder), genome=genome)
Set1_unique = set(return_dict[unianno.format('{}_unique_peak'.format(names[0]))].SYMBOL.unique().tolist())
Set2_unique = set(return_dict[unianno.format('{}_unique_peak'.format(names[1]))].SYMBOL.unique().tolist())
Overlap_Set = set(return_dict[unianno.format('overlap')].SYMBOL.unique().tolist())
venn2_dict = {names[0]: (Set1_unique | Overlap_Set),
names[1]: (Set2_unique | Overlap_Set)
}
plot_venn2_set(venn2_dict,
'{} and {}\nannotated gene'.format(names[0], names[1]),
'{}{}'.format(Folder, subfolder)
)
gene_overlaps = {}
gene_overlaps['{}_unique_genes'.format(names[0])] = Set1_unique - (Set2_unique | Overlap_Set)
gene_overlaps['{}_unique_genes'.format(names[1])] = Set2_unique - (Set1_unique | Overlap_Set)
gene_overlaps['Overlap_Gene_Set'] = (Set1_unique & Set2_unique) | Overlap_Set
for key, gene_set in gene_overlaps.items():
with open(f'{Folder}{subfolder}{key}.txt', 'w') as file:
for gene in gene_set:
file.write(f'{gene}\n')
for key, item in gene_overlaps.items():
return_dict[key] = item
for key, df in overlap_dict.items():
return_dict[key] = df
else:
return_dict = overlap_dict
return return_dict
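# Hedged usage sketch for overlap_two: the peak files are hypothetical paths, and the
# annotated variant additionally needs the R/ChIPseeker stack used by annotate_peaks.
def _example_overlap_two():
    from pybedtools import BedTool
    beds = {'H3K27ac': BedTool('H3K27ac_peaks.bed'),
            'ATAC': BedTool('ATAC_peaks.bed')}
    results = overlap_two(beds)                   # peak-level overlaps and venn
    # results = overlap_two(beds, genome='hg38')  # also overlap annotated gene sets
    print(list(results.keys()))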
def overlap_three(bed_dict, genome=None):
'''
Takes a dictionary of three bed-like format files.
Merges all overlapping peaks for each bed into a master file.
Intersects beds to merged master file.
Performs annotations with ChIPseeker if genome is specified.
Plots venn diagrams of peak overlaps
If genome is specified, also plots venn diagrams of annotated gene sets.
Inputs
------
bed_dict: dictionary of BedTool files
genome: 'hg38','hg19','mm10'
Returns
-------
Returns a dictionary of dataframes from unique and overlap peaks.
If genome is specified, includes a dictionary of annotated peaks.
'''
from collections import OrderedDict
names = list(bed_dict.keys())
Folder = f'{os.getcwd()}/'
subfolder = f"{names[0].replace(' ', '_')}-{ names[1].replace(' ', '_')}-{names[2].replace(' ', '_')}-overlap/"
out = f'{Folder}{subfolder}'
os.makedirs(out, exist_ok=True)
print(f'Output files are found in {out}')
print(f'A: {names[0]}, B: {names[1]}, C: {names[2]}')
master = bed_dict[names[0]].cat(bed_dict[names[1]]).cat(bed_dict[names[2]]).sort().merge()
A = bed_dict[names[0]].sort().merge()
B = bed_dict[names[1]].sort().merge()
C = bed_dict[names[2]].sort().merge()
sorted_dict = OrderedDict({'master': master, 'A': A, 'B': B, 'C': C})
sorted_dict['A_bc'] = (master + A - B - C)
sorted_dict['aB_c'] = (master + B - A - C)
sorted_dict['A_B_c'] = (master + A + B - C)
sorted_dict['abC_'] = (master + C - A - B)
sorted_dict['A_bC_'] = (master + A + C - B)
sorted_dict['aB_C_'] = (master + B + C - A)
sorted_dict['A_B_C_'] = (master + A + B + C)
labTup = tuple(key for key in sorted_dict.keys())
lenTup = tuple(len(bed) for bed in sorted_dict.values())
print(f'{labTup}\n{lenTup}')
plot_venn3_counts(lenTup[4:], names, f"{'_'.join(names)}-peak-overlaps", out)
for key, bed in sorted_dict.items():
if len(bed) > 1:
bed.to_dataframe().to_csv(f'{out}{key.replace(" ", "_")}-peaks-from-mergedPeaks.bed', header=None, index=None, sep="\t")
if bool(genome):
        print('Annotating overlapped peaks...')
unikey = '{}'
unianno = '{}_annotated'
return_dict = annotate_peaks({unikey.format(key): bed.to_dataframe() for key, bed in sorted_dict.items()}, out, genome=genome)
Set1 = set(return_dict[unianno.format('A')].SYMBOL.unique().tolist())
Set2 = set(return_dict[unianno.format('B')].SYMBOL.unique().tolist())
Set3 = set(return_dict[unianno.format('C')].SYMBOL.unique().tolist())
plot_venn3_set({names[0]: Set1, names[1]: Set2, names[2]: Set3}, f'{names[0]}_{names[1]}_{names[2]}-gene-overlaps', out)
return sorted_dict if genome is None else {**sorted_dict, **return_dict}
def splice_bar(data, title, x, y):
'''
    Plots a bar graph of mis-splicing counts and saves it to file.
Inputs
------
data: dataframe
title: string plot title
x: string of columm title for number of events in data
y: string of column title for splicing type in data
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='paper', font='Arial', style='white', font_scale=2)
plot = sns.barplot(x=x, y=y, data=data)
plot.set_title(title.replace('_', ' '))
plot.set_ylabel('')
sns.despine()
    plt.savefig('{}.png'.format(title.replace(' ', '_')), dpi=300)
plt.close()
image_display('{}.png'.format(title.replace(' ', '_')))
def make_df(dict_of_sets, name):
'''
Make a dataframe from a dictionary of sets.
Inputs
------
dict_of_sets: dictionary of sets
name: string name of file
Returns
-------
dataframe
'''
out_dir = '{pwd}/{name}/'.format(pwd=os.getcwd(), name=name.replace(' ', '_'))
os.makedirs(out_dir, exist_ok=True)
count = 0
for key, genes in dict_of_sets.items():
count = max(count, len(genes))
df = pd.DataFrame(index=range(1, count + 1))
for key, genes in dict_of_sets.items():
df[key] = pd.Series(list(genes) + ['NA'] * (count - len(genes)))
df.to_excel('{}/{}.xls'.format(out_dir, name.replace(' ', '_')), index=False)
return df
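# Minimal sketch of make_df: sets of unequal size are padded with 'NA' so they can sit
# side by side; note the function writes an .xls file, which assumes an Excel writer
# engine is installed. The gene names are placeholders.
def _example_make_df():
    gene_sets = {'up': {'GENE1', 'GENE2', 'GENE3'}, 'down': {'GENE4'}}
    df = make_df(gene_sets, 'example gene sets')
    print(df)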
def plot_col(df, title, ylabel, out='', xy=(None, None), xticks=[''], plot_type=['violin'], pvalue=False, compare_tags=None):
'''
Two column boxplot from dataframe. Titles x axis based on column names.
Inputs
------
df: dataframe (uses first two columns)
title: string of title
ylabel: string of y label
    xy: if specified, x is the label column and y is the data column (default: (None,None): data separated into two columns).
    xticks: list of xtick names (default is column name)
    pvalue: bool to perform ttest (default is False). Will only work if xy=(None,None) or there are only two labels in x.
    plot_type: list of one or more: violin, box, swarm (default=violin)
    compare_tags: if xy and pvalue are specified and there are more than two tags in x, specify the tags to compare, e.g. ['a','b']
    out: output parent directory; if empty, saves into the current working directory
Returns
------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
out = val_folder(out)
plt.clf()
sns.set(context='paper', font='Arial', font_scale=2, style='white', rc={'figure.dpi': 300, 'figure.figsize': (5, 6)})
if type(plot_type) != list:
plot_type = plot_type.split()
lower_plot_type = [x.lower() for x in plot_type]
if len(lower_plot_type) == 0:
raise IOError('Input a plot type.')
elif True not in {x in lower_plot_type for x in ['violin', 'box', 'swarm']}:
raise IOError('Did not recognize plot type.')
if 'swarm' in lower_plot_type:
if xy == (None, None):
fig = sns.swarmplot(data=df, color='black', s=4)
else:
fig = sns.swarmplot(data=df, x=xy[0], y=xy[1], color='black', s=4)
if 'violin' in lower_plot_type:
if xy == (None, None):
fig = sns.violinplot(data=df)
else:
fig = sns.violinplot(data=df, x=xy[0], y=xy[1])
if 'box' in lower_plot_type:
if xy == (None, None):
fig = sns.boxplot(data=df)
else:
fig = sns.boxplot(data=df, x=xy[0], y=xy[1])
fig.yaxis.set_label_text(ylabel)
fig.set_title(title.replace('_', ' '))
if xticks:
fig.xaxis.set_ticklabels(xticks)
fig.xaxis.set_label_text('')
for tick in fig.xaxis.get_ticklabels():
tick.set_fontsize(12)
if pvalue:
if xy == (None, None):
_, pvalue = stats.ttest_ind(a=df.iloc[:, 0], b=df.iloc[:, 1])
compare_tags = df.columns
else:
_, pvalue = stats.ttest_ind(a=df[df[xy[0]] == compare_tags[0]][xy[1]], b=df[df[xy[0]] == compare_tags[1]][xy[1]])
fig.text(s='p-value = {:.03g}, {} v {}'.format(pvalue, compare_tags[0], compare_tags[1]), x=0, y=-.12, transform=fig.axes.transAxes, fontsize=12)
sns.despine()
plt.tight_layout()
plt.savefig('{}{}.svg'.format(out, title.replace(' ', '_')))
plt.subplots_adjust(bottom=0.17, top=0.9)
plt.savefig('{}{}.png'.format(out, title.replace(' ', '_')), dpi=300)
print('{}.png found in {}/'.format(title.replace(' ', '_'), out))
plt.close()
image_display('{}{}.png'.format(out, title.replace(' ', '_')))
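# Illustrative usage sketch for plot_col (column names and values are invented):
# a two-column violin + swarm plot with a t-test annotation between the columns.
def _example_plot_col():
    import pandas as pd
    df = pd.DataFrame({'Control': [1.0, 1.2, 0.9, 1.1],
                       'Treated': [1.8, 2.1, 1.7, 2.0]})
    plot_col(df, 'Example signal', 'Normalized signal',
             xticks=['Control', 'Treated'], plot_type=['violin', 'swarm'], pvalue=True)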
def scatter_regression(df, s=150, alpha=0.3, line_color='dimgrey', svg=False, reg_stats=True, point_color='steelblue', title=None,
xlabel=None, ylabel=None, IndexA=None, IndexB=None, annotate=None, Alabel='Group A', Blabel='Group B'):
'''
Scatter plot and Regression based on two matched vectors.
Plots Pearson r and p-value on the .png
Inputs
------
df: dataframe to plot (column1 = x axis, column2= y axis)
kwargs (defaults):
s: point size (150)
alpha: (0.3)
line_color: regression line color (dimgrey)
svg: make svg (False)
reg_stats: print Pearson r and p-value on plot (True)
point_color: (steelblue)
title: string
xlabel: string
ylabel: string
IndexA: set or list of genes to highlight red
Alabel: string for IndexA group ('Group A')
IndexB: set or list of genes to highlight blue
annotate: list of genes to annotate on the graph
Returns
-------
None
Prints file name and location
Saves .png plot in scatter_regression/ folder in cwd with dpi=300.
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
sns.set(context='paper', style="white", font_scale=3, font='Arial',
rc={"lines.linewidth": 2,
'figure.figsize': (9, 9),
'font.size': 18, 'figure.dpi': 300})
fig, ax = plt.subplots()
cols = df.columns.tolist()
regplot = sns.regplot(x=cols[0], y=cols[1], data=df, scatter=True,
fit_reg=True, color=line_color,
scatter_kws={'s': s, 'color': point_color, 'alpha': alpha}
)
if xlabel:
plt.xlabel(xlabel, labelpad=10)
if ylabel:
plt.ylabel(ylabel, labelpad=10)
if title:
regplot.set_title(title.replace('_', ' '))
if type(IndexA) in [list, set]:
# A = set(IndexA)
Abool = [True if x in IndexA else False for x in df.index.tolist()]
regplot = ax.scatter(df[Abool].iloc[:, 0], df[Abool].iloc[:, 1], marker='o', alpha=(alpha + .4 if alpha < .6 else 1), color='red', s=s, label=Alabel)
if type(IndexB) in [list, set]:
# B = set(IndexB)
Bbool = [True if x in IndexB else False for x in df.index.tolist()]
regplot = ax.scatter(df[Bbool].iloc[:, 0], df[Bbool].iloc[:, 1], marker='o', alpha=(alpha + .3 if alpha < .7 else 1), color='mediumblue', s=s, label=Blabel)
if type(annotate) in [list, set]:
anno_df = df[[True if x in annotate else False for x in df.index.tolist()]]
offx, offy = (df.iloc[:, :2].max() - df.iloc[:, :2].min()) * .1
for index, (x, y) in anno_df.iterrows():
ax.annotate(index, xy=(x, y), xytext=((x - offx, y + offy) if y >= x else (x + offx, y - offy)), arrowprops={'arrowstyle': '-', 'color': 'black'})
if reg_stats:
r, pvalue = stats.pearsonr(x=df.iloc[:, 0], y=df.iloc[:, 1])
ax.text(0, 0, 'r = {:.03g}; p-value = {:.03g}'.format(r, pvalue), fontsize=25, transform=ax.transAxes)
sns.despine(offset=5)
fig.tight_layout()
os.makedirs('scatter_regression/', exist_ok=True)
if svg:
plt.savefig('scatter_regression/{}.svg'.format(title.replace(' ', '_')))
plt.savefig('scatter_regression/{}.png'.format(title.replace(' ', '_')), dpi=300)
print('{}.png found in {}/scatter_regression/'.format(title.replace(' ', '_'), os.getcwd()))
plt.close()
image_display('scatter_regression/{}.png'.format(title.replace(' ', '_')))
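# Illustrative usage sketch for scatter_regression (gene IDs and values are invented):
# two matched log2 columns indexed by gene, with one hypothetical gene set highlighted in red.
def _example_scatter_regression():
    import pandas as pd
    df = pd.DataFrame({'WT_log2': [1.0, 2.5, 3.1, 0.4],
                       'KO_log2': [1.2, 2.2, 3.5, 0.1]},
                      index=['ENSG1_MYC', 'ENSG2_GATA1', 'ENSG3_CD34', 'ENSG4_TP53'])
    scatter_regression(df, title='WT vs KO', xlabel='WT log2', ylabel='KO log2',
                       IndexA=['ENSG1_MYC'], Alabel='Targets')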
def signature_heatmap(vst, sig, name, cluster_columns=False):
'''
Generate heatmap of differentially expressed genes using
variance stabilized transformed log2 counts.
Inputs
------
vst = dataframe of variance stabilized counts (gene name is the index)
sig = set or list of signature
name = name of file
cluster_columns = bool (default = False)
Outputs
------
.png and .svg file of heatmap
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font='Arial', font_scale=2, style='white', context='paper')
vst['gene_name'] = vst.index
CM = sns.clustermap(vst[vst.gene_name.apply(lambda x: x in sig)].drop('gene_name', axis=1),
z_score=0, method='complete', cmap='RdBu_r',
yticklabels=False, col_cluster=cluster_columns)
CM.fig.suptitle(name.replace('_', ' '))
CM.savefig('{}_Heatmap.png'.format(name.replace(' ', '_')), dpi=300)
CM.savefig('{}_Heatmap.svg'.format(name.replace(' ', '_')))
plt.close()
image_display('{}_Heatmap.png'.format(name.replace(' ', '_')))
def ssh_job(command_list, job_name, job_folder, project='nimerlab', threads=1, q='general', mem=3000):
'''
Sends job to LSF pegasus.ccs.miami.edu
Inputs
------
command_list: list of commands with new lines separated by commas
job_name: string of job name (also used for log file)
job_folder: string of folder to save err out and script files
q: pegasus q, ie. 'bigmem', 'general' (default), 'parallel'
mem: integer memory requirement, default=3000 (3GB RAM)
project: string pegasus project name (default = nimerlab)
threads: integer of number of threads. default = 1
Returns
-------
Tuple(rand_id, job_folder, prejob_files)
'''
job_folder = job_folder if job_folder.endswith('/') else f'{job_folder}/'
os.system(f'ssh pegasus mkdir -p {job_folder}')
rand_id = str(random.randint(0, 100000))
str_comd_list = '\n'.join(command_list)
cmd = '\n'.join(['#!/bin/bash',
'',
f"#BSUB -J ID_{rand_id}_JOB_{job_name.replace(' ','_')}",
f'#BSUB -R "rusage[mem={mem}]"',
f'#BSUB -R "span[ptile={threads}]"',
f"#BSUB -o {job_folder}{job_name.replace(' ','_')}_logs_{rand_id}.stdout.%J",
f"#BSUB -e {job_folder}{job_name.replace(' ','_')}_logs_{rand_id}.stderr.%J",
'#BSUB -W 120:00',
f'#BSUB -n {threads}',
f'#BSUB -q {q}',
f'#BSUB -P {project}',
'',
f'{str_comd_list}'
])
with open(f'{job_name.replace(" ","_")}.sh', 'w') as file:
file.write(cmd)
prejob_files = os.popen(f'ssh pegasus ls {job_folder}').read().split('\n')[:-1]
os.system(f'''ssh pegasus "mkdir -p {job_folder}"''')
os.system(f'scp {job_name.replace(" ", "_")}.sh pegasus:{job_folder}')
os.system(f'''ssh pegasus "cd {job_folder}; bsub < {job_name.replace(' ','_')}.sh"''')
print(f'Submitting {job_name} as ID_{rand_id} from folder {job_folder}: {datetime.now():%Y-%m-%d %H:%M:%S}')
return (rand_id, job_folder, prejob_files, job_name)
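# Illustrative usage sketch for ssh_job (the commands, paths and job name are
# placeholders): submit a short command list to LSF, then poll it with ssh_check.
def _example_ssh_job():
    cmds = ['module load samtools', 'samtools index /scratch/example/sample.bam']
    job_id, folder, pre_files, name = ssh_job(cmds, 'index example bam',
                                              '/scratch/example/jobs/', threads=2)
    ssh_check(job_id, folder, prejob_files=pre_files, wait=True, job_name=name)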
def ssh_check(ID, job_folder, prejob_files=None, wait=True, return_filetype=None, load=False, check_IO_logs=None, sleep=10, job_name=''):
'''
Checks for pegasus jobs sent by ssh_job and prints contents of the log file.
Optionally copies and/or loads the results file.
Inputs
------
ID: job ID returned by ssh_job
job_folder: job folder to probe for results (only needed if return_filetype specified)
prejob_files: list of contents of the job folder before execution
wait: wait for processes to finish before returning, default=True
return_filetype: file type to copy back and display (ex. '.png' will search for all .png in job_folder and import it), default=None
load: whether to copy and display the matching result files, default=False
check_IO_logs: read output from .err .out logs
sleep: seconds to sleep (default 10)
job_name: prepends the local ssh_files folder with the job name if provided
Returns
------
None
'''
job_folder = val_folder(job_folder)
jobs_list = os.popen('ssh pegasus bhist -w').read()
job = [j for j in re.findall(r'ID_(\d+)', jobs_list) if j == ID]
if len(job) != 0:
print(f'Job ID_{ID} is not complete: {datetime.now():%Y-%m-%d %H:%M:%S}')
else:
if os.popen('''ssh pegasus "if [ -f {}/*_logs_{}.stderr* ]; then echo 'True' ; fi"'''.format(job_folder, ID)).read() == 'True\n':
print(f'Job ID_{ID} is finished')
else:
print(f'There was likely an error in submission of Job ID_{ID}')
if wait:
running = True
while running:
jobs_list = os.popen('ssh pegasus "bhist -w"').read()
job = [j for j in re.findall(r'ID_(\d+)', jobs_list) if j == ID]
if len(job) == 0:
running = False
else:
print(f'Waiting for jobs to finish... {datetime.now():%Y-%m-%d %H:%M:%S}')
time.sleep(sleep)
print(f'Job ID_{ID} is finished')
if load:
os.makedirs(f'ssh_files/{job_name}{ID}/', exist_ok=True)
post_files = os.popen(f'ssh pegasus ls {job_folder}*{return_filetype}').read().split("\n")[:-1]
if prejob_files is None:
prejob_files = []
import_files = [file for file in post_files if file not in prejob_files]
for file in import_files:
print('Copying {} to {}/ssh_files/{}{}/'.format(file, os.getcwd(), job_name, ID))
os.system('scp pegasus:{} ssh_files/{}{}/{}'.format(file, job_name, ID, file.split('/')[-1]))
image_display('ssh_files/{}{}/{}'.format(job_name, ID, file.split('/')[-1]))
if check_IO_logs:
logs = {'ErrorFile': '{}/*_logs_{}.stderr*'.format(job_folder, ID),
'OutFile': '{}/*_logs_{}.stdout*'.format(job_folder, ID)
}
os.makedirs('logs/', exist_ok=True)
for key, log in logs.items():
os.system("scp 'pegasus:{}' 'logs/ID_{}_{}.txt'".format(log, ID, key))
if os.path.isfile('logs/ID_{}_{}.txt'.format(ID, key)):
print('logs/ID_{} {}:'.format(ID, key))
with open('logs/ID_{}_{}.txt'.format(ID, key)) as file:
print(file.read())
def deeptools(regions,
signals,
matrix_name,
out_name,
pegasus_folder,
envelope='deeptools',
copy=False,
title='',
bps=(1500, 1500, 4000),
d_type='center',
scaled_names=('TSS', 'TES'),
make=('matrix', 'heatmap', 'heatmap_group', 'profile', 'profile_group'),
missing_values_as_zero=True,
heatmap_kmeans=0,
save_sorted_regions='',
sort_regions='descend',
profile_colors=None):
'''
Inputs
------
regions: dictionary {'region_name':'/path/to/ssh/bedfile'}
signals: dictionary {'signal_name':'/path/to/ssh/bigwigfile'}
matrix_name: string of matrix name or matrix to be named (before .matrix.gz)
out_name: name for output file
title: plot title (optional)
envelope: name of the conda environment to activate (default 'deeptools')
bps: tuple of region width on either side of center or scaled. center ignores last number. default is (1500,1500,4000)
d_type: 'center' or 'scaled'
scaled_names: optional names for scaled start and end (default ('TSS','TES'))
make: tuple of deeptool commands. options: matrix, heatmap, heatmap_group, profile, profile_group
copy: bool. Copy region and signal files to pegasus
missing_values_as_zero: treat missing data as zero (default True)
heatmap_kmeans: number of kmeans clusters (int, default 0 = no clustering)
save_sorted_regions: filename for the kmeans-sorted region file (default '': don't output)
sort_regions: 'descend' (default), 'ascend', 'keep' or 'no'
profile_colors: default None. list of colors per sample in sample order
Returns
-------
string of commands for ssh_job
'''
pegasus_folder = pegasus_folder if pegasus_folder.endswith('/') else f'{pegasus_folder}/'
os.system(f"ssh pegasus 'mkdir {pegasus_folder}'")
make_lower = [x.lower() for x in make]
if d_type.lower() == 'center':
deepMat = 'reference-point --referencePoint center'
deepHeat = "--refPointLabel 'Peak Center'"
deepProf = "--refPointLabel 'Peak Center'"
else:
deepMat = f'scale-regions --regionBodyLength {bps[2]}'
deepHeat = f'--startLabel {scaled_names[0]} --endLabel {scaled_names[1]}'
deepProf = f'--startLabel {scaled_names[0]} --endLabel {scaled_names[1]}'
cmd_list = ['module rm python share-rpms65', f'source activate {envelope}']
if copy:
print('Copying region files to pegasus...')
for region in regions.values():
if os.popen(f'''ssh pegasus "if [ -f {pegasus_folder}{region.split('/')[-1]} ]; then echo 'True' ; fi"''').read() != 'True\n':
print(f'Copying {region} to pegasus at {pegasus_folder}.')
os.system(f"scp {region} pegasus:{pegasus_folder}")
else:
print(f'{region} found in {pegasus_folder}.')
print('Copying signal files to pegasus...')
for signal in signals.values():
if os.popen(f'''ssh pegasus "if [ -f {pegasus_folder}/{signal.split('/')[-1]} ]; then echo 'True' ; fi"''').read() != 'True\n':
print(f'Copying {signal} to {pegasus_folder}.')
os.system(f"scp {signal} pegasus:{pegasus_folder}")
pegasus_region_path = ' '.join([f"{pegasus_folder}{region_path.split('/')[-1]}" for region_path in regions.values()])
pegasus_signal_path = ' '.join([f"{pegasus_folder}{signal_path.split('/')[-1]}" for signal_path in signals.values()])
else:
pegasus_region_path = ' '.join([f'{region_path}' for region_path in regions.values()])
pegasus_signal_path = ' '.join([f'{signal_path}' for signal_path in signals.values()])
if 'matrix' in make_lower:
signal_name = ' '.join([f'''"{signal_name.replace('_', ' ')}"''' for signal_name in signals.keys()])
computeMatrix = f"computeMatrix {deepMat} -a {bps[0]} -b {bps[1]} -p 4 -R {pegasus_region_path} -S {pegasus_signal_path} --samplesLabel {signal_name} -o {matrix_name}.matrix.gz"
if missing_values_as_zero:
computeMatrix += ' --missingDataAsZero'
cmd_list.append(computeMatrix)
if 'heatmap' in make_lower or 'heatmap_group' in make_lower:
region_name = ' '.join([f'''"{region_name.replace('_', ' ')}"''' for region_name in regions.keys()])
plotHeatmap_base = f"plotHeatmap -m {matrix_name}.matrix.gz --dpi 300 {deepHeat} --plotTitle '{title.replace('_',' ')}' --whatToShow 'heatmap and colorbar' --colorMap Reds"
if sort_regions != 'descend':
plotHeatmap_base += f' --sortRegions {sort_regions}'
if heatmap_kmeans > 0:
plotHeatmap_base += f' --kmeans {heatmap_kmeans}'
else:
plotHeatmap_base += f' --regionsLabel {region_name}'
if save_sorted_regions != '':
plotHeatmap_base += f' --outFileSortedRegions {save_sorted_regions}.txt'
if 'heatmap' in make_lower:
cmd_list.append(f"{plotHeatmap_base} -out {out_name}_heatmap.png")
if 'heatmap_group' in make_lower:
cmd_list.append(f"{plotHeatmap_base} -out {out_name}_heatmap_perGroup.png --perGroup")
if 'profile' in make_lower or 'profile_group' in make_lower:
region_name = ' '.join([f'''"{region_name.replace('_', ' ')}"''' for region_name in regions.keys()])
plotProfile_base = f"plotProfile -m {matrix_name}.matrix.gz --dpi 300 {deepProf} --plotTitle '{title.replace('_',' ')}'"
if heatmap_kmeans > 0:
plotProfile_base += f' --kmeans {heatmap_kmeans}'
else:
plotProfile_base += f' --regionsLabel {region_name}'
if profile_colors:
plotProfile_base += f' --colors {" ".join(profile_colors)}'
if save_sorted_regions != '':
plotProfile_base += f' --outFileSortedRegions {save_sorted_regions}_profile.txt'
if 'profile' in make_lower:
cmd_list.append(f"{plotProfile_base} -out {out_name}_profile.png")
if 'profile_group' in make_lower:
cmd_list.append(f"{plotProfile_base} -out {out_name}_profile_perGroup.png --perGroup")
return cmd_list
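# Illustrative usage sketch for deeptools (the bed/bigwig paths and folders are
# placeholders): build the computeMatrix/plotHeatmap command list locally and hand
# it to ssh_job for submission.
def _example_deeptools():
    regions = {'Gained_peaks': '/scratch/example/gained.bed'}
    signals = {'H3K27ac': '/scratch/example/H3K27ac.bw'}
    cmds = deeptools(regions, signals, 'example_matrix', 'example_plots',
                     '/scratch/example/deeptools/', title='Example heatmap',
                     make=('matrix', 'heatmap'))
    ssh_job(cmds, 'example deeptools', '/scratch/example/jobs/', threads=4, mem=8000)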
def order_cluster(dict_set, count_df, gene_column_name, title):
'''
Inputs
------
dict_set: a dictionary with a cluster name and a set of genes in that cluster for plotting (should be non-overlapping).
count_df: a pandas dataframe with the normalized counts for each gene and samples (or average of samples) in row columns.
should also contain a column with the gene name.
gene_column_name: the pandas column specifying the gene name (used in the dict_set)
title: title for the plot and for saving the file
Returns
------
(Ordered Index List, Ordered Count DataFrame, Clustermap)
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster import hierarchy
import matplotlib.patches as mpatches
out_list = []
df = count_df.copy()
df['group'] = 'NA'
for name, genes in dict_set.items():
if len(genes) == 0:
print(f'There are no genes in {name}. Skipping group.')
continue
reduced_df = df[df[gene_column_name].isin(genes)]
linkage = hierarchy.linkage(reduced_df.drop(columns=[gene_column_name, 'group']), method='ward', metric='euclidean')
order = hierarchy.dendrogram(linkage, no_plot=True, color_threshold=-np.inf)['leaves']
gene_list = reduced_df.iloc[order][gene_column_name].tolist()
gene_index = df[df[gene_column_name].isin(gene_list)].index.tolist()
out_list += gene_index
gene_symbol = [gene.split('_')[-1] for gene in gene_list]
with open(f'{name}_genes.txt', 'w') as file:
for gene in gene_symbol:
file.write(f'{gene}\n')
df.loc[gene_index, 'group'] = name
ordered_df = df.loc[out_list]
color_mapping = dict(zip([name for name, genes in dict_set.items() if len(genes) > 0], sns.hls_palette(len(df.group.unique()), s=.7)))
row_colors = df.group.map(color_mapping)
sns.set(context='notebook', font='Arial', palette='RdBu_r', style='white', rc={'figure.dpi': 300})
clustermap = sns.clustermap(ordered_df.loc[out_list].drop(columns=[gene_column_name, 'group']),
z_score=0,
row_colors=row_colors,
row_cluster=False,
col_cluster=False,
cmap='RdBu_r',
yticklabels=False)
clustermap.fig.suptitle(title)
legend = [mpatches.Patch(color=color, label=label.replace('_', ' ')) for label, color in color_mapping.items() if label != 'NA']
clustermap.ax_heatmap.legend(handles=legend, bbox_to_anchor=(-.1, .9, 0., .102))
clustermap.savefig(f'{title.replace(" ","_")}.png', dpi=300)
plt.close()
image_display(f'{title.replace(" ","_")}.png')
return out_list, ordered_df, clustermap
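# Illustrative usage sketch for order_cluster (genes, counts and cluster names are
# invented): each cluster needs at least two genes so the within-cluster linkage can
# be computed.
def _example_order_cluster():
    import pandas as pd
    counts = pd.DataFrame({'gene': ['ENSG1_MYC', 'ENSG2_GATA1', 'ENSG3_CD34', 'ENSG4_TP53'],
                           'Control': [5.0, 4.5, 1.0, 1.5],
                           'Treated': [1.0, 1.5, 5.5, 6.0]})
    clusters = {'Down': {'ENSG1_MYC', 'ENSG2_GATA1'}, 'Up': {'ENSG3_CD34', 'ENSG4_TP53'}}
    return order_cluster(clusters, counts, 'gene', 'Example clusters')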
def ranked_ordered_cluster(dict_set, in_df,
gene_column_name,
dict_sort_col,
title='ranked_ordered_cluster',
group_name='Group',
figsize=None,
ascending=False):
'''
Inputs
------
dict_set: a dictionary with a cluster name and a set of genes in that cluster for plotting.
in_df: a pandas dataframe with the normalized counts for each gene and samples (or average of samples) in row columns.
should also contain a column with the gene name.
gene_column_name: the pandas column specifying the gene name (used in the dict_set)
dict_sort_col: dictionary mapping cluster name with column to sort by in that cluster.
group_name: name (string) of the clusters (ie. Group, or Lineage)
title: title for the plot and for saving the file
figsize: tuple of figsize or default none for autogeneration
ascending: bool for sort order
Returns
------
(Ordered Count DataFrame, Clustermap)
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import matplotlib.patches as mpatches
from dkfunctions import image_display
out_dfs = []
df = in_df.copy()
df[group_name] = 'NA'
df.index = df[gene_column_name]
for name, genes in dict_set.items():
reduced_df = df[df[gene_column_name].isin(genes)].copy()
zscored = reduced_df.drop(columns=[gene_column_name, group_name]).T.apply(stats.zscore).T.copy()
order = zscored.sort_values(by=dict_sort_col[name], ascending=ascending).index.tolist()
gene_list = reduced_df.loc[order, gene_column_name].tolist()
gene_symbol = [gene.split('_')[-1] for gene in gene_list]
with open(f'{name}_genes.txt', 'w') as file:
for gene in gene_symbol:
file.write(f'{gene}\n')
reduced_df[group_name] = name
reduced_df = reduced_df.loc[gene_list]
out_dfs.append(reduced_df)
ordered_df = pd.concat(out_dfs)
groups = ordered_df[group_name].unique()
color_mapping = dict(zip(groups, sns.color_palette("colorblind",len(groups))))
row_colors = ordered_df[group_name].map(color_mapping).tolist()
sns.set(context='paper', font='Arial', palette='pastel', style='white', rc={'figure.dpi': 300}, font_scale=.9)
g = sns.clustermap(ordered_df.drop(columns=[gene_column_name, group_name]),
z_score=0,
row_colors=row_colors,
row_cluster=False,
col_cluster=False,
cmap='RdBu_r',
yticklabels=True,
figsize=figsize)
g.fig.suptitle(title)
legend = [mpatches.Patch(color=color, label=label.replace('_', ' ')) for label, color in color_mapping.items() if label != 'NA']
g.ax_heatmap.legend(handles=legend, bbox_to_anchor=(-.1, .9, 0., .102),fontsize='large')
g.savefig(f'{title.replace(" ","_")}.png', dpi=300)
g.savefig(f'{title.replace(" ","_")}.svg')
plt.close()
image_display(f'{title.replace(" ","_")}.png')
return ordered_df, g
def gsea_barplot(out_dir, pos_file, neg_file, gmt_name, max_number=20):
'''
Inputs
------
out_dir: directory output or '' for current directory
pos_file: GSEA positive enrichment .xls file
neg_file: GSEA negative enrichment .xls file
gmt_name: name of enrichment (ex: Hallmarks)
max_number: max number of significant sets to report (default 20)
Returns
-------
string of save file
'''
import matplotlib.pyplot as plt
import seaborn as sns
out_dir = out_dir if out_dir.endswith('/') else '{}/'.format(out_dir)
out_dir = '' if out_dir == '/' else out_dir
os.makedirs(out_dir, exist_ok=True)
pos = pd.read_table(pos_file).head(max_number) if os.path.isfile(pos_file) else pd.DataFrame(columns=['FDR q-val'])
pos[gmt_name] = [' '.join(name.split('_')[1:]) for name in pos.NAME.tolist()]
neg = pd.read_table(neg_file).head(max_number) if os.path.isfile(neg_file) else pd.DataFrame(columns=['FDR q-val'])
neg[gmt_name] = [' '.join(name.split('_')[1:]) for name in neg.NAME.tolist()]
sns.set(context='paper', font='Arial', font_scale=.9, style='white', rc={'figure.dpi': 300, 'figure.figsize': (8, 6)})
fig, (ax1, ax2) = plt.subplots(ncols=1, nrows=2)
fig.suptitle('{} GSEA enrichment\n(q<0.05, max {})'.format(gmt_name, max_number))
if len(pos[pos['FDR q-val'] < 0.05]) > 0:
UP = sns.barplot(data=pos[pos['FDR q-val'] < 0.05], x='NES', y=gmt_name, color='firebrick', ax=ax1)
UP.set_title('Positive Enrichment')
sns.despine()
if len(neg[neg['FDR q-val'] < 0.05]) > 0:
DN = sns.barplot(data=neg[neg['FDR q-val'] < 0.05], x='NES', y=gmt_name, color='steelblue', ax=ax2)
DN.set_title('Negative Enrichment')
sns.despine()
try:
plt.tight_layout(h_pad=1, w_pad=1)
except ValueError:
pass
plt.subplots_adjust(top=0.88)
file = f'{out_dir}{gmt_name}_GSEA_NES_plot.png'
fig.savefig(file, dpi=300)
plt.close()
image_display(file)
return file
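# Illustrative usage sketch for gsea_barplot: the two .xls paths are placeholders for
# the positive/negative GSEA report tables of one gene-set collection.
def _example_gsea_barplot():
    return gsea_barplot('GSEA_plots',
                        'gsea/gsea_report_for_na_pos.xls',
                        'gsea/gsea_report_for_na_neg.xls',
                        'Hallmarks')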
def hinton(df, filename, folder, max_weight=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
import matplotlib.pyplot as plt
import seaborn as sns
folder = folder if folder.endswith('/') else f'{folder}/'
folder = f'{os.getcwd()}/' if folder == '/' else folder
sns.set(context='paper', rc={'figure.figsize': (8, 8), 'figure.dpi': 200})
matrix = df.values
plt.clf()
plt.figure(figsize=(10, 10), dpi=200)
ax = plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('white')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax.axis('off')
for (x, y), w in np.ndenumerate(matrix):
color = 'red' if w > 0 else 'blue'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([y - size / 2, x - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
fraction = len(df.index.tolist())
increment = (.915 / fraction)
y = 0.942
for x in df.index.tolist():
ax.annotate(x, xy=(-.15, y), xycoords='axes fraction')
y -= increment
ax.annotate("Components", xy=(.4, 0), xycoords='axes fraction', size=14)
ax.autoscale_view()
ax.annotate('Hinton Plot of Independent Components', xy=(.14, 1), xycoords='axes fraction', size=20)
ax.invert_yaxis()
ax.figure.savefig(f'{folder}{filename}.png')
plt.close()
image_display(f'{folder}{filename}.png')
def genomic_annotation_plots(dict_of_annotated_dfs, txdb_db,
filename='Genomic_Annotation_Plot',
title='',
bar_width=.75,
figsize=(10, 5),
order=['Promoter (<=1kb)',
'Promoter (1-2kb)',
'Promoter (2-3kb)',
'Intron',
'Exon',
"3' UTR",
"5' UTR",
'Downstream (<1kb)',
'Downstream (1-2kb)',
'Downstream (2-3kb)',
'Distal Intergenic'],
feature_col='annotation',
palette='colorblind',
plot_mode='fraction'
):
'''
Plots the distribution of genomic features from ChIPseeker annotation output dataframes.
txdb_db = 'UCSC' or 'Ensembl'
'''
import matplotlib.pyplot as plt
import seaborn as sns
db = '(uc' if txdb_db == 'UCSC' else '(ENS'
Anno_df = pd.DataFrame(index=order)
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from joblib import dump, load
from rulevetting.templates.model import ModelTemplate
class Model(ModelTemplate):
def __init__(self):
self.model = load('./notebooks/models/lr_model_all.joblib')
def predict(self, df_features: pd.DataFrame):
return self.model.predict(df_features)
def predict_proba(self, df_features: pd.DataFrame):
return self.model.predict_proba(df_features)
def print_model(self, df_features):
print(self.model)
if __name__ == '__main__':
from rulevetting.projects.iai_pecarn.dataset import Dataset
df_train, df_tune, df_test = Dataset().get_data(load_csvs=True)
df_full = pd.concat((df_train, df_tune, df_test))
import logging
import pandas as pd
from easysparql import easysparqlclass
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.api.types import CategoricalDtype
from tadaqq.util import compute_scores
PRINT_DIFF = True
def get_logger(name, level=logging.INFO):
logger = logging.getLogger(name)
formatter = logging.Formatter('%(name)-12s>> %(message)s')
# formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if not logger.hasHandlers():
logger.addHandler(handler)
logger.setLevel(level)
return logger
logger = get_logger(__name__, level=logging.INFO)
# logger = get_logger(__name__, level=logging.DEBUG)
esparql = easysparqlclass.EasySparql(cache_dir=".cache", logger=logger)
# def compute_scores(eval_data, k=1):
# """
# """
# corr = 0
# incorr = 0
# notf = 0
# for d in eval_data:
# if d == -1:
# notf += 1
# elif d <= k:
# corr += 1
# elif d < 1:
# err_msg = "Error: compute_scores> Invalid k <%s>" % str(d)
# print(err_msg)
# raise Exception(err_msg)
# else:
# incorr += 1
# if corr == 0:
# prec = 0
# rec = 0
# f1 = 0
# else:
# prec = corr / (corr+incorr)
# rec = corr / (corr+notf)
# f1 = 2*prec*rec / (prec+rec)
# # print("#corr: %d\t#incorr: %d\t#notf: %d" % (corr, incorr, notf))
# return prec, rec, f1
# # print("Precision: %.2f\nRecall: %.2f\nF1: %.2f" % (prec, rec, f1))
def get_num_rows(fdir):
df = pd.read_csv(fdir)
return len(df.index)
def compute_scores_per_key(eval_pp, fname=None, print_scores=False):
"""
eval_pp: dict
For example (property as a key)
{
"generic property": [1,... ] (k values),
}
"""
lines = []
print("\n\n| %15s | %15s | %15s | %5s |" % ("Key", "Precision", "Recall", "F1"))
print("|:%s:|:%s:|:%s:|:%s:|" % ("-"*15,"-"*15,"-"*15,"-"*5,))
for p in eval_pp:
prec, rec, f1 = compute_scores(eval_pp[p])
lines.append([p, 'prec', prec])
lines.append([p, 'rec', rec])
lines.append([p, 'f1', f1])
# if PRINT_DIFF:
# print("%s: \n\t%f1.2\t%f1.2\t%f1.2" % (p, prec, rec, f1))
if print_scores:
print("| %15s | %15.2f | %15.2f | %5.2f| " % (p, prec, rec, f1))
if fname:
generate_diagram(lines, fname)
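# Illustrative usage sketch for compute_scores_per_key (keys and k-ranks are made up):
# each list holds the rank of the correct answer per file (1 = top, -1 = not found).
def _example_compute_scores_per_key():
    eval_pp = {'population': [1, 1, 2, -1], 'area total': [1, -1]}
    compute_scores_per_key(eval_pp, fname='example_scores', print_scores=True)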
def generate_diagram(acc, draw_fname):
"""
:param acc: list of [key, metric, value] rows to plot
:param draw_fname: base name (without extension) of the output diagram
:return: None
"""
data = pd.DataFrame(acc, columns=['Property Concept', 'Metric', 'Value'])
ax = sns.barplot(x="Value", y="Property Concept",
hue="Metric",
data=data, linewidth=1.0,
# palette="colorblind",
palette="Spectral",
# palette="pastel",
# palette="ch:start=.2,rot=-.3",
# palette="YlOrBr",
# palette="Paired",
# palette="Set2",
orient="h")
# ax.legend_.remove()
# ax.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
ax.legend(bbox_to_anchor=(1.0, -0.1), borderaxespad=0)
# ax.set_xlim(0, 1.0)
# ax.set_ylim(0, 0.7)
# Horizontal
ticks = ax.get_yticks()
new_ticks = [t for t in ticks]
texts = ax.get_yticklabels()
# print(ax.get_yticklabels())
labels = [t.get_text() for t in texts]
ax.set_yticks(new_ticks)
ax.set_yticklabels(labels, fontsize=8)
ax.set(xlabel=None, ylabel=None)
# print(ax.get_yticklabels())
plt.setp(ax.lines, color='k')
ax.figure.savefig('%s.svg' % draw_fname, bbox_inches="tight")
ax.figure.clf()
def compute_counts(files_k, fname):
bins = [20, 30, 40, 50, 70, 100, 150, 200]
bins_score = dict()
for f in files_k:
corr = 1
if files_k[f][0] != 1:
corr = 0
nrows = files_k[f][1]
added = False
for b in bins:
if nrows < b:
bs = str(b)
if bs not in bins_score:
bins_score[bs] = {
'corr': 0,
'notf': 0,
'incorr': 0
}
if files_k[f][0] == 1:
bins_score[bs]['corr'] += 1
elif files_k[f][0] == -1:
bins_score[bs]['notf'] += 1
elif files_k[f][0] > 1:
bins_score[bs]['incorr'] += 1
else:
raise Exception("Invalid k")
added = True
if not added:
bs = "%d<" % max(bins)
if bs not in bins_score:
bins_score[bs] = {
'corr': 0,
'notf': 0,
'incorr': 0
}
if files_k[f][0] == 1:
bins_score[bs]['corr'] += 1
elif files_k[f][0] == -1:
bins_score[bs]['notf'] += 1
elif files_k[f][0] > 1:
bins_score[bs]['incorr'] += 1
else:
raise Exception("Invalid k")
rows = []
for bname in bins_score:
if bins_score[bname]['corr'] == 0:
acc = 0
prec = 0
recall = 0
f1 = 0
else:
acc = bins_score[bname]['corr'] / (bins_score[bname]['corr'] + bins_score[bname]['incorr'] + bins_score[bname]['notf'])
prec = bins_score[bname]['corr'] / (bins_score[bname]['corr'] + bins_score[bname]['incorr'])
recall = bins_score[bname]['corr'] / (bins_score[bname]['corr'] + bins_score[bname]['notf'])
f1 = 2 * prec * recall / (prec+recall)
tot = bins_score[bname]['corr'] + bins_score[bname]['incorr'] + bins_score[bname]['notf']
rows.append([bname, acc, 'accuracy', tot])
rows.append([bname, prec, 'precision', tot])
rows.append([bname, recall, 'recall', tot])
rows.append([bname, f1, 'f1', tot])
# rows.append([bname, acc, prec, recall, len(bins_score[bname])])
# df = pd.DataFrame(rows, columns=['nrows', 'accuracy', 'precision', 'recall', 'ncols'])
df = pd.DataFrame(rows, columns=['nrows', 'score', 'metric', 'ncols'])
cats = [str(b) for b in bins] + ["%d<" % max(bins)]
x_pos = dict()
for idx, c in enumerate(cats):
x_pos[c] = idx
cat_type = CategoricalDtype(categories=cats, ordered=True)
df['nrows'] = df['nrows'].astype(cat_type)
cats = ['precision', 'recall', 'accuracy', 'f1']
cat_type = CategoricalDtype(categories=cats)
df['metric'] = df['metric'].astype(cat_type)
# print(df.dtypes)
# print(df)
# p = sns.color_palette("flare", as_cmap=True)
# p = sns.color_palette("mako", as_cmap=True)
# p = sns.dark_palette("#69d", reverse=False, as_cmap=True)
ax = sns.scatterplot(x="nrows", y="score", data=df, size="ncols", hue="metric",
#palette=p,
sizes=(40, 100))
# legend_labels, leg_oth = ax.get_legend_handles_labels()
# ax = sns.scatterplot(x="nrows", y="precision", data=df, size="ncols", hue="ncols",
# palette=p, sizes=(40, 100), ax=ax)
# ax = sns.scatterplot(x="nrows", y="recall", data=df, size="ncols", hue="ncols",
# palette=p, sizes=(40, 100), ax=ax)
# sns.lineplot(data=df, x='nrows', y='accuracy', dashes=True, ax=ax, linestyle="--", linewidth=1, palette=p)
# sns.lineplot(data=df, x='nrows', y='score', dashes=True, ax=ax, linestyle="--", linewidth=1, hue="metric")
linestyles = ["--", ":", "dashdot", "solid"]
for idx, c in enumerate(cats):
sns.lineplot(data=df[df.metric == c], x='nrows', y='score', dashes=True, ax=ax, linestyle=linestyles[idx], linewidth=1)
# sns.move_legend(ax, "lower center", bbox_to_anchor=(.5, 0.5), ncol=2, title=None, frameon=False)
# ax.set(ylim=(0, 1))
ax.legend(loc=2, fontsize='x-small')
# ax.legend(bbox_to_anchor=(0.1, 1.0), borderaxespad=0)
# ax.legend(legend_labels, leg_oth, title="Number of columns")
# Draw number of files/columns
# for idx, row in df.iterrows():
# nr = row['nrows']
# nr = x_pos[nr]
# plt.text(nr, row['accuracy'], row['ncols'])
ax.figure.savefig('%s.svg' % fname, bbox_inches="tight")
# plt.show()
ax.figure.clf()
return df
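# Illustrative usage sketch for compute_counts (file names, ranks and row counts are
# invented): files_k maps a file name to (k rank, number of rows) so the scores can
# be binned by table size.
def _example_compute_counts():
    files_k = {'t1.csv': (1, 25), 't2.csv': (-1, 45), 't3.csv': (2, 180), 't4.csv': (1, 320)}
    return compute_counts(files_k, 'example_counts')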
def compute_counts_per_err_meth(scores_dict, fname):
"""
scores_dict: dict
{
'estimate': {
'mean_sq_err': df,
'mean_err': df,
},
'exact': {}
}
sample df:
nrows score metric ncols
0 200< 0 accuracy 1
1 200< 0 precision 1
2 200< 0 recall 1
3 200< 0 f1 1
4 200 0 accuracy 1
5 200 0 precision 1
6 200 0 recall 1
7 200 0 f1 1
"""
# df = pd.DataFrame()
dfs = []
print("scores dict: ")
print(scores_dict)
for e in scores_dict:
for m in scores_dict[e]:
df1 = scores_dict[e][m]
df1['pred'] = [e] * len(df1.index)
df1['method'] = [m] * len(df1.index)
print("==============")
print(e)
print(m)
print(df1)
print("\n")
dfs.append(df1)
print("len dfs: %d" % len(dfs))
df = pd.concat(dfs, ignore_index=True)
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 17:05:23 2018
@author: <NAME>
"""
# -*- coding: utf-8 -*-
"""
Description: Risky Comments Extractor Based on Risky category bag of words.
"""
import numpy as np
import os
import pandas as pd
import re
from textblob import TextBlob
from textblob import Word
#Clear Memory & remove global object:
clear = lambda: os.system('cls')
clear()
path = "C:\\Users\\DELL\\Desktop\\Python"
os.chdir(path)
#print (os.getcwd())
#https://www.datacamp.com/community/tutorials/python-excel-tutorial
# Assign spreadsheet filename to `file`
RcBOWfile = 'WP_DE_SoW Review Tracker-2018 V0.21 Python.xlsx'
Sowfile = 'TextPreProcessing_Stage1.xlsx'
# Load spreadsheet
SoWTracker = pd.ExcelFile(Sowfile)
# import libraries
import pandas as pd
import numpy as np
import win32com.client as win32
import xlsxwriter
# import the sales data
tabela_vendas = pd.read_excel('Vendas.xlsx')
# display the data
pd.set_option('display.max_columns', None)
# print(tabela_vendas)
print('\nTabela de Vendas: ')
print(tabela_vendas.head())
print('-' * 50)
# revenue per store
# tabela_vendas[['ID Loja', 'Valor Final']]
# Create a list with each store and the sum of its revenue next to it
# tabela_vendas.groupby('ID Loja').sum()
# tabela_vendas[['ID Loja', 'Valor Final']].groupby('ID Loja').sum()
faturamento = tabela_vendas[['ID Loja', 'Valor Final']].groupby('ID Loja').sum()
#print('\nTabela de Faturamento: ')
#print(faturamento)
#print('-' * 50)
# quantity of products sold per store
quantidade = tabela_vendas[['ID Loja', 'Quantidade']].groupby('ID Loja').sum()
#print('\nTabela de Quantidades: ')
#print(quantidade)
#print('-' * 50)
# average ticket per product in each store
# use .to_frame() to remove the float64 dtype line underneath and turn the result into a table
ticket_medio = (faturamento['Valor Final'] / quantidade['Quantidade']).to_frame()
#print('\nTabela de Ticket Médio: ')
#print(ticket_medio)
#print('-' * 50)
# send an email with the report
tabela_tratada = pd.DataFrame()
tabela_tratada['FATURAMENTO'] = faturamento
tabela_tratada['QTDD'] = quantidade
tabela_tratada['TICKET MEDIO'] = ticket_medio
# tabela_tratada.to_excel('Dados-tratado.xlsx')
#print('\nTabela Tratada: ')
#print(tabela_tratada)
#print('-' * 50)
pivot = pd.pivot_table(tabela_vendas, index=['ID Loja'], values='Valor Final', aggfunc='sum')
import pandas
import bmeg.ioutils
from bmeg.emitter import JSONEmitter
from bmeg import (Aliquot, DrugResponse, Project, Compound,
Compound_Projects_Project,
DrugResponse_Aliquot_Aliquot,
DrugResponse_Compounds_Compound)
def transform(cellline_lookup_path='source/ccle/cellline_id_lookup.tsv',
cells_path='source/pharmacodb/cells.tsv.gz',
drugs_path='source/pharmacodb/drugs.tsv.gz',
drug_annots_path='source/pharmacodb/drug_annots.tsv.gz',
experiments_path='source/pharmacodb/experiments.tsv.gz',
dose_response_path='source/pharmacodb/dose_responses.tsv.gz',
profiles_path='source/pharmacodb/profiles.tsv.gz',
emitter_prefix=None,
emitter_directory='pharmacodb'):
emitter = JSONEmitter(directory=emitter_directory, prefix=emitter_prefix)
cellline_id_lookup = bmeg.ioutils.read_lookup(cellline_lookup_path)
dose_response = pandas.read_csv(dose_response_path, sep="\t")
cells = pandas.read_csv(cells_path, sep="\t")
drugs = pandas.read_csv(drugs_path, sep="\t")
drug_annots = pandas.read_csv(drug_annots_path, sep="\t")
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from collections import OrderedDict
import pandas as pd
import pathlib
class Report:
# daily report of the account
# contains the following: returns, costs, turnovers, accounts, cash, bench, value
# update report
def __init__(self):
self.init_vars()
def init_vars(self):
self.accounts = OrderedDict() # account postion value for each trade date
self.returns = OrderedDict() # daily return rate for each trade date
self.turnovers = OrderedDict() # turnover for each trade date
self.costs = OrderedDict() # trade cost for each trade date
self.values = OrderedDict() # value for each trade date
self.cashes = OrderedDict()
self.latest_report_date = None # pd.TimeStamp
def is_empty(self):
return len(self.accounts) == 0
def get_latest_date(self):
return self.latest_report_date
def get_latest_account_value(self):
return self.accounts[self.latest_report_date]
def update_report_record(
self,
trade_date=None,
account_value=None,
cash=None,
return_rate=None,
turnover_rate=None,
cost_rate=None,
stock_value=None,
):
# check data
if None in [
trade_date,
account_value,
cash,
return_rate,
turnover_rate,
cost_rate,
stock_value,
]:
raise ValueError(
"None in [trade_date, account_value, cash, return_rate, turnover_rate, cost_rate, stock_value]"
)
# update report data
self.accounts[trade_date] = account_value
self.returns[trade_date] = return_rate
self.turnovers[trade_date] = turnover_rate
self.costs[trade_date] = cost_rate
self.values[trade_date] = stock_value
self.cashes[trade_date] = cash
# update latest_report_date
self.latest_report_date = trade_date
# finish daily report update
def generate_report_dataframe(self):
report = pd.DataFrame()
report["account"] = pd.Series(self.accounts)
report["return"] = pd.Series(self.returns)
report["turnover"] = pd.Series(self.turnovers)
report["cost"] = pd.Series(self.costs)
report["value"] = | pd.Series(self.values) | pandas.Series |
import numpy as np
import os.path
import pandas as pd
import sys
import math
# find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
# print(sys.path)
# print(os.path)
class LeslieProbitInputs(ModelSharedInputs):
"""
Input class for LeslieProbit.
"""
def __init__(self):
"""Class representing the inputs for LeslieProbit"""
super(LeslieProbitInputs, self).__init__()
# self.a_n = pd.Series([], dtype="object")
# self.c_n = pd.Series([], dtype="object")
self.grass_type = pd.Series([], dtype="object")
self.percent_active_ingredient = pd.Series([], dtype="float")
self.foliar_half_life = pd.Series([], dtype="float")
self.sol = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
self.number_applications = pd.Series([], dtype="float")
self.application_rates = pd.Series([], dtype="float")
self.application_days = pd.Series([], dtype="float")
self.b = pd.Series([], dtype="float")
self.test_species = pd.Series([], dtype="object")
self.ld50_test = pd.Series([], dtype="float")
# self.bw_tested = pd.Series([], dtype="float")
# self.ass_species = pd.Series([], dtype="object")
# self.bw_ass = pd.Series([], dtype="float")
self.mineau_scaling_factor = pd.Series([], dtype="float")
self.probit_gamma = pd.Series([], dtype="float")
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
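# Illustrative usage sketch for the validator factories in this module: each returns
# an (ErrorDefinition, checker) pair, and the checker expects a dict of dataframes
# keyed by table name ('Header', 'Episodes', 'OC3', ...) plus a 'metadata' entry.
def _example_run_validator(dfs):
    error, check = validate_165()
    failing_rows = check(dfs)  # e.g. {'Header': [...], 'OC3': [...]}
    return error.code, failing_rows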
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
            # convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
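# Note on the merge pattern used above (a sketch, not framework documentation):
# reset_index() keeps each table's original row numbers in an 'index' column, so after
# a many-to-one Episodes->Header merge the failing rows can be traced back to both
# source tables:
#
#     merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
#     merged.loc[mask, 'index_eps']            # episode rows in error
#     merged.loc[mask, 'index_er'].unique()    # header rows in error (deduplicated)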
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            # 24/03 is one week before the 31 March collection end date
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
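# The "last episode per child" filter above relies on the prior sort by CHILD and DECOM:
# a row is kept when the next row belongs to a different child. A minimal, equivalent
# sketch on the already-sorted frame (assuming the same column names):
#
#     last_eps = episodes_last.groupby('CHILD').tail(1)
#
# Both forms keep the original row labels of the retained episodes.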
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
            # That is, raise error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
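# Worked micro-example of the groupby/shift pattern above (placeholder values): with a
# child's episodes sorted by DECOM and REASON_PLACE_CHANGE = [<some reason>, NaN],
# shift(1) within the CHILD group gives PREVIOUS_REASON = [NaN, <some reason>], so a
# 'T0'-'T4' placement in the second episode is flagged even though its own reason is blank.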
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> must be at least 10 years before <DECOM>
            mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) > merged['DECOM'])
            # That is, raise error if DECOM < DOB + 10 years, i.e. the child was under 10 at the start of the episode
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
        description='A child receiving respite care cannot be in a long-term foster placement.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # & binds more tightly than ==, so the brackets around each comparison are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
        description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank.',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
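# Note (sketch): the plain shift(1) above crosses child boundaries, which is why the
# same_child guard is required before comparing PREVIOUS_DEC with DECOM. For example,
# the first episode of each child pairs with the last episode of the previous child and
# must not be flagged.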
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
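# Sketch of the year-on-year comparison pattern used above: idxmin/idxmax pick one row
# per child (first episode this year, last episode last year), and merging with
# indicator=True lets the rule restrict itself to children present in both years.
# Illustrative variable names only:
#
#     first_this_year = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmin()]
#     last_prev_year = episodes_last.loc[episodes_last.groupby('CHILD')['DECOM'].idxmax()]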
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
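# Illustrative check of rule 552 in isolation (assuming the same dfs convention and
# placeholder frame names):
#
#     error, _validate = validate_552()
#     result = _validate({'PlacedAdoption': placed_adoption_df, 'Episodes': episodes_df})
#     # -> {'PlacedAdoption': [...], 'Episodes': [...]} for children whose first 'A3'-'A6'
#     #    episode starts before their latest DATE_PLACED decision.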
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
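# Note on the merge above (descriptive only): 'ceased_incomplete' is computed on the
# PlacedAdoption table before merging, so after the left merge '_merge' == 'left_only'
# flags children with no PlacedAdoption record at all, while 'ceased_incomplete' flags
# those whose record exists but is missing DATE_PLACED_CEASED or REASON_PLACED_CEASED.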
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
        L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - Pending Home Sales MoM
def macro_usa_phs():
"""
    United States pending home sales month-over-month rate
http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: pending home sales MoM data
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
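# Usage sketch (not part of the original module; needs network access to eastmoney.com):
# parse the release-date column returned above and sort chronologically before analysis.
# Column names follow the schema defined in macro_usa_phs.
def _demo_macro_usa_phs():
    phs_df = macro_usa_phs()
    phs_df['发布日期'] = pd.to_datetime(phs_df['发布日期'], errors='coerce')
    phs_df = phs_df.sort_values('发布日期').reset_index(drop=True)
    print(phs_df.tail())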
# Jin10 Data Center - Economic Indicators - USA - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
    """
    US gross domestic product (GDP) report, data available from 20080228 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
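# The function above stitches two sources together: the archived *_all.js dump (long
# history) and the live list_v2 endpoint (recent rows), de-duplicated on the date index.
# The same request/merge boilerplate repeats in every function below; the helper here is a
# refactor sketch only - it is NOT part of the original module, and it simply reuses the
# requests/pandas calls already shown above. `headers` should be the same dict those
# functions send.
def _jin10_recent_values(attr_id: str, headers: dict) -> pd.Series:
    """Return the most recent rows of one Jin10 indicator as a date-indexed Series (sketch)."""
    t = time.time()
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "ec",
        "attr_id": attr_id,
        "_": str(int(round(t * 1000))),
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    return temp_se.iloc[:, 1]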
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US CPI MoM report
def macro_usa_cpi_monthly():
    """
    US CPI MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US core CPI MoM report
def macro_usa_core_cpi_monthly():
    """
    US core CPI MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
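# Usage sketch (assumes network access): put the headline and core CPI MoM series defined
# above side by side. Releases are stamped with publication timestamps, so both series are
# collapsed to month-end before joining; the resampling rule is a choice made here, not
# something the module prescribes.
def _demo_usa_cpi_vs_core():
    cpi = macro_usa_cpi_monthly().resample('M').last()
    core = macro_usa_core_cpi_monthly().resample('M').last()
    both = pd.concat([cpi, core], axis=1).dropna()
    print(both.tail())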
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US personal spending MoM report
def macro_usa_personal_spending():
    """
    US personal spending MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_personal_spending
    https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US retail sales MoM report
def macro_usa_retail_sales():
    """
    US retail sales MoM report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_retail_sales
    https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
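# Usage sketch: quick visual check of the retail-sales series. matplotlib is imported
# inside the helper because the module above does not otherwise depend on it; this is a
# convenience added here, not part of the original interface.
def _demo_plot_retail_sales():
    import matplotlib.pyplot as plt
    rs = macro_usa_retail_sales()
    rs.plot(title='US retail sales MoM (%)')
    plt.tight_layout()
    plt.show()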
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US import price index report
def macro_usa_import_price():
    """
    US import price index report, data available from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_import_price
    https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Price Levels - US export price index report
def macro_usa_export_price():
    """
    US export price index report, data available from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - LMCI
def macro_usa_lmci():
    """
    Fed labor market conditions index (LMCI) report, data available from 20141006 to the present
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed labor market conditions index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Unemployment - US unemployment rate report
def macro_usa_unemployment_rate():
    """
    US unemployment rate report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
    https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US unemployment rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
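# Usage sketch: the unemployment series is indexed by release timestamp, so collapsing it
# to one observation per month is a common first step. The 'M' resampling rule is an
# illustrative choice, not something required by the function above.
def _demo_usa_unemployment_monthly():
    ur = macro_usa_unemployment_rate()
    monthly = ur.resample('M').last().dropna()
    print(monthly.tail(12))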
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Unemployment - US Challenger job cuts report
def macro_usa_job_cuts():
    """
    US Challenger job cuts report, data available from 19940201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_job_cuts
    https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger job cuts report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Employment - US non-farm payrolls report
def macro_usa_non_farm():
    """
    US non-farm payrolls report, data available from 19700102 to the present
    https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
    https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US non-farm payrolls report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Employment - US ADP employment report
def macro_usa_adp_employment():
    """
    US ADP employment report, data available from 20010601 to the present
    https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
    https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP employment report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
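# Usage sketch: compare the official non-farm payrolls print with the ADP estimate. The two
# reports are released on different days, so both series are aligned to month-end before
# joining; the column arithmetic below is illustrative only.
def _demo_payrolls_vs_adp():
    nfp = macro_usa_non_farm().resample('M').last()
    adp = macro_usa_adp_employment().resample('M').last()
    combo = pd.concat([nfp, adp], axis=1).dropna()
    combo['gap'] = combo['non_farm'] - combo['adp']
    print(combo.tail())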
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Consumer Income & Spending - US core PCE price index YoY report
def macro_usa_core_pce_price():
    """
    US core PCE price index YoY report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
    https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US core PCE price index YoY report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Consumer Income & Spending - US real personal consumption expenditures QoQ (preliminary) report
def macro_usa_real_consumer_spending():
    """
    US real personal consumption expenditures QoQ preliminary report, data available from 20131107 to the present
    https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
    https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US real personal consumption expenditures QoQ preliminary report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Trade - US trade balance report
def macro_usa_trade_balance():
    """
    US trade balance report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_trade_balance
    https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US trade balance report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
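# Usage sketch: average the monthly trade-balance prints (in units of 100 million USD) per
# calendar year. This is a rough aggregation keyed on release dates, shown only to
# illustrate how the returned Series can be post-processed.
def _demo_trade_balance_yearly():
    tb = macro_usa_trade_balance()
    print(tb.resample('A').mean().tail())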
# Jin10 Data Center - Economic Indicators - USA - Trade - US current account report
def macro_usa_current_account():
    """
    US current account report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_usa_current_account
    https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
    :return: US current account report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - Baker Hughes rig count report
def macro_usa_rig_count():
    """
    Baker Hughes rig count report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_rig_count_summary
    https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
    :return: Baker Hughes rig count report - current week
    :rtype: pandas.DataFrame
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
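# Usage sketch: macro_usa_rig_count returns a wide DataFrame (rig counts and week-over-week
# changes per category, one row per report). The index comes straight from the JSON keys,
# which are assumed here to be parseable date strings; errors='coerce' guards that assumption.
def _demo_rig_count():
    rig_df = macro_usa_rig_count()
    rig_df.index = pd.to_datetime(rig_df.index, errors='coerce')
    print(rig_df.sort_index().tail())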
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US producer price index (PPI) report
def macro_usa_ppi():
    """
    US producer price index (PPI) report, data available from 20080226 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
    :return: US producer price index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US core producer price index (PPI) report
def macro_usa_core_ppi():
    """
    US core producer price index (PPI) report, data available from 20080318 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
    :return: US core producer price index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US API crude oil inventory report
def macro_usa_api_crude_stock():
    """
    US API crude oil inventory report, data available from 20120328 to the present
    https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
    https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
    :return: US API crude oil inventory report - current value (10,000 barrels)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Markit manufacturing PMI (preliminary) report
def macro_usa_pmi():
    """
    US Markit manufacturing PMI preliminary report, data available from 20120601 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
    :return: US Markit manufacturing PMI preliminary report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US ISM manufacturing PMI report
def macro_usa_ism_pmi():
    """
    US ISM manufacturing PMI report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
    :return: US ISM manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
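# Usage sketch: the Markit and ISM manufacturing PMIs track the same sector from different
# surveys; aligning them to month-end and checking their correlation is a quick sanity test.
# The resampling rule is an illustrative choice made here, not part of the original module.
def _demo_pmi_comparison():
    markit = macro_usa_pmi().resample('M').last()
    ism = macro_usa_ism_pmi().resample('M').last()
    both = pd.concat([markit, ism], axis=1).dropna()
    print(both.corr())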
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US industrial production MoM report
def macro_usa_industrial_production():
    """
    US industrial production MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_industrial_production
    https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
    :return: US industrial production MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US durable goods orders MoM report
def macro_usa_durable_goods_orders():
    """
    US durable goods orders MoM report, data available from 20080227 to the present
    https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
    https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
    :return: US durable goods orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industrial - US factory orders MoM report
def macro_usa_factory_orders():
    """
    US factory orders MoM report, data available from 19920401 to the present
    https://datacenter.jin10.com/reportType/dc_usa_factory_orders
    https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
    :return: US factory orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
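# Usage sketch: cache one indicator locally so repeated analysis does not re-hit the API.
# The file name is a hypothetical example; reading it back reproduces a date-indexed Series.
def _demo_cache_factory_orders(path='usa_factory_orders.csv'):
    fo = macro_usa_factory_orders()
    fo.to_csv(path, header=True)
    return pd.read_csv(path, index_col=0, parse_dates=True).iloc[:, 0]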
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US Markit services PMI (preliminary) report
def macro_usa_services_pmi():
    """
    US Markit services PMI preliminary report, data available from 20120701 to the present
    https://datacenter.jin10.com/reportType/dc_usa_services_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
    :return: US Markit services PMI preliminary report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US business inventories MoM report
def macro_usa_business_inventories():
    """
    US business inventories MoM report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_business_inventories
    https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
    :return: US business inventories MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US ISM non-manufacturing PMI report
def macro_usa_ism_non_pmi():
    """
    US ISM non-manufacturing PMI report, data available from 19970801 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
    :return: US ISM non-manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
"""
美国NAHB房产市场指数报告, 数据区间从19850201-至今
https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
:return: 美国NAHB房产市场指数报告-今值
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
import os, sys
from pathlib import Path
import math, random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
try:
from data_handle.sad_object import *
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from data_handle.sad_object import *
def gen_csv_trackers(data_dir):
# data_dir - objf
obj_folders = os.listdir(data_dir)
for objf in obj_folders:
obj_files = os.listdir(data_dir+objf) # all files/images under this folder
t_list = []
x_list = []
y_list = []
other_list = []
invalid_files = []
for f in obj_files:
info = f[:-4]
try:
t_list.append(int(info.split('_')[1]))
x_list.append(float(info.split('_')[2]))
y_list.append(float(info.split('_')[3]))
other_list.append(info.split('_')[0]+'_'+info.split('_')[1]+'.png')
except:
invalid_files.append(f)
continue
for f in invalid_files:
obj_files.remove(f)
df = pd.DataFrame({'f':obj_files,'t':t_list,'x':x_list,'y':y_list}).sort_values(by='t', ignore_index=True)
df.to_csv(data_dir+objf+'/data.csv', index=False)
def gather_all_data(data_dir, past, maxT, save_dir=None, minT=1):
# data_dir - objf(1,2,...)
# - All(all in one folder)
if save_dir is None:
save_dir = data_dir
column_name = [f'f{i}' for i in range(0,past+1)] + ['T', 'x', 'y']
df_all = pd.DataFrame(columns=column_name)
obj_folders = os.listdir(data_dir)
for objf in obj_folders:
df_obj = pd.read_csv(data_dir+objf+'/data.csv')
for T in range(minT,maxT+1):
sample_list = []
for i in range(len(df_obj)-past-T): # each sample
sample = []
################## Sample START ##################
for j in range(past+1):
sample.append(df_obj.iloc[i+j]['f'])
sample.append(T)
sample.append(df_obj.iloc[i+past+T]['x'])
sample.append(df_obj.iloc[i+past+T]['y'])
################## Sample E N D ##################
sample_list.append(sample)
            df_T = pd.DataFrame(sample_list, columns=df_all.columns)
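            # The source row is truncated here. A plausible completion (assumption:
            # the per-T samples are pooled and written under save_dir; the file
            # name 'all_data.csv' is a placeholder, not from the original):
            df_all = pd.concat([df_all, df_T], ignore_index=True)
    df_all.to_csv(os.path.join(save_dir, 'all_data.csv'), index=False)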
import os
import cv2
import numpy as np
from keras.models import load_model
import keras.layers as layers
import keras.models as models
files = []
path_to_dataset = os.getcwd()
files.extend(os.listdir(path_to_dataset + "/dataset5"))
path = 'dataset5/'
images = []
for i in files:
img = cv2.imread(path+i)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (300,150))
retval, img = cv2.threshold(img, 0, 255, type = cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
images.append(img)
print("Encoded "+str(i)+" images.")
images = np.array(images).reshape(90,300,150,1)
images = images / 255.
test_files = []
path_to_dataset = os.getcwd()
test_files.extend(os.listdir(path_to_dataset + "/dataset6"))
path = 'dataset6/'
test_images = []
for i in test_files:
img = cv2.imread(path+i)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (300,150))
retval, img = cv2.threshold(img, 0, 255, type = cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
test_images.append(img)
print("Encoded "+str(i)+" test images.")
test_images = np.array(test_images).reshape(390,300,150,1)
test_images = test_images / 255.
img_width = 300
img_height = 150
batch = 128
def modelCreator():
modelA = models.Sequential()
modelA.add(layers.Conv2D(16, (3, 3), input_shape=(img_width, img_height, 1)))
modelA.add(layers.BatchNormalization())
modelA.add(layers.Activation("relu"))
modelA.add(layers.MaxPooling2D((2, 2)))
modelA.add(layers.Conv2D(32, (3, 3)))
modelA.add(layers.BatchNormalization())
modelA.add(layers.Activation("relu"))
modelA.add(layers.MaxPooling2D((2, 2)))
modelA.add(layers.Conv2D(64, (3, 3)))
modelA.add(layers.BatchNormalization())
modelA.add(layers.Activation("relu"))
modelA.add(layers.MaxPooling2D((2, 2)))
modelA.add(layers.Conv2D(128, (3, 3)))
modelA.add(layers.BatchNormalization())
modelA.add(layers.Activation("relu"))
modelA.add(layers.MaxPooling2D((2, 2)))
modelA.add(layers.Conv2D(256, (3, 3)))
modelA.add(layers.BatchNormalization())
modelA.add(layers.Activation("relu"))
modelA.add(layers.MaxPooling2D((2, 2)))
modelA.add(layers.Flatten())
modelA.add(layers.Dense(batch))
modelA.add(layers.Reshape((1, batch)))
return modelA
modelu = modelCreator()
modelu.load_weights('model_weights.h5')
vectorized_train = modelu.predict(images)
final_dists = []
finale = []
for i in range(0, len(test_images)):
j = 0
if(i%13==0):
j = j + 3
x = vectorized_train[j:3+j]
vectorized_i = modelu.predict(test_images[i].reshape(1, 300, 150, 1))
vectorized_i = np.concatenate([vectorized_i, vectorized_i, vectorized_i])
v = np.sum(np.square(x - vectorized_i))
final_dists.append(v)
if(v < 1015):
finale.append("YES")
else:
finale.append("NO")
import pandas as pd
test_files_pd = pd.DataFrame(test_files)
yes_no_pd = pd.DataFrame(finale)
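# Assumed wrap-up (not in the source row): pair each test file with its YES/NO
# verdict and save the table; the output filename is a placeholder.
results_pd = pd.concat([test_files_pd, yes_no_pd], axis=1)
results_pd.columns = ['file', 'match']
results_pd.to_csv('matching_results.csv', index=False)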
#!/usr/bin/env python3
"""
DrugCentral db utility functions.
"""
import os,sys,re,json,logging,yaml
import pandas as pd
from pandas.io.sql import read_sql_query
import psycopg2,psycopg2.extras
#############################################################################
def Connect(dbhost, dbport, dbname, dbusr, dbpw):
"""Connect to db; specify default cursor type DictCursor."""
dsn = ("host='%s' port='%s' dbname='%s' user='%s' password='%s'"%(dbhost, dbport, dbname, dbusr, dbpw))
dbcon = psycopg2.connect(dsn)
dbcon.cursor_factory = psycopg2.extras.DictCursor
return dbcon
#############################################################################
def Version(dbcon, dbschema="public", fout=None):
sql = (f"SELECT * FROM {dbschema}.dbversion")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def MetaListdbs(dbcon, fout=None):
"""Pg meta-command: list dbs from pg_database."""
sql = ("SELECT pg_database.datname, pg_shdescription.description FROM pg_database LEFT OUTER JOIN pg_shdescription on pg_shdescription.objoid = pg_database.oid WHERE pg_database.datname ~ '^drug'")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
return df
#############################################################################
def ListColumns(dbcon, dbschema="public", fout=None):
df=None;
sql1 = (f"SELECT table_name FROM information_schema.tables WHERE table_schema = '{dbschema}'")
df1 = read_sql_query(sql1, dbcon)
for tname in df1.table_name:
sql2 = (f"SELECT column_name,data_type FROM information_schema.columns WHERE table_schema = '{dbschema}' AND table_name = '{tname}'")
df_this = read_sql_query(sql2, dbcon)
df_this["schema"] = dbschema
df_this["table"] = tname
df = df_this if df is None else pd.concat([df, df_this])
df = df[["schema", "table", "column_name", "data_type"]]
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListTables(dbcon, dbschema="public", fout=None):
'''Listing the tables.'''
sql = (f"SELECT table_name FROM information_schema.tables WHERE table_schema = '{dbschema}'")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListTablesRowCounts(dbcon, dbschema="public", fout=None):
'''Listing the table rowcounts.'''
df=None;
sql1 = (f"SELECT table_name FROM information_schema.tables WHERE table_schema = '{dbschema}'")
df1 = read_sql_query(sql1, dbcon)
for tname in df1.table_name:
sql2 = (f"SELECT COUNT(*) AS rowcount FROM {dbschema}.{tname}")
df_this = read_sql_query(sql2, dbcon)
df_this["schema"] = dbschema
df_this["table"] = tname
df = df_this if df is None else pd.concat([df, df_this])
df = df[["schema", "table", "rowcount"]]
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListStructures(dbcon, dbschema="public", fout=None):
sql = (f"SELECT id,name,cas_reg_no,smiles,inchikey,inchi,cd_formula AS formula,cd_molweight AS molweight FROM {dbschema}.structures")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListStructures2Smiles(dbcon, dbschema="public", fout=None):
sql = (f"SELECT smiles, id, name FROM {dbschema}.structures WHERE smiles IS NOT NULL")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListStructures2Molfile(dbcon, dbschema="public", fout=None):
"""fout is required arg."""
n_out=0;
cur = dbcon.cursor()
cur.execute(f"SELECT molfile, id, name FROM {dbschema}.structures WHERE molfile IS NOT NULL")
for row in cur:
molfile = re.sub(r'^[\s]*\n', str(row["id"])+"\n", row["molfile"])
fout.write(molfile)
fout.write("> <DRUGCENTRAL_STRUCT_ID>\n"+str(row["id"])+"\n\n")
fout.write("> <NAME>\n"+row["name"]+"\n\n")
fout.write("$$$$\n")
n_out+=1
logging.info(f"n_out: {n_out}")
#############################################################################
def ListProducts(dbcon, dbschema="public", fout=None):
sql = (f"SELECT * FROM {dbschema}.product")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListActiveIngredients(dbcon, dbschema="public", fout=None):
sql = (f"SELECT * FROM {dbschema}.active_ingredient")
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListXrefTypes(dbcon, fout=None):
sql="""\
SELECT DISTINCT
id AS xref_type_id,
type AS xref_type,
description AS xref_type_description
FROM
id_type
ORDER BY
xref_type
"""
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListXrefs(dbcon, xref_type=None, fout=None):
sql="""\
SELECT
idn.id_type xref_type,
idn.identifier xref,
s.id dc_struct_id,
s.name dc_struct_name
FROM
identifier idn
JOIN structures s ON s.id=idn.struct_id
"""
if xref_type:
sql += f"WHERE idn.id_type = '{xref_type}'"
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListIndications(dbcon, fout=None):
sql="""\
SELECT DISTINCT
omop.concept_id omop_concept_id,
omop.concept_name omop_concept_name,
omop.umls_cui,
omop.cui_semantic_type umls_semantic_type,
omop.snomed_conceptid,
omop.snomed_full_name
FROM
omop_relationship omop
JOIN
structures s ON omop.struct_id = s.id
WHERE
omop.relationship_name = 'indication'
"""
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListIndicationTargets(dbcon, fout=None):
sql="""\
SELECT DISTINCT
omop.concept_name AS omop_concept,
omop.umls_cui,
atf.struct_id,
s.name AS struct_name,
atf.target_id,
atf.target_name,
atf.gene,
atf.action_type,
atf.act_source,
atf.act_type,
atf.act_comment,
atf.relation,
atf.moa,
atf.moa_source,
atf.moa_source_url,
r.pmid AS ref_pmid,
r.doi AS ref_doi,
r.title AS ref_title,
r.dp_year AS ref_year
FROM
act_table_full atf
JOIN structures s ON s.id = atf.struct_id
JOIN omop_relationship omop ON omop.struct_id = s.id
LEFT OUTER JOIN reference r ON r.id = atf.moa_ref_id
WHERE
omop.relationship_name = 'indication'
"""
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def ListDrugdruginteractions(dbcon, fout=None):
sql="""\
SELECT
ddi.id AS ddi_id,
ddi.drug_class1,
ddi.drug_class2,
ddi.source_id,
drug_class1.id drug_class_id1,
drug_class1.source source1,
drug_class1.is_group is_group1,
drug_class2.id drug_class_id2,
drug_class2.source source2,
drug_class2.is_group is_group2
FROM
ddi
JOIN drug_class drug_class1 ON drug_class1.name = ddi.drug_class1
JOIN drug_class drug_class2 ON drug_class2.name = ddi.drug_class2
"""
logging.debug(f"SQL: {sql}")
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def SearchProducts(dbcon, terms, fout=None):
"""Search names via Pg regular expression (SIMILAR TO)."""
df=None;
for term in terms:
sql= f"""\
SELECT DISTINCT
s.id struct_id,
s.name struct_name,
p.id product_id,
p.ndc_product_code,
p.form product_form,
p.generic_name product_generic_name,
p.product_name,
p.route product_route
FROM
structures AS s
JOIN
active_ingredient ai ON ai.struct_id = s.id
JOIN
product p ON p.ndc_product_code = ai.ndc_product_code
WHERE
  p.product_name ~* '{term}' OR p.generic_name ~* '{term}'
"""
logging.debug(f"SQL: {sql}")
df_this = read_sql_query(sql, dbcon)
df = pd.concat([df, df_this])
logging.info(f"n_out: {df.shape[0]}")
if fout is not None: df.to_csv(fout, "\t", index=False)
else: return df
#############################################################################
def SearchIndications(dbcon, terms, fout=None):
"""Search names via Pg regular expression (SIMILAR TO)."""
df=None;
for term in terms:
sql=f"""\
SELECT DISTINCT
omop.concept_id omop_concept_id,
omop.concept_name omop_concept_name,
omop.umls_cui,
omop.cui_semantic_type umls_semantic_type,
omop.snomed_conceptid,
omop.snomed_full_name
FROM
omop_relationship omop
JOIN
structures s ON omop.struct_id = s.id
WHERE
omop.relationship_name = 'indication'
AND (omop.concept_name ~* '{term}' OR omop.snomed_full_name ~* '{term}')
"""
logging.debug(f"SQL: {sql}")
df_this = read_sql_query(sql, dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetIndicationStructures(dbcon, ids, fout=None):
"""Input OMOP conceptIds (INTEGER)."""
df=None;
sql="""\
SELECT DISTINCT
omop.concept_id omop_concept_id,
omop.concept_name omop_concept_name,
s.id struct_id,
s.name struct_name,
s.smiles,
s.inchikey,
s.inchi,
s.cd_formula,
s.cd_molweight
FROM
omop_relationship omop
JOIN
structures s ON omop.struct_id = s.id
WHERE
omop.relationship_name = 'indication'
AND omop.concept_id = {}
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructure(dbcon, ids, fout=None):
df=None;
sql = ("""SELECT id,name,cas_reg_no,smiles,inchikey,inchi,cd_formula AS formula,cd_molweight AS molweight FROM structures WHERE id = '{}'""")
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetDrugPage(dbcon, struct_id, fout):
"""Structure, with IDs, names, xrefs, and ATCs, plus associated products."""
df_struct = GetStructure(dbcon, [struct_id], None) # Should return one row.
if df_struct.empty: return None
drug = df_struct.to_dict(orient='records')[0]
#Add xrefs
df_xrefs = GetStructureXrefs(dbcon, [struct_id], None)
drug["xrefs"] = df_xrefs[["xref_type", "xref"]].to_dict(orient='records')
#Add names
df_names = GetStructureSynonyms(dbcon, [struct_id], None)
drug["synonyms"] = df_names["synonym"].tolist()
#Add ATCs
df_atcs = GetStructureAtcs(dbcon, [struct_id], None)
if not df_atcs.empty:
drug["atcs"] = df_atcs[["atc_code","atc_l1_code","atc_l1_name","atc_l2_code","atc_l2_name","atc_l3_code","atc_l3_name","atc_l4_code","atc_l4_name","atc_substance"]].to_dict(orient='records')
#Add products
df_products = GetStructureProducts(dbcon, [struct_id], None)
if not df_products.empty:
drug["products"] = df_products[["product_id","ndc_product_code","product_form","product_generic_name","product_name","product_route","product_marketing_status","product_active_ingredient_count"]].to_dict(orient='records')
#Add targets
df_targets = GetStructureTargets(dbcon, [struct_id], None)
if not df_targets.empty:
drug["targets"] = df_targets[["target_id","target_name","gene","action_type","act_source","act_type","act_comment","relation","moa","moa_source","moa_source_url","ref_pmid","ref_doi","ref_title","ref_year"]].to_dict(orient='records')
fout.write(json.dumps(drug, indent=2))
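# Example usage (sketch; host and credentials below are placeholders, not real values):
#   dbcon = Connect("localhost", "5432", "drugcentral", "user", "password")
#   with open("drug_1234.json", "w") as fout:
#     GetDrugPage(dbcon, 1234, fout)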
#############################################################################
def GetStructureByXref(dbcon, xref_type, ids, fout=None):
if not xref_type:
logging.error("xref_type required.")
return None
df=None;
sql = ("""SELECT idn.identifier xref, idn.id_type xref_type, s.id dc_struct_id, s.name dc_struct_name FROM structures AS s JOIN identifier AS idn ON idn.struct_id=s.id WHERE idn.id_type = '"""+xref_type+"""' AND idn.identifier = '{}'""")
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureBySynonym(dbcon, ids, fout=None):
df=None;
sql = ("""SELECT str.id, str.name structure_name, syn.name synonym FROM structures AS str JOIN synonyms AS syn ON syn.id=str.id WHERE syn.name = '{}'""")
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureXrefs(dbcon, ids, fout=None):
df=None;
sql = ("""SELECT struct_id, id_type AS xref_type, identifier AS xref FROM identifier WHERE struct_id = '{}'""")
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureProducts(dbcon, ids, fout=None):
df=None;
sql="""\
SELECT DISTINCT
s.id struct_id,
s.name struct_name,
s.smiles,
s.inchikey,
p.id product_id,
p.ndc_product_code,
p.form product_form,
p.generic_name product_generic_name,
p.product_name,
p.route product_route,
p.marketing_status product_marketing_status,
p.active_ingredient_count product_active_ingredient_count
FROM
structures AS s
JOIN
active_ingredient ai ON ai.struct_id = s.id
JOIN
product p ON p.ndc_product_code = ai.ndc_product_code
WHERE
s.id = '{}'
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureOBProducts(dbcon, ids, fout=None):
df=None;
sql="""\
SELECT DISTINCT
s.id struct_id,
s.name struct_name,
s.smiles,
s.inchikey,
ob.id ob_id,
ob.product_no ob_product_no,
ob.ingredient,
ob.dose_form,
ob.route,
ob.strength,
ob.appl_type,
ob.appl_no
FROM
structures s
JOIN struct2obprod s2ob ON s2ob.struct_id = s.id
JOIN ob_product ob ON ob.id = s2ob.prod_id
WHERE
s.id = '{}'
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureTargets(dbcon, ids, fout=None):
df=None; n_out=0;
sql="""\
SELECT DISTINCT
atf.struct_id,
atf.target_id,
atf.target_name,
atf.gene,
atf.action_type,
atf.act_source,
atf.act_type,
atf.act_comment,
atf.relation,
atf.moa,
atf.moa_source,
atf.moa_source_url,
r.pmid AS ref_pmid,
r.doi AS ref_doi,
r.title AS ref_title,
r.dp_year AS ref_year
FROM
act_table_full atf
JOIN structures s ON s.id = atf.struct_id
LEFT OUTER JOIN reference r ON r.id = atf.moa_ref_id
WHERE
atf.struct_id = {}
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
if fout is None: df = pd.concat([df, df_this])
else: df_this.to_csv(fout, "\t", index=False)
n_out += df_this.shape[0]
logging.info(f"n_out: {n_out}")
return df
#############################################################################
def ListAtcs(dbcon, fout=None):
"""List ATC codes and drug count for which drugs exist."""
sql="""\
SELECT DISTINCT
atc.l1_code atc_l1_code, atc.l1_name atc_l1_name,
atc.l2_code atc_l2_code, atc.l2_name atc_l2_name,
atc.l3_code atc_l3_code, atc.l3_name atc_l3_name,
atc.l4_code atc_l4_code, atc.l4_name atc_l4_name,
COUNT(DISTINCT s.id) drug_count
FROM
atc
JOIN
struct2atc ON struct2atc.id = atc.id
JOIN
structures s ON s.id = struct2atc.struct_id
GROUP BY
atc.l1_code, atc.l1_name,
atc.l2_code, atc.l2_name,
atc.l3_code, atc.l3_name,
atc.l4_code, atc.l4_name
ORDER BY
atc.l1_name, atc.l2_name, atc.l3_name, atc.l4_name
"""
logging.debug(sql)
df = read_sql_query(sql, dbcon)
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureSynonyms(dbcon, ids, fout=None):
df=None;
sql="""\
SELECT DISTINCT
s.id struct_id,
s.name struct_name,
synonyms.name AS synonym
FROM
structures AS s
JOIN
synonyms ON synonyms.id = s.id
WHERE
s.id = '{}'
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
df = df_this if df is None else pd.concat([df, df_this])
if fout: df.to_csv(fout, "\t", index=False)
logging.info(f"n_out: {df.shape[0]}")
return df
#############################################################################
def GetStructureAtcs(dbcon, ids, fout=None):
df=None;
sql="""\
SELECT DISTINCT
s.id struct_id,
s.name struct_name,
s.smiles,
s.inchikey,
atc.code AS atc_code,
atc.l1_code AS atc_l1_code,
atc.l1_name AS atc_l1_name,
atc.l2_code AS atc_l2_code,
atc.l2_name AS atc_l2_name,
atc.l3_code AS atc_l3_code,
atc.l3_name AS atc_l3_name,
atc.l4_code AS atc_l4_code,
atc.l4_name AS atc_l4_name,
atc.chemical_substance AS atc_substance
FROM
structures AS s
JOIN
struct2atc ON struct2atc.struct_id = s.id
JOIN
atc ON atc.id = struct2atc.id
WHERE
s.id = '{}'
"""
for id_this in ids:
logging.debug(sql.format(id_this))
df_this = read_sql_query(sql.format(id_this), dbcon)
    df = df_this if df is None else pd.concat([df, df_this])
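  # The source row is cut off here; the closing lines below mirror every other
  # helper in this module and are an assumed completion.
  if fout: df.to_csv(fout, "\t", index=False)
  logging.info(f"n_out: {df.shape[0]}")
  return df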
#read a csv file, loading it into a DataFrame
import numpy as np #python's array proccesing / linear algebra library
import pandas as pd #data processing / stats library
import matplotlib.pyplot as plt #data visualization
import csv
#read in some data
fn = 'polling_data.csv'
df=pd.read_csv(fn)
#we can manually sets print options (lots of stuff like precision, max_colwidth avail)
pd.set_option('display.width', 500)
pd.set_option('display.max_rows', 5)
print("Here's what our data looks like:\n")
print(df.head(n=4))
#these are the indices auto-loaded:
print('\n')
print('row index:')
print(df.index)
print('\ncolumn index:')
print(df.columns)
print('\ncheck out the data types:')
pd.set_option('display.max_rows', 10)
print(df.dtypes)
#to select one or more columns, use slice notation
print('\n')
print(df["Datetime"])
#note that Datetime is not right (it came from Excel). Here's a fix:
#Excel stores dates as serial day counts from 1899-12-30
df = df.assign(Datetime=pd.to_datetime('1899-12-30') + pd.to_timedelta(df['Datetime'], unit='D'))
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from argcheck import (expect_types,
optional,
preprocess)
from xutils import py_assert
from alphaware.const import INDEX_FACTOR
from alphaware.enums import (FreqType,
OutputDataFormat)
from .input_validation import ensure_pd_df
@expect_types(data=(pd.Series, pd.DataFrame))
def convert_df_format(data, target_format=OutputDataFormat.MULTI_INDEX_DF, col_name='factor',
multi_index=INDEX_FACTOR):
if target_format == OutputDataFormat.MULTI_INDEX_DF:
tmp = data.stack()
data_ = pd.DataFrame(tmp)
data_.index.names = multi_index.full_index
data_.columns = [col_name]
else:
tmp = data.unstack()
index = tmp.index
columns = tmp.columns.get_level_values(multi_index.sec_index).tolist()
data_ = pd.DataFrame(tmp.values, index=index, columns=columns)
return data_
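# Example (sketch, not in the original module; the tickers are made up): a wide
# date-by-security frame becomes a MultiIndex frame with a single 'factor' column.
#   wide = pd.DataFrame([[1.0, 2.0]], index=pd.to_datetime(['2017-01-03']),
#                       columns=['000001.SZ', '000002.SZ'])
#   long = convert_df_format(wide)   # index: (date, secID); column: 'factor'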
@expect_types(df=(pd.Series, pd.DataFrame))
def top(df, column=None, n=5):
if isinstance(df, pd.Series):
ret = df.sort_values(ascending=False)[:n]
else:
py_assert(column is not None, "Specify the col name or use pandas Series type of data")
ret = df.sort_values(by=column, ascending=False)[:n]
return ret
@expect_types(data=(pd.DataFrame, pd.Series), freq=optional(FreqType, str))
def group_by_freq(data, freq=FreqType.EOM):
data_ = pd.DataFrame(data) if isinstance(data, pd.Series) else data
if freq == FreqType.EOD:
return data_.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
    elif freq == FreqType.EOM:
        return data_.groupby(pd.Grouper(freq='M'))
    elif freq == FreqType.EOQ:
        return data_.groupby(pd.Grouper(freq='Q'))
    elif freq == FreqType.EOY:
        return data_.groupby(pd.Grouper(freq='A'))
@expect_types(x=(pd.DataFrame, pd.Series))
def quantile_calc(x, quantiles, bins):
if quantiles is not None and bins is None:
return pd.qcut(x, quantiles, labels=False) + 1
elif bins is not None and quantiles is None:
return pd.cut(x, bins, labels=False) + 1
raise ValueError('Either quantiles or bins should be provided')
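# Example (sketch): quantile_calc(pd.Series([0.1, 0.5, 0.2, 0.9, 0.4]), 5, None)
# labels each value with its quintile, 1 (lowest) through 5 (highest).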
@expect_types(x=(pd.DataFrame, pd.Series))
def fwd_return(x, date_index=INDEX_FACTOR.date_index, sec_index=INDEX_FACTOR.sec_index, period=1):
"""
每个日期和股票代码对应的未来收益
"""
dates = sorted(set(x.index.get_level_values(date_index)))
ret = pd.DataFrame()
for i in range(len(dates) - period):
shift_date = dates[i + period]
data_concat = x.loc[shift_date].reset_index()
data_concat[date_index] = [dates[i]] * len(data_concat)
        ret = pd.concat([ret, data_concat], axis=0)
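    # The source row is truncated here; a plausible completion (assumption)
    # restores the (date, security) MultiIndex before returning.
    ret.set_index([date_index, sec_index], inplace=True)
    return ret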
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 10 15:36:18 2021
Who would be so cruel to someone like you?
No one but you
Who would make the rules for the things that you do?
No one but you
I woke up this morning, looked at the sky
I thought of all of the time passing by
It didn't matter how hard we tried
'Cause in the end
@author: KING
"""
#%% Main functions
from sympy import symbols, solve
from sklearn.neural_network import MLPRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import statsmodels.discrete.discrete_model as logitm
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import math
import seaborn as sns
import statsmodels.regression.linear_model as lm_
import statsmodels.robust.robust_linear_model as roblm_
import statsmodels.regression.quantile_regression as QREG
from statsmodels.stats.outliers_influence import summary_table
import itertools
def log_A(x):  ## log base 10
xs = math.log(x,10)
return xs
chla = symbols('chla')
SA = symbols('SA')
depth = symbols('depth')
WT = symbols('WT')
def log_series(x):  ## element-wise log base 10
ou = []
for ir in range(0,len(x)):
try:
xs = math.log(x[ir],10)
ou.append(xs)
except:
xs = np.nan
ou.append(xs)
oui = pd.Series(ou)
return oui
def FuntOUT(olspp,ols_r,TYPE,XZXsp,zxqj,UTR):
if TYPE == 'OLS':
result_XianZHU = ols_r.pvalues
result_params = ols_r.params
result_conf = ols_r.conf_int(zxqj)
resultCB = pd.concat([result_XianZHU, result_params, result_conf ],axis=1)
resultCB.columns = ['p','params','conf_left','conf_right']
XZxiang = resultCB[resultCB['p']<=XZXsp]
functio_XZ = XZxiang.index.tolist()
model_r2 = np.round(ols_r.rsquared,3)
model_r2_rejusted = np.round(ols_r.rsquared_adj,3)
model_aic = np.round(ols_r.aic,3)
model_bic = np.round(ols_r.bic,3)
st, rssew, ss2 = summary_table(ols_r, alpha= 0.05)
predict_mean_ci_low, predict_mean_ci_upp = rssew[:, 4:6].T
conf_left_p = sm.OLS(predict_mean_ci_low, logbl).fit()
conf_right_p = sm.OLS(predict_mean_ci_upp, logbl).fit()
baseline = TRT(resultCB,functio_XZ,1,UTR)
result_XianZHU_CONF = conf_left_p.pvalues
result_params_CONF = conf_left_p.params
result_conf_CONF = conf_left_p.conf_int(zxqj)
resultCB_CONF = pd.concat([result_XianZHU_CONF, result_params_CONF, result_conf_CONF ],axis=1)
resultCB_CONF.columns = ['p','params','conf_left','conf_right']
XZxiang_CONF = resultCB_CONF[resultCB_CONF['p']<=XZXsp]
functio_XZ_CONF = XZxiang_CONF.index.tolist()
conf_left = TRT(resultCB_CONF,functio_XZ_CONF,1,UTR)
result_XianZHU_CONF2 = conf_right_p.pvalues
result_params_CONF2 = conf_right_p.params
result_conf_CONF2 = conf_right_p.conf_int(zxqj)
resultCB_CONF2 = pd.concat([result_XianZHU_CONF2, result_params_CONF2, result_conf_CONF2 ],axis=1)
resultCB_CONF2.columns = ['p','params','conf_left','conf_right']
XZxiang_CONF2 = resultCB_CONF2[resultCB_CONF2['p']<=XZXsp]
functio_XZ_CONF2 = XZxiang_CONF2.index.tolist()
conf_right = TRT(resultCB_CONF2,functio_XZ_CONF2,1,UTR)
ALL_func = resultCB
functio_ALL = ALL_func.index.tolist()
quanbuFUNCTION = TRT( ALL_func,functio_ALL ,1,UTR)
WE_res = pd.DataFrame(list(['('+str(quanbuFUNCTION)+')','('+str(baseline)+')','('+str(conf_left)+')','('+str(conf_right)+')',model_r2,model_r2_rejusted,model_aic,model_bic])).T
WE_res.columns = ['ALL_Function','SigniFicant_baseline','conf_left','conf_right','r2','r2_adj','aic','bic']
if TYPE == 'MID':
result_XianZHU = ols_r.pvalues
result_params = ols_r.params
result_conf = ols_r.conf_int(zxqj)
resultCB = pd.concat([result_XianZHU, result_params, result_conf ],axis=1)
resultCB.columns = ['p','params','conf_left','conf_right']
XZxiang = resultCB[resultCB['p']<=XZXsp]
functio_XZ = XZxiang.index.tolist()
model_r2 = np.round(ols_r.prsquared,3)
model_r2_rejusted = np.nan
model_aic = np.nan
model_bic = np.nan
baseline = TRT(resultCB,functio_XZ,1,UTR)
conf_left = TRT(resultCB,functio_XZ,2,UTR)
conf_right = TRT(resultCB,functio_XZ,3,UTR)
ALL_func = resultCB
functio_ALL = ALL_func.index.tolist()
quanbuFUNCTION = TRT( ALL_func,functio_ALL ,1,UTR)
WE_res = pd.DataFrame(list(['('+str(quanbuFUNCTION)+')','('+str(baseline)+')','('+str(conf_left)+')','('+str(conf_right)+')',model_r2,model_r2_rejusted,model_aic,model_bic])).T
WE_res.columns = ['ALL_Function','SigniFicant_baseline','conf_left','conf_right','r2','r2_adj','aic','bic']
if TYPE == 'RLM':
result_XianZHU = ols_r.pvalues
result_params = ols_r.params
result_conf = ols_r.conf_int(zxqj)
resultCB = pd.concat([result_XianZHU, result_params, result_conf ],axis=1)
resultCB.columns = ['p','params','conf_left','conf_right']
XZxiang = resultCB[resultCB['p']<=XZXsp]
functio_XZ = XZxiang.index.tolist()
model_r2 = np.round(olspp.rsquared,3)
model_r2_rejusted = np.nan
model_aic = np.nan
model_bic = np.nan
baseline = TRT(resultCB,functio_XZ,1,UTR)
conf_left = TRT(resultCB,functio_XZ,2,UTR)
conf_right = TRT(resultCB,functio_XZ,3,UTR)
ALL_func = resultCB
functio_ALL = ALL_func.index.tolist()
quanbuFUNCTION = TRT( ALL_func,functio_ALL ,1,UTR)
WE_res = pd.DataFrame(list(['('+str(quanbuFUNCTION)+')','('+str(baseline)+')','('+str(conf_left)+')','('+str(conf_right)+')',model_r2,model_r2_rejusted,model_aic,model_bic])).T
WE_res.columns = ['ALL_Function','SigniFicant_baseline','conf_left','conf_right','r2','r2_adj','aic','bic']
return baseline,conf_left,conf_right,WE_res
def Trans(eq):
if eq == 'chla' :
globals()[eq] = chla
return chla
if eq == 'SA' :
globals()[eq] = SA
return SA
if eq == 'depth' :
globals()[eq] = depth
return depth
if eq == 'WT' :
globals()[eq] = WT
return WT
def TRT(resultCB,functio_WW,NUM,UTR):
FC = []
if UTR[0] == 'chla':
EP_V1 = 'chla'
if UTR[0] == 'SA':
EP_V1 = 'SA'
if UTR[0] == 'depth':
EP_V1 = 'depth'
if UTR[0] == 'restime':
EP_V1 = 'restime'
if UTR[0] == 'WT':
EP_V1 = 'WT'
if UTR[1] == 'chla':
EP_V2 = 'chla'
if UTR[1] == 'SA':
EP_V2 = 'SA'
if UTR[1] == 'depth':
EP_V2 = 'depth'
if UTR[1] == 'restime':
EP_V2 = 'restime'
if UTR[1] == 'WT':
EP_V2 = 'WT'
if UTR[2] == 'chla':
EP_V3 = 'chla'
if UTR[2] == 'SA':
EP_V3 = 'SA'
if UTR[2] == 'depth':
EP_V3 = 'depth'
if UTR[2] == 'restime':
EP_V3 = 'restime'
if UTR[2] == 'WT':
EP_V3 = 'WT'
EP_SBL_JH1 = EP_V1 + '&'+ EP_V2
EP_SBL_JH2 = EP_V1 + '&'+ EP_V3
EP_SBL_JH3 = EP_V2 + '&'+ EP_V3
EP_TBL_JH = EP_V1 + '&'+ EP_V2 + '&'+ EP_V3
for renj in functio_WW:
if renj == 'chla':
dy = (np.round(resultCB.T['chla'][NUM],3) * symbols('chla'))
FC.append(dy)
if renj == 'SA':
dy = (np.round(resultCB.T['SA'][NUM],3) *symbols('SA'))
FC.append(dy)
if renj == 'depth':
dy = (np.round(resultCB.T['depth'][NUM],3) *symbols('depth'))
FC.append(dy)
if renj == 'WT':
dy = (np.round(resultCB.T['WT'][NUM],3) * symbols('WT'))
FC.append(dy)
if renj == EP_SBL_JH1:
dy = (np.round(resultCB.T[EP_SBL_JH1 ][NUM],3) * symbols(EP_SBL_JH1))
FC.append(dy)
if renj == EP_SBL_JH2:
dy = (np.round(resultCB.T[EP_SBL_JH2 ][NUM],3) * symbols(EP_SBL_JH2))
FC.append(dy)
if renj == EP_SBL_JH3:
dy = (np.round(resultCB.T[EP_SBL_JH3 ][NUM],3) * symbols(EP_SBL_JH3))
FC.append(dy)
if renj == EP_TBL_JH:
dy = (np.round(resultCB.T[EP_TBL_JH][NUM],3) * symbols(EP_TBL_JH))
FC.append(dy)
DFC = ((np.sum(np.array(FC))) + np.round(resultCB.T['const'][NUM] ,3))
return DFC
def Flux_Canculate(module_ADD_CHANGSHU,MODEL,UTR,Model_Standerror):
#module_ADD_CHANGSHU = 0
all_HY = []
all_HY_z = []
all_HY_y = []
for i in range(0,180):
for j in range(0,20):
section_SA = upscaling.iloc[i + 2 ,j + 8]
if section_SA != 0:
m_SA = log_A( upscaling.iloc[i + 2 , 5] )
m_Chla = log_A( upscaling.iloc[0, j + 8 ] )
m_WT = log_A( upscaling.iloc[i + 2 , 6] )
m_Depth = log_A( upscaling.iloc[i + 2 , 3] )
S_Variable1 = symbols(UTR[0])
S_Variable2 = symbols(UTR[1])
S_Variable3 = symbols(UTR[2])
S_SBLjh_1 = symbols(UTR[0] + '&' + UTR[1])
S_SBLjh_2 = symbols(UTR[0] + '&' + UTR[2])
S_SBLjh_3 = symbols(UTR[1] + '&' + UTR[2])
S_TBLjh = symbols(UTR[0] + '&' + UTR[1] + '&' + UTR[2])
if UTR[0] == 'chla':
EP_V1 = m_Chla
if UTR[0] == 'SA':
EP_V1 =m_SA
if UTR[0] == 'depth':
EP_V1 = m_Depth
if UTR[0] == 'WT':
                    EP_V1 = m_WT
if UTR[1] == 'chla':
EP_V2 = m_Chla
if UTR[1] == 'SA':
EP_V2 =m_SA
if UTR[1] == 'depth':
EP_V2 = m_Depth
if UTR[1] == 'WT':
EP_V2 = m_WT
if UTR[2] == 'chla':
EP_V3 = m_Chla
if UTR[2] == 'SA':
EP_V3 =m_SA
if UTR[2] == 'depth':
EP_V3 = m_Depth
if UTR[2] == 'WT':
EP_V3 = m_WT
EP_SBL_JH1 = EP_V1 * EP_V2
EP_SBL_JH2 = EP_V1 * EP_V3
EP_SBL_JH3 = EP_V2 * EP_V3
EP_TBL_JH = EP_V1 * EP_V2 * EP_V3
test_fUNC= float(MODEL.evalf(subs={
S_Variable1:EP_V1,
S_Variable2:EP_V2,
S_Variable3:EP_V3,
S_SBLjh_1:(EP_SBL_JH1),
S_SBLjh_2:(EP_SBL_JH2),
S_SBLjh_3:(EP_SBL_JH3),
S_TBLjh:(EP_TBL_JH),
}))
reTUEN_FuncValues = (10** test_fUNC ) - module_ADD_CHANGSHU
rF_z = (10** (test_fUNC - 1.96*Model_Standerror) ) - module_ADD_CHANGSHU
rF_y = (10** (test_fUNC + 1.96*Model_Standerror) ) - module_ADD_CHANGSHU
hy_dA = (reTUEN_FuncValues) * section_SA * 1000000*365
hy_dA_z = (rF_z) * section_SA * 1000000*365
hy_dA_y = (rF_y) * section_SA * 1000000*365
all_HY.append(hy_dA)
all_HY_z.append(hy_dA_z)
all_HY_y.append(hy_dA_y)
if section_SA == 0:
all_HY.append(0)
all_HY_z.append(0)
all_HY_y.append(0)
ALL_origin = pd.Series(all_HY)
sum_allORIG = np.sum(ALL_origin);
out_TONGLIANGTOTAL = (sum_allORIG*(10**(-15))) * 0.6
ALL_origin_z = pd.Series(all_HY_z)
sum_allORIG_z = np.sum(ALL_origin_z);
out_TONGLIANGTOTAL_z = (sum_allORIG_z*(10**(-15))) * 0.6
ALL_origin_y = pd.Series(all_HY_y)
sum_allORIG_y = np.sum(ALL_origin_y);
out_TONGLIANGTOTAL_y = (sum_allORIG_y*(10**(-15))) * 0.6
###########################
ALLmit = []
ALLmit_z = []
ALLmit_y = []
rwe = [0,20,40,60,80,100,120,140,160]
for rw in rwe:
all_HY2 = []
all_HY2_z = []
all_HY2_y = []
dwmj = []
for i2 in range(0+rw,20+rw):
for j2 in range(0,20):
section_SA2 = upscaling.iloc[i2 + 2 ,j2 + 8]
if section_SA2 != 0:
m_SA2 = log_A( upscaling.iloc[i2 + 2 , 5] )
m_Chla2 = log_A( upscaling.iloc[0, j2 + 8 ] )
m_WT2 = log_A( upscaling.iloc[i2 + 2 , 6] )
m_Depth2 = log_A( upscaling.iloc[i2 + 2 , 3] )
S_Variable1 = symbols(UTR[0])
S_Variable2 = symbols(UTR[1])
S_Variable3 = symbols(UTR[2])
S_SBLjh_1 = symbols(UTR[0] + '&' + UTR[1])
S_SBLjh_2 = symbols(UTR[0] + '&' + UTR[2])
S_SBLjh_3 = symbols(UTR[1] + '&' + UTR[2])
S_TBLjh = symbols(UTR[0] + '&' + UTR[1] + '&' + UTR[2])
if UTR[0] == 'chla':
EP_V1 = m_Chla2
if UTR[0] == 'SA':
EP_V1 =m_SA2
if UTR[0] == 'depth':
EP_V1 = m_Depth2
if UTR[0] == 'WT':
                        EP_V1 = m_WT2
if UTR[1] == 'chla':
EP_V2 = m_Chla2
if UTR[1] == 'SA':
EP_V2 =m_SA2
if UTR[1] == 'depth':
EP_V2 = m_Depth2
if UTR[1] == 'WT':
EP_V2 = m_WT2
if UTR[2] == 'chla':
EP_V3 = m_Chla2
if UTR[2] == 'SA':
EP_V3 =m_SA2
if UTR[2] == 'depth':
EP_V3 = m_Depth2
if UTR[2] == 'WT':
EP_V3 = m_WT2
EP_SBL_JH1 = EP_V1 * EP_V2
EP_SBL_JH2 = EP_V1 * EP_V3
EP_SBL_JH3 = EP_V2 * EP_V3
EP_TBL_JH = EP_V1 * EP_V2 * EP_V3
test_fUNC2= float(MODEL.evalf(subs={
S_Variable1:EP_V1,
S_Variable2:EP_V2,
S_Variable3:EP_V3,
S_SBLjh_1:(EP_SBL_JH1),
S_SBLjh_2:(EP_SBL_JH2),
S_SBLjh_3:(EP_SBL_JH3),
S_TBLjh:(EP_TBL_JH),
}))
reTUEN_FuncValues2 = (10** test_fUNC2 ) - module_ADD_CHANGSHU
reTUEN_FuncValues2_z = (10** (test_fUNC2 - 1.96*Model_Standerror) ) - module_ADD_CHANGSHU
reTUEN_FuncValues2_y = (10** (test_fUNC2 + 1.96*Model_Standerror) ) - module_ADD_CHANGSHU
hy_dA2 = (reTUEN_FuncValues2) * section_SA2 * 1000000*365
hy_dA2_z = (reTUEN_FuncValues2_z) * section_SA2 * 1000000*365
hy_dA2_y = (reTUEN_FuncValues2_y) * section_SA2 * 1000000*365
all_HY2.append(hy_dA2)
all_HY2_z.append(hy_dA2_z)
all_HY2_y.append(hy_dA2_y)
                        dwmj.append(reTUEN_FuncValues2)
if section_SA2 == 0:
all_HY2.append(0)
dwmj.append(0)
all_HY2_z.append(0)
all_HY2_y.append(0)
ALL_origin2 = pd.Series(all_HY2)
sum_allORIG2 = np.sum(ALL_origin2);
out_TONGLIANG2 = sum_allORIG2*(10**(-15))
ALLmit.append(out_TONGLIANG2*0.6)
ALL_origin2_z = pd.Series(all_HY2_z)
sum_allORIG2_z = np.sum(ALL_origin2_z);
out_TONGLIANG2_z = sum_allORIG2_z*(10**(-15))
ALLmit_z.append(out_TONGLIANG2_z*0.6)
ALL_origin2_y = pd.Series(all_HY2_y)
sum_allORIG2_y = np.sum(ALL_origin2_y);
out_TONGLIANG2_y = sum_allORIG2_y*(10**(-15))
ALLmit_y.append(out_TONGLIANG2_y*0.6)
print ('=================================================')
    print ('Estimated total ' + str(ForcastName) + ' flux (Tg): ', np.round(out_TONGLIANGTOTAL,3))
    print ('Lower bound of the total flux (Tg): ', np.round(out_TONGLIANGTOTAL_z,3))
    print ('Upper bound of the total flux (Tg): ', np.round(out_TONGLIANGTOTAL_y,3))
    print ('Sub-group totals (base / lower / upper): ', np.round(ALLmit,3), np.round(ALLmit_z,3), np.round(ALLmit_y,3))
    print ('Significant model equation: ', MODEL)
    print ('=====================Computation finished=======================')
return np.round(out_TONGLIANGTOTAL,3),np.round(ALLmit,3),np.round(out_TONGLIANGTOTAL_z,3) ,np.round(out_TONGLIANGTOTAL_y,3),np.round(ALLmit_z,3),np.round(ALLmit_y,3)
def UDI(two_interaction,num):
shuang = []
for two_IT_1 in itertools.combinations(two_interaction, num):
if num == 1:
shuang.append(two_IT_1[0])
if num == 2:
shuang.append([two_IT_1[0],two_IT_1[1] ])
if num == 3:
shuang.append([two_IT_1[0],two_IT_1[1] ,two_IT_1[2]])
if num == 4:
shuang.append([two_IT_1[0],two_IT_1[1] ,two_IT_1[2],two_IT_1[3]])
return shuang
def UDI_Value(two_interaction_values,num):
shuang = []
for two_IT_1 in itertools.combinations(two_interaction_values, num):
if num == 1:
shuang.append(two_IT_1[0])
if num == 2:
shuang.append([two_IT_1[0],two_IT_1[1] ])
if num == 3:
shuang.append([two_IT_1[0],two_IT_1[1] ,two_IT_1[2]])
if num == 4:
shuang.append([two_IT_1[0],two_IT_1[1] ,two_IT_1[2],two_IT_1[3]])
return shuang
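# Example (sketch): UDI enumerates the symbol combinations used when assembling models,
# e.g. UDI(['a&b', 'a&c', 'b&c'], 2) -> [['a&b', 'a&c'], ['a&b', 'b&c'], ['a&c', 'b&c']]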
#%% First, forecast CH4_T
upscaling = pd.read_excel(r'C:\Users\KING\Desktop\温室气体数据整理_集合0508\UPscaling.xlsx',sheet_name='Lake_99')
eEdata = pd.read_excel(r'C:\Users\KING\Desktop\Triumphant_0515_湖泊.xlsx')
eEdatatype = eEdata[eEdata['Type']!='reservoirs']
zhdata = eEdatatype
VARILIST = ['chla','SA','depth']
y_BL = 'co2'
Yfor_Variable = zhdata[y_BL]
FC_testA = []
FC_nameA = []
JH_2ji = []
JH_2ji_SYM = []
JH_3ji = []
JH_3ji_SYM = []
JH_1ji = []
JH_1ji_SYM = []
FC_testA = []
FC_nameA = []
JH_2ji = []
JH_2ji_SYM = []
JH_1ji = []
JH_1ji_SYM = []
Y_forlist = []
for UTR in itertools.combinations(VARILIST, 3):
S_Variable1 = symbols(UTR[0])
S_Variable2 = symbols(UTR[1])
S_Variable3 = symbols(UTR[2])
S_SBLjh_1 = symbols(UTR[0] + '&' + UTR[1])
S_SBLjh_2 = symbols(UTR[0] + '&' + UTR[2])
S_SBLjh_3 = symbols(UTR[1] + '&' + UTR[2])
S_TBLjh = symbols(UTR[0] + '&' + UTR[1] + '&' + UTR[2])
Q_Data = pd.concat([Yfor_Variable,zhdata['chla_execute'],zhdata['SA'],zhdata['Mean_execute'] ,zhdata['WT_execute'] ], axis = 1).dropna(axis=0).reset_index(drop=True)
Y_for = log_series(Q_Data[y_BL] + 328.1)
Y_forlist.append(Y_for)
for pie in range(0,3):
if UTR[pie] == 'chla':
chla= log_series(Q_Data['chla_execute'])
if UTR[pie] == 'SA':
SA = log_series(Q_Data['SA'])
if UTR[pie] == 'depth':
depth = log_series(Q_Data['Mean_execute'])
if UTR[pie] == 'WT':
WT = log_series(Q_Data['WT_execute'])
BLmz = UTR
    Var1 = BLmz[0];BL1 = Trans(Var1).rename(UTR[0])  # rename returns None with inplace=True, so keep the returned copy
    Var2 = BLmz[1];BL2 = Trans(Var2).rename(UTR[1])
    Var3 = BLmz[2];BL3 = Trans(Var3).rename(UTR[2])
comB = [BL1,BL2,BL3]
#comB.columns = [UTR[0],UTR[1],UTR[2]]
SymB = [Var1,Var2,Var3]
FC_testA.append(comB)
FC_nameA.append(SymB)
selfbl = SymB
    for rew_2ji in itertools.combinations(selfbl, 2): ## add the two-way interaction sub-terms here:
JH_G_1 = rew_2ji[0]; BLJH_G_1 = Trans(JH_G_1)
JH_G_2 = rew_2ji[1]; BLJH_G_2 = Trans(JH_G_2)
JH_C_bl = BLJH_G_1*BLJH_G_2
JH_C_symbol = JH_G_1+ '&' + JH_G_2
JH_2ji.append(JH_C_bl)
JH_2ji_SYM.append(JH_C_symbol)
    ## add the three-way interaction term here
rew_1j = BL1*BL2*BL3
rew_1jSYM = SymB[0] + '&' + SymB[1] + '&' + SymB[2]
JH_1ji.append(rew_1j)
JH_1ji_SYM.append(rew_1jSYM)
single_noninteraction_numble = len(FC_nameA)
Value_FC_Single = []
Value_FC_twointer_dan = []
Value_FC_threeinter_dan = []
Value_FC_twointer_shuang = []
Value_FC_threeinter_shuang = []
Value_FC_threeinter_san = []
Value_FC_twointer_san = []
Value_FC_threeinter_si = []
Value_FC_twointer_si = []
Value_FC_fourthinter_one = []
Value_FC_2dan_plus_3dan = []
Value_FC_2dan_plus_3shuang= []
Value_FC_2dan_plus_3san= []
Value_FC_2dan_plus_3si= []
Value_FC_2dan_plus_4= []
Value_FC_2shuang_plus_3dan = []
Value_FC_2shuang_plus_3shuang= []
Value_FC_2shuang_plus_3san= []
Value_FC_2shuang_plus_3si= []
Value_FC_2shuang_plus_4= []
Value_FC_2san_plus_3dan = []
Value_FC_2san_plus_3shuang= []
Value_FC_2san_plus_3san= []
Value_FC_2san_plus_3si= []
Value_FC_2san_plus_4= []
Value_FC_2si_plus_3dan = []
Value_FC_2si_plus_3shuang= []
Value_FC_2si_plus_3san= []
Value_FC_2si_plus_3si= []
Value_FC_2si_plus_4= []
Value_FC_3dan_plus_4 = []
Value_FC_3shuang_plus_4 = []
Value_FC_3san_plus_4 = []
Value_FC_3si_plus_4 = []
FC_Single = []
FC_twointer_dan = []
FC_threeinter_dan = []
FC_twointer_shuang = []
FC_threeinter_shuang = []
FC_threeinter_san = []
FC_twointer_san = []
FC_threeinter_si = []
FC_twointer_si = []
FC_fourthinter_one = []
FC_2dan_plus_3dan = []
FC_2dan_plus_3shuang = []
FC_2dan_plus_3san = []
FC_2dan_plus_3si = []
FC_2dan_plus_4 = []
FC_2shuang_plus_3dan = []
FC_2shuang_plus_3shuang = []
FC_2shuang_plus_3san = []
FC_2shuang_plus_3si = []
FC_2shuang_plus_4 = []
FC_2san_plus_3dan = []
FC_2san_plus_3shuang = []
FC_2san_plus_3san = []
FC_2san_plus_3si = []
FC_2san_plus_4 = []
FC_2si_plus_3dan = []
FC_2si_plus_3shuang = []
FC_2si_plus_3san = []
FC_2si_plus_3si = []
FC_2si_plus_4 = []
FC_3dan_plus_4 = []
FC_3shuang_plus_4 = []
FC_3san_plus_4 = []
FC_3si_plus_4 = []
Y_FC_Single = []
Y_FC_twointer_dan = []
Y_FC_threeinter_dan = []
Y_FC_twointer_shuang = []
Y_FC_threeinter_shuang = []
Y_FC_threeinter_san = []
Y_FC_twointer_san = []
Y_FC_threeinter_si = []
Y_FC_twointer_si = []
Y_FC_fourthinter_one = []
Y_FC_2dan_plus_3dan = []
Y_FC_2dan_plus_3shuang= []
Y_FC_2dan_plus_3san= []
Y_FC_2dan_plus_3si= []
Y_FC_2dan_plus_4= []
Y_FC_2shuang_plus_3dan = []
Y_FC_2shuang_plus_3shuang= []
Y_FC_2shuang_plus_3san= []
Y_FC_2shuang_plus_3si= []
Y_FC_2shuang_plus_4= []
Y_FC_2san_plus_3dan = []
Y_FC_2san_plus_3shuang= []
Y_FC_2san_plus_3san= []
Y_FC_2san_plus_3si= []
Y_FC_2san_plus_4= []
Y_FC_2si_plus_3dan = []
Y_FC_2si_plus_3shuang= []
Y_FC_2si_plus_3san= []
Y_FC_2si_plus_3si= []
Y_FC_2si_plus_4= []
Y_FC_3dan_plus_4 = []
Y_FC_3shuang_plus_4 = []
Y_FC_3san_plus_4 = []
Y_FC_3si_plus_4 = []
for single_order in range( 0 , single_noninteraction_numble):
    two_interaction = JH_2ji_SYM[3*single_order:3 + 3*single_order] ## two-way interaction symbols
    two_interaction_values = JH_2ji[3*single_order:3 + 3*single_order] ## two-way interaction values
    three_interaction = JH_1ji_SYM[1*single_order:1 + 1*single_order] ## three-way interaction symbols
    three_interaction_values = JH_1ji[1*single_order:1 + 1*single_order] ## three-way interaction values
    ####################### Section divider #######################
Value_single_order = single_order
Sym_single = FC_nameA[single_order]
Value_single = FC_testA[single_order]
    FC_Single.append(Sym_single) #### A: single variables only
Value_FC_Single.append(Value_single)
    Y_FC_Single.append(Y_forlist[single_order]) #### B: single variables + one two-way interaction
for Order_twointer_dan in range(0,3):
twointer_dan_Sym = [two_interaction[Order_twointer_dan]]
twointer_dan_Value = two_interaction_values[Order_twointer_dan]
FC_twointer_dan.append( Sym_single + twointer_dan_Sym )
Value_FC_twointer_dan.append( Value_single + [twointer_dan_Value] )
Y_FC_twointer_dan.append(Y_forlist[single_order])
        for Order_2dan_plus_3dan in range(0,1): #### single variables + one two-way + one three-way interaction
Sym_2dan_plus_3dan = [three_interaction[Order_2dan_plus_3dan]]
Value_2dan_plus_3dan = three_interaction_values[Order_2dan_plus_3dan]
FC_2dan_plus_3dan.append( Sym_single +twointer_dan_Sym + Sym_2dan_plus_3dan )
Value_FC_2dan_plus_3dan.append( Value_single + [twointer_dan_Value] + [Value_2dan_plus_3dan] )
Y_FC_2dan_plus_3dan.append(Y_forlist[single_order])
        for Order_2dan_plus_3shuang in range(0,1): #### single variables + one two-way + two three-way interactions
Sym_2dan_plus_3shuang = UDI(three_interaction,1)
Value_2dan_plus_3shuang = UDI_Value(three_interaction_values,1)
Sym_2dan_plus_3shuang_A = Sym_2dan_plus_3shuang[Order_2dan_plus_3shuang]
Value_2dan_plus_3shuang_A = Value_2dan_plus_3shuang[Order_2dan_plus_3shuang]
FC_2dan_plus_3shuang.append( Sym_single +twointer_dan_Sym +[Sym_2dan_plus_3shuang_A ] )
Value_FC_2dan_plus_3shuang.append( Value_single + [twointer_dan_Value] +[ Value_2dan_plus_3shuang_A])
Y_FC_2dan_plus_3shuang.append(Y_forlist[single_order])
        for Order_2dan_plus_3san in range(0,1): #### single variables + one two-way + three three-way interactions
Sym_2dan_plus_3san = UDI(three_interaction,1)
Value_2dan_plus_3san = UDI_Value(three_interaction_values,1)
Sym_2dan_plus_3san_A = Sym_2dan_plus_3san[Order_2dan_plus_3san]
Value_2dan_plus_3san_A = Value_2dan_plus_3san[Order_2dan_plus_3san]
FC_2dan_plus_3san.append( Sym_single + twointer_dan_Sym + [Sym_2dan_plus_3san_A ] )
Value_FC_2dan_plus_3san.append( Value_single + [twointer_dan_Value] + [Value_2dan_plus_3san_A ] )
Y_FC_2dan_plus_3san.append(Y_forlist[single_order])
    for Order_twointer_shuang in range(0,3): #### C: single variables + two two-way interactions
shuangQJ = UDI(two_interaction,2)
Values_shuangQJ = UDI_Value(two_interaction_values,2)
twointer_shuang_Sym = shuangQJ[Order_twointer_shuang]
twointer_shuang_Value = Values_shuangQJ[Order_twointer_shuang]
FC_twointer_shuang.append( Sym_single + twointer_shuang_Sym )
Value_FC_twointer_shuang.append( Value_single + twointer_shuang_Value )
Y_FC_twointer_shuang.append(Y_forlist[single_order])
        for Order_2dan_plus_3dan in range(0,1): #### single variables + two two-way + one three-way interaction
Sym_2dan_plus_3dan = [three_interaction[Order_2dan_plus_3dan]]
Value_2dan_plus_3dan = three_interaction_values[Order_2dan_plus_3dan]
FC_2shuang_plus_3dan.append( Sym_single +twointer_shuang_Sym + Sym_2dan_plus_3dan )
Value_FC_2shuang_plus_3dan.append( Value_single + [twointer_shuang_Value] + [Value_2dan_plus_3dan] )
Y_FC_2shuang_plus_3dan.append(Y_forlist[single_order])
        for Order_2dan_plus_3shuang in range(0,1): #### single variables + two two-way + two three-way interactions
Sym_2dan_plus_3shuang = UDI(three_interaction,1)
Value_2dan_plus_3shuang = UDI_Value(three_interaction_values,1)
Sym_2dan_plus_3shuang_A = Sym_2dan_plus_3shuang[Order_2dan_plus_3shuang]
Value_2dan_plus_3shuang_A = Value_2dan_plus_3shuang[Order_2dan_plus_3shuang]
FC_2shuang_plus_3shuang.append( Sym_single +twointer_shuang_Sym +[Sym_2dan_plus_3shuang_A ] )
Value_FC_2shuang_plus_3shuang.append( Value_single + [twointer_shuang_Value] + [Value_2dan_plus_3shuang_A])
Y_FC_2shuang_plus_3shuang.append(Y_forlist[single_order])
        for Order_2dan_plus_3san in range(0,1): #### single variables + two two-way + three three-way interactions
Sym_2dan_plus_3san = UDI(three_interaction,1)
Value_2dan_plus_3san = UDI_Value(three_interaction_values,1)
Sym_2dan_plus_3san_A = Sym_2dan_plus_3san[Order_2dan_plus_3san]
Value_2dan_plus_3san_A = Value_2dan_plus_3san[Order_2dan_plus_3san]
FC_2shuang_plus_3san.append( Sym_single + twointer_shuang_Sym + [Sym_2dan_plus_3san_A ] )
Value_FC_2shuang_plus_3san.append( Value_single + [twointer_shuang_Value] + [Value_2dan_plus_3san_A ] )
Y_FC_2shuang_plus_3san.append(Y_forlist[single_order])
    for Order_twointer_san in range(0,1): #### D: single variables + three two-way interactions
sanQJ = UDI(two_interaction,3)
Values_sanQJ = UDI_Value(two_interaction_values,3)
twointer_san_Sym = sanQJ[Order_twointer_san]
twointer_san_Value = Values_sanQJ[Order_twointer_san]
FC_twointer_san.append( Sym_single + twointer_san_Sym )
Value_FC_twointer_san.append( Value_single + twointer_san_Value )
Y_FC_twointer_san.append(Y_forlist[single_order])
        for Order_2dan_plus_3dan in range(0,1): #### single variables + three two-way + one three-way interaction
Sym_2dan_plus_3dan = [three_interaction[Order_2dan_plus_3dan]]
Value_2dan_plus_3dan = three_interaction_values[Order_2dan_plus_3dan]
FC_2san_plus_3dan.append( Sym_single +twointer_san_Sym + Sym_2dan_plus_3dan )
Value_FC_2san_plus_3dan.append( Value_single + [twointer_san_Value] + [Value_2dan_plus_3dan] )
Y_FC_2san_plus_3dan.append(Y_forlist[single_order])
    for Order_fourth in range(0,1): #### J: single variables + one four-way interaction
QJ_fourth = UDI(three_interaction,1)
Values_QJ_fourth = UDI_Value(three_interaction_values,1)
fourth_Sym = QJ_fourth[Order_fourth]
fourth_Value = Values_QJ_fourth[Order_fourth]
FC_fourthinter_one.append( Sym_single + [fourth_Sym ] )
Value_FC_fourthinter_one.append( Value_single + [fourth_Value ])
Y_FC_fourthinter_one.append(Y_forlist[single_order])
FC_all_QUANBU = FC_Single + FC_twointer_dan + FC_threeinter_dan + FC_twointer_shuang + FC_threeinter_shuang + FC_threeinter_san+FC_twointer_san + FC_threeinter_si + FC_twointer_si + FC_fourthinter_one + FC_2dan_plus_3dan + FC_2dan_plus_3shuang + FC_2dan_plus_3san + FC_2dan_plus_3si + FC_2dan_plus_4 + FC_2shuang_plus_3dan + FC_2shuang_plus_3shuang + FC_2shuang_plus_3san + FC_2shuang_plus_3si + FC_2shuang_plus_4 + FC_2san_plus_3dan + FC_2san_plus_3shuang + FC_2san_plus_3san + FC_2san_plus_3si + FC_2san_plus_4 + FC_2si_plus_3dan + FC_2si_plus_3shuang + FC_2si_plus_3san + FC_2si_plus_3si + FC_2si_plus_4 + FC_3dan_plus_4 + FC_3shuang_plus_4 + FC_3san_plus_4 + FC_3si_plus_4
Value_all_QUANBU = Value_FC_Single + Value_FC_twointer_dan + Value_FC_threeinter_dan + Value_FC_twointer_shuang + Value_FC_threeinter_shuang + Value_FC_threeinter_san+Value_FC_twointer_san + Value_FC_threeinter_si + Value_FC_twointer_si + Value_FC_fourthinter_one + Value_FC_2dan_plus_3dan + Value_FC_2dan_plus_3shuang + Value_FC_2dan_plus_3san + Value_FC_2dan_plus_3si + Value_FC_2dan_plus_4 + Value_FC_2shuang_plus_3dan + Value_FC_2shuang_plus_3shuang + Value_FC_2shuang_plus_3san + Value_FC_2shuang_plus_3si + Value_FC_2shuang_plus_4 + Value_FC_2san_plus_3dan + Value_FC_2san_plus_3shuang + Value_FC_2san_plus_3san + Value_FC_2san_plus_3si + Value_FC_2san_plus_4 + Value_FC_2si_plus_3dan + Value_FC_2si_plus_3shuang + Value_FC_2si_plus_3san + Value_FC_2si_plus_3si + Value_FC_2si_plus_4 + Value_FC_3dan_plus_4 + Value_FC_3shuang_plus_4 + Value_FC_3san_plus_4 + Value_FC_3si_plus_4
Y_all_QUANBU = Y_FC_Single + Y_FC_twointer_dan + Y_FC_threeinter_dan + Y_FC_twointer_shuang + Y_FC_threeinter_shuang + Y_FC_threeinter_san+ Y_FC_twointer_san + Y_FC_threeinter_si + Y_FC_twointer_si + Y_FC_fourthinter_one + Y_FC_2dan_plus_3dan + Y_FC_2dan_plus_3shuang + Y_FC_2dan_plus_3san + Y_FC_2dan_plus_3si + Y_FC_2dan_plus_4 + Y_FC_2shuang_plus_3dan + Y_FC_2shuang_plus_3shuang + Y_FC_2shuang_plus_3san + Y_FC_2shuang_plus_3si + Y_FC_2shuang_plus_4 + Y_FC_2san_plus_3dan + Y_FC_2san_plus_3shuang + Y_FC_2san_plus_3san + Y_FC_2san_plus_3si + Y_FC_2san_plus_4 + Y_FC_2si_plus_3dan + Y_FC_2si_plus_3shuang + Y_FC_2si_plus_3san + Y_FC_2si_plus_3si + Y_FC_2si_plus_4 + Y_FC_3dan_plus_4 + Y_FC_3shuang_plus_4 + Y_FC_3san_plus_4 + Y_FC_3si_plus_4
Second_Q_Func_ALL = []
for du_2rd,du_2nd in enumerate(FC_all_QUANBU):
if FC_all_QUANBU[du_2rd][0] == 'chla1':
m2 = [FC_all_QUANBU[du_2rd][0]] + ['chla2'] + FC_all_QUANBU[du_2rd][1:]
Second_Q_Func_ALL.append(m2)
Third_Q_Func_ALL = []
for du_3rd,du_3nd in enumerate(FC_all_QUANBU):
if FC_all_QUANBU[du_3rd][0] == 'chla1':
m3 = [FC_all_QUANBU[du_3rd][0]] + ['chla2'] + ['chla3'] + FC_all_QUANBU[du_3rd][1:]
Third_Q_Func_ALL.append(m3)
Fourth_Q_Func_ALL = []
for du_4rd,du_4nd in enumerate(FC_all_QUANBU):
if FC_all_QUANBU[du_4rd][0] == 'chla1':
m4 = [FC_all_QUANBU[du_4rd][0]] + ['chla2'] + ['chla3'] + ['chla4']+ FC_all_QUANBU[du_4rd][1:]
Fourth_Q_Func_ALL.append(m4)
VL_Second_Q = []
for du_2rd,du_2nd in enumerate(Value_all_QUANBU ):
if Value_all_QUANBU [du_2rd][0].name == 'chla1':
m2 = [Value_all_QUANBU [du_2rd][0]] + [Value_all_QUANBU [du_2rd][0]**2] + Value_all_QUANBU [du_2rd][1:]
VL_Second_Q.append(m2)
VL_Third_Q = []
for du_3rd,du_3nd in enumerate(Value_all_QUANBU):
if Value_all_QUANBU[du_3rd][0].name == 'chla1':
m3 = [Value_all_QUANBU[du_3rd][0]] + [Value_all_QUANBU[du_3rd][0]**2] + [Value_all_QUANBU[du_3rd][0]**3]+ Value_all_QUANBU[du_3rd][1:]
VL_Third_Q.append(m3)
VL_Foutrh_Q = []
for du_4rd,du_4nd in enumerate(Value_all_QUANBU):
if Value_all_QUANBU[du_4rd][0].name == 'chla1':
m4 = [Value_all_QUANBU[du_4rd][0]] + [Value_all_QUANBU[du_4rd][0]**2] + [Value_all_QUANBU[du_4rd][0]**3]+ [Value_all_QUANBU[du_4rd][0]**4]+ Value_all_QUANBU[du_4rd][1:]
VL_Foutrh_Q.append(m4)
Y_Second_Q = []
for du_2rd,du_2nd in enumerate(Value_all_QUANBU):
if Value_all_QUANBU[du_2rd][0].name == 'chla1':
m2 = Y_all_QUANBU[du_2rd]
Y_Second_Q.append(m2)
Y_Third_Q = []
for du_3rd,du_3nd in enumerate(Value_all_QUANBU):
if Value_all_QUANBU[du_3rd][0].name == 'chla1':
m3 = Y_all_QUANBU[du_3rd]
Y_Third_Q.append(m3)
Y_Foutrh_Q = []
for du_4rd,du_4nd in enumerate(Value_all_QUANBU):
if Value_all_QUANBU[du_4rd][0].name == 'chla1':
m4 =Y_all_QUANBU[du_4rd]
Y_Foutrh_Q .append(m4)
Y_all_57 = Y_all_QUANBU + Y_Second_Q + Y_Third_Q + Y_Foutrh_Q
FC_all_57 = FC_all_QUANBU + Second_Q_Func_ALL + Third_Q_Func_ALL + Fourth_Q_Func_ALL
VL_all_57 = Value_all_QUANBU + VL_Second_Q + VL_Third_Q + VL_Foutrh_Q
#%%
CLASS_res = []
module_ADD_CHANGSHU = 328.22
ForcastName = 'CO2'
XZXsp = 0.05
zxqj = 0.6
#Carbon_neutralization = 5
for otwe,DJLmmot in enumerate(VL_all_57):
for TYPE in ['OLS','RLM','MID']:
try:
logbl = pd.concat(DJLmmot,axis=1)
logbl.columns= FC_all_57[otwe]
logbl = sm.add_constant(logbl)
UTR = FC_all_57[otwe][:4]
Yfor = Y_all_57[otwe]
            olspp = sm.OLS(Yfor, logbl).fit()
            Model_Standerror = np.round(np.mean(olspp.get_prediction().se_mean), 3)
            if TYPE == 'OLS':
                ols_r = olspp
baseline,conf_left,conf_right,MODEL_res = FuntOUT( olspp, ols_r,'OLS',XZXsp,zxqj,UTR)
GAS_TG_Base,allmit,blz,bly,flz,fly = Flux_Canculate(module_ADD_CHANGSHU,baseline,UTR,Model_Standerror)[0:6]
if 200 <= GAS_TG_Base <= 800:
#GAS_TG_left = Flux_Canculate(module_ADD_CHANGSHU,conf_left,UTR)[0]
#GAS_TG_right = Flux_Canculate(module_ADD_CHANGSHU,conf_right,UTR)[0]
GAS_pd = pd.concat( [MODEL_res,pd.DataFrame([GAS_TG_Base]),pd.DataFrame([blz]),pd.DataFrame([bly]),
pd.DataFrame(allmit),
pd.DataFrame([flz]).T,pd.DataFrame([fly]).T,
pd.DataFrame([TYPE]), pd.DataFrame([len(logbl)]) ],axis=1 )
GAS_pd.columns = ['ALL_Function','SigniFicant_baseline','conf_left','conf_right','r2','r2_adj','aic','bic','Gas_base','Gas_z','Gas_y','ALLMIT','ALLMITz','ALLMITy','TYPE','n']
CLASS_res.append(GAS_pd.reset_index(drop=True))
if TYPE == 'RLM':
                ols_r = olspp
rlm_r = sm.RLM(Yfor, logbl).fit()
baseline,conf_left,conf_right,MODEL_res = FuntOUT(olspp,rlm_r,'RLM',XZXsp,zxqj,UTR)
GAS_TG_Base,allmit,blz,bly,flz,fly = Flux_Canculate(module_ADD_CHANGSHU,baseline,UTR,Model_Standerror)[0:6]
if 200 <= GAS_TG_Base <= 800:
#GAS_TG_left = Flux_Canculate(module_ADD_CHANGSHU,conf_left,UTR)[0]
#GAS_TG_right = Flux_Canculate(module_ADD_CHANGSHU,conf_right,UTR)[0]
GAS_pd = pd.concat( [MODEL_res,pd.DataFrame([GAS_TG_Base]),pd.DataFrame([blz]),pd.DataFrame([bly]),
pd.DataFrame(allmit),
pd.DataFrame([flz]).T,pd.DataFrame([fly]).T,
pd.DataFrame([TYPE]), pd.DataFrame([len(logbl)]) ],axis=1 )
GAS_pd.columns = ['ALL_Function','SigniFicant_baseline','conf_left','conf_right','r2','r2_adj','aic','bic','Gas_base','Gas_z','Gas_y','ALLMIT','ALLMITz','ALLMITy','TYPE','n']
CLASS_res.append(GAS_pd.reset_index(drop=True))
if TYPE == 'MID':
mid_r = QREG.QuantReg(Yfor, logbl).fit(q=0.5)
                baseline,conf_left,conf_right,MODEL_res = FuntOUT(olspp, mid_r, 'MID', XZXsp, zxqj, UTR)
GAS_TG_Base,allmit,blz,bly,flz,fly = Flux_Canculate(module_ADD_CHANGSHU,baseline,UTR,Model_Standerror)[0:6]
if 200 <= GAS_TG_Base <= 800:
#GAS_TG_left = Flux_Canculate(module_ADD_CHANGSHU,conf_left,UTR)[0]
#GAS_TG_right = Flux_Canculate(module_ADD_CHANGSHU,conf_right,UTR)[0]
GAS_pd = pd.concat( [MODEL_res, | pd.DataFrame([GAS_TG_Base]) | pandas.DataFrame |
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
# Defining important column names within the dataset
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
tmt_mod_regex = re.compile(r"\w(\d+)\((.+)\)")
# Defining important regular expression patterns to parse the dataset
regex_glycan_number_pattern = r"\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = r"\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile(r"(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile(r"(\w+)\((\d+)\)")
# Keep only PSM collections that are not made up exclusively of unglycosylated ("U") peptides
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
# Keep PSM collections that contain an unglycosylated ("U") form alongside glycosylated ones
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
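# Illustrative sketch (not part of the original pipeline): the two predicates above are
# meant to be used with pandas groupby-filter. The grouping keys and the toy frame name
# "psm_df" are assumptions for demonstration only; the real keys may differ.
def _example_psm_filtering(psm_df):
    # keep PSM collections that contain at least one glycosylated identification
    glycosylated = psm_df.groupby(["Peptides", "Position"]).filter(filter_U_only)
    # of those, keep collections that also carry an unglycosylated ("U") form
    mixed = psm_df.groupby(["Peptides", "Position"]).filter(filter_with_U)
    return glycosylated, mixed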
# parse modification mass and convert it from string to float
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
# load fasta file into a dictionary
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
with open(fasta_file_path, "rt") as fasta_file:
        result = {}
        current_seq = None
        for line in fasta_file:
            line = line.strip()
            if line.startswith(">"):
                if selected and selected_prefix + line[1:] not in selected:
                    # skip entries that were not requested
                    current_seq = None
                else:
                    result[line[1:]] = ""
                    current_seq = line[1:]
            elif current_seq is not None:
                result[current_seq] += line
return result
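# Illustrative sketch: minimal use of load_fasta. The file name, accession and prefix are
# placeholders, not values from the original project.
def _example_load_fasta():
    sequences = load_fasta("proteins.fasta", selected=["sp|P12345"], selected_prefix="sp|")
    for header, seq in sequences.items():
        print(header, len(seq))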
# Storing analysis result for each protein
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def separate_result(self):
normal_header = []
df = self.df
for c in df.columns:
if c in {"Protein", "Peptides", "Position", "Glycans"}:
normal_header.append(c)
else:
yield Result(df[normal_header+[c]])
def calculate_proportion(self, occupancy=True, separate_sample_df=False):
"""
calculate proportion of each glycoform from the dataset
:type occupancy: bool
        whether or not to calculate the proportion as occupancy, which includes the unglycosylated form.
"""
df = self.df.copy()
#print(df)
grouping_peptides = [# "Isoform",
"Peptides", "Position"]
grouping_position = [# "Isoform",
"Position"]
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = grouping_peptides
else:
gr = grouping_position
for _, g in df.groupby(gr):
if "Value" in g.columns:
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
else:
for c in g.columns:
if c not in {"Protein", "Peptides", "Position", "Glycans"}:
total = g[c].sum()
for i, r in g.iterrows():
df.at[i, c] = r[c] / total
if separate_sample_df:
return [df[gr + [c]] for c in df.columns]
return df
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
"""
:type trust_byonic: bool
        whether or not to calculate raw values for each individual position assigned by Byonic
        :type occupancy: bool
        whether or not to calculate the proportion as occupancy, which includes the unglycosylated form.
:type df: pd.DataFrame
"""
grouping_peptides = [# "Isoform",
"Peptides", "Position", "Glycans"]
grouping_position = [# "Isoform",
"Position", "Glycans"]
if df is None:
df = self.df
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index(grouping_position)
else:
temp = df.set_index(grouping_peptides)
if "Value" in temp.columns:
temp.rename(columns={"Value": name}, inplace=True)
else:
temp = temp.rename(columns={k: name for k in temp.columns if k not in {"Protein", "Peptides", "Position", "Glycans"}})
#print(temp)
return temp
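# Illustrative sketch: building a Result from a toy long-format table and converting it to
# per-peptide glycoform proportions. Identifiers and intensities are made up.
def _example_result_usage():
    toy = pd.DataFrame({
        "Protein": ["sp|P00001"] * 3,
        "Peptides": ["NVSEK"] * 3,
        "Position": [42] * 3,
        "Glycans": ["U", "HexNAc(2)Hex(5)", "HexNAc(2)Hex(6)"],
        "Value": [2.0, 1.0, 1.0],
    })
    res = Result(toy)
    proportions = res.calculate_proportion(occupancy=True)  # each Value divided by its group total
    summary = res.to_summary(proportions, name="sample_1")  # indexed by protein/peptide/position/glycan
    return proportions, summary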
# Object containing each individual protein; much of the analysis logic is contained within this object.
# Each protein is assigned a GlypnirOComponent object holding a subset of its PD and Byonic data.
class GlypnirOComponent:
def __init__(self, filename, area_filename=None, replicate_id=None, condition_id=None, protein_name=None, protein_column=protein_column_name, minimum_score=0, trust_byonic=False, legacy=False, mode=1):
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
self.protein_column = protein_column
self.sequence_column = None
self.glycans_column = None
self.starting_position_column = None
self.modifications_column = None
self.observed_mz_column = None
if type(filename) == pd.DataFrame:
data = filename.copy()
else:
if filename.endswith(".xlsx"):
data = pd.read_excel(filename, sheet_name="Spectra")
elif filename.endswith(".txt"):
data = pd.read_csv(filename, sep="\t")
if mode == 1:
self.protein_column = protein_column_name
self.sequence_column = sequence_column_name
self.glycans_column = glycans_column_name
self.starting_position_column = starting_position_column_name
self.modifications_column = modifications_column
self.observed_mz_column = observed_mz_column_name
if area_filename is not None:
if type(area_filename) == pd.DataFrame:
file_with_area = area_filename
else:
if area_filename.endswith("xlsx"):
file_with_area = pd.read_excel(area_filename)
else:
file_with_area = | pd.read_csv(area_filename, sep="\t") | pandas.read_csv |
## Convert .Bed to .HDF5 file (saving mean and std genotype separately)
import pandas as pd
import os
from pysnptools.snpreader import SnpData
from pysnptools.snpreader import Pheno, Bed
import h5py
import numpy as np
from tqdm import tqdm
import argparse
def main(args):
genome_path = args.genome_path
phenotype_path = args.phenotype_path
phenotype = pd.read_csv(phenotype_path)
snp_on_disk = Bed(genome_path, count_A1=True)
iid = pd.DataFrame(snp_on_disk.iid, columns=['FID','IID'])
phenotype['IID'] = phenotype['IID'].astype(str)
phenotype['FID'] = phenotype['FID'].astype(str)
iid_merged = | pd.merge(iid, phenotype, on=['IID','FID']) | pandas.merge |
# DOES NOT HAVE THE DATA THAT IS NEEDED
# UNPROCESSED; TOO MUCH WORK FOR FEWER THAN 100k USEFUL RECORDS
# %%
import os
import pandas as pd
import numpy as np
import datetime
from scripts import motor, quitardecimal, valores, modelogeneral, especifico, origensegunvin, version, modelogenerico, especifico2, corregirmodelo, segmentacion, cilindrada, traccion, versionespecifico, versionurgencia, marca
pd.set_option('display.max_colwidth', -1)
# %% DATA LOADING
path = r'D:\Basededatos\Origen\Panama\8701'
os.chdir(path)
files = os.listdir(path)
files
files_xls = [f for f in files if f[-3:] == 'xls']
files_xls
panama = pd.DataFrame()
for f in files_xls:
data = pd.read_excel(f)
panama = pd.concat([panama , data], ignore_index=True, join='outer')
# %% USEFUL COLUMNS
panama["MERCADO"] = "PANAMA"
panama["CANTIDAD"] = 1
panama.rename(columns={
"COD_PAIS_PROCEDEN": "ORIGEN",
"COD_ARANCELARIO": "DATOS PERSONALES",
'CANT_COMERCIAL': "CANTIDAD",
'ESPEC_MERC': 'MODELO/VERSION'
},
inplace=True)
panama["TIPO_VEHICULO"] = None
panama["SEGMENTO.1"] = None
panama["MODELO"] = None
panama["VERSION"] = None
panama["MODELO GENERICO"] = None
panama["CARROCERIA"] = None
panama["MARCA"] = None
panama["AÑO"] = None
panama["MOTOR"] = None
panama["CILINDRADA"] = None
panama["CILINDROS"] = None
panama["TRANSMISION"] = None
panama["TRACCION"] = None
panama["COMBUSTIBLE"] = None
panama["NUMERO CHASIS / VIN"] = None
panama["NUMERO MOTOR"] = None
panama["DISPOSICION CILINDROS"] = None
columnasutiles = ["MERCADO",
"TIPO_VEHICULO",
"SEGMENTO.1",
"MARCA",
"MODELO GENERICO",
"MODELO",
"MODELO/VERSION",
"VERSION",
"AÑO",
"ORIGEN",
"MOTOR",
"CARROCERIA",
"CILINDRADA",
"CILINDROS",
"COMBUSTIBLE",
"DISPOSICION CILINDROS",
"TRANSMISION",
"TRACCION",
"NUMERO CHASIS / VIN",
"NUMERO MOTOR",
"DATOS PERSONALES",
"CANTIDAD"
]
panama = panama[columnasutiles]
# %%
panama.dropna(inplace=True, how="all")
# %%
panama["MODELO/VERSION"] = panama["MODELO/VERSION"].astype(str)
panama["MODELO/VERSION"] = panama["MODELO/VERSION"].str.strip()
panama["MODELO/VERSION"] = panama["MODELO/VERSION"].str.upper()
panama = marca(panama)
# %%
condicion = panama["MARCA"].isna()
panama.loc[condicion, "MODELO/VERSION"][380:].head(20)
# %%
valores(panama[condicion], "MODELO/VERSION")
# %%
panama.info()
# %%
valores(panama, "MARCA")
# %% TIPO_VEHICULO
listatipo = r'''(TRACTOR|MOTOCICLETA|AUTO|CAMION|TRUCK)'''
panama["TIPO_VEHICULO"] = panama["MODELO/VERSION"].str.extract(listatipo, expand=False).str.strip()
listasegmento = r'''(PICK-UP|PICKUP|PICK UP)'''
panama["SEGMENTO.1"] = panama["MODELO/VERSION"].str.extract(listasegmento, expand=False).str.strip()
condicion = panama["SEGMENTO.1"].isin(["PICK-UP", "PICKUP", "PICK UP"])
panama.loc[condicion,"TIPO_VEHICULO"] = "AUTO"
# %% SEGMENTO.1 CHECK
#condicion = panama["SEGMENTO.1"].isna()
#panama[condicion][50:100]
# %% MARCA
# SEARCH FOR THE BRAND (MARCA) IN THE TEXT
regex = r'MARCA\s+\b(\w+)\b'
regex2 = r'MARCA:\s+\b(\w+)\b'
regex3 = r'marca\s+\b(\w+)\b'
regex4 = r'marca:\s+\b(\w+)\b'
regex5 = r'Marca\s+\b(\w+)\b'
regex6 = r'Marca:\s+\b(\w+)\b'
listamarca = r'''(vikino|KIA|HYUNDAI|FREIGHTLINER|INTERNATIONAL|CAPACITY|HERO|yamaha|suzuki|kawasaki|SUZUKI|Suzuki|BISEK|YAMAHA|<NAME>|PRATO FORNE|KAWWASAKI|HENGNIU|FREIGHT LINER|FORD|HONDA|KAWASAKI|vikyno|AVA)'''
panama["MARCA"] = panama["DESCRIPCION"].str.extract(regex, expand=False).str.strip()
panama["MARCA2"] = panama["DESCRIPCION"].str.extract(regex2, expand=False).str.strip()
panama["MARCA3"] = panama["DESCRIPCION"].str.extract(regex3, expand=False).str.strip()
panama["MARCA4"] = panama["DESCRIPCION"].str.extract(regex4, expand=False).str.strip()
panama["MARCA5"] = panama["DESCRIPCION"].str.extract(regex5, expand=False).str.strip()
panama["MARCA6"] = panama["DESCRIPCION"].str.extract(regex6, expand=False).str.strip()
panama["MARCA7"] = panama["DESCRIPCION"].str.extract(listamarca, expand=False).str.strip()
# MERGE INTO A SINGLE COLUMN
condicion = panama["MARCA2"].notna()
condicion2 = panama["MARCA3"].notna()
condicion3 = panama["MARCA4"].notna()
condicion4 = panama["MARCA5"].notna()
condicion5 = panama["MARCA6"].notna()
condicion6 = panama["MARCA7"].notna()
panama.loc[condicion, "MARCA"] = panama.loc[condicion, "MARCA2"]
panama.loc[condicion2, "MARCA"] = panama.loc[condicion2, "MARCA3"]
panama.loc[condicion3, "MARCA"] = panama.loc[condicion3, "MARCA4"]
panama.loc[condicion4, "MARCA"] = panama.loc[condicion4, "MARCA5"]
panama.loc[condicion5, "MARCA"] = panama.loc[condicion5, "MARCA6"]
panama.loc[condicion6, "MARCA"] = panama.loc[condicion6, "MARCA7"]
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA"]]
# %% MARCA CHECK
#tienemarca = r'''(MARCA|marca|MARCA:|marca:|Marca)'''
#panama["verificacionmarca"] = panama["DESCRIPCION"].str.extract(tienemarca, expand=False).str.strip()
#condicion = panama["verificacionmarca"].notna() & panama["MARCA"].isna()
#panama[condicion]
# %% CILINDROS.1
regex = r'(\d+\d)?\S*CC'
regex2 = r'(\d+\d)?\S*C.C'
regex3 = r'(\d+\d)?\S*C.C.'
regex4 = r'(\d+\d)?\S*cc'
regex5 = r'(\d+\d)?\S*c.c'
regex6 = r'(\d+\d)?\S*c.c.'
regex7 = r'(\d+\d)?\S* CC'
regex8 = r'(\d+\d)?\S* C.C'
regex9 = r'(\d+\d)?\S* C.C.'
regex10 = r'(\d+\d)?\S* cc'
regex11 = r'(\d+\d)?\S* c.c'
regex12 = r'(\d+\d)?\S* c.c.'
panama["CILINDROS.1"] = panama["DESCRIPCION"].str.extract(regex, expand=False).str.strip()
panama["CILINDROS.2"] = panama["DESCRIPCION"].str.extract(regex2, expand=False).str.strip()
panama["CILINDROS.3"] = panama["DESCRIPCION"].str.extract(regex3, expand=False).str.strip()
panama["CILINDROS.4"] = panama["DESCRIPCION"].str.extract(regex4, expand=False).str.strip()
panama["CILINDROS.5"] = panama["DESCRIPCION"].str.extract(regex5, expand=False).str.strip()
panama["CILINDROS.6"] = panama["DESCRIPCION"].str.extract(regex6, expand=False).str.strip()
panama["CILINDROS.7"] = panama["DESCRIPCION"].str.extract(regex7, expand=False).str.strip()
panama["CILINDROS.8"] = panama["DESCRIPCION"].str.extract(regex8, expand=False).str.strip()
panama["CILINDROS.9"] = panama["DESCRIPCION"].str.extract(regex9, expand=False).str.strip()
panama["CILINDROS.10"] = panama["DESCRIPCION"].str.extract(regex10, expand=False).str.strip()
panama["CILINDROS.11"] = panama["DESCRIPCION"].str.extract(regex11, expand=False).str.strip()
panama["CILINDROS.12"] = panama["DESCRIPCION"].str.extract(regex12, expand=False).str.strip()
# MERGE INTO A SINGLE COLUMN
condicion = panama["CILINDROS.2"].notna()
condicion2 = panama["CILINDROS.3"].notna()
condicion3 = panama["CILINDROS.4"].notna()
condicion4 = panama["CILINDROS.5"].notna()
condicion5 = panama["CILINDROS.6"].notna()
condicion6 = panama["CILINDROS.7"].notna()
condicion7 = panama["CILINDROS.8"].notna()
condicion8 = panama["CILINDROS.9"].notna()
condicion9 = panama["CILINDROS.10"].notna()
condicion10 = panama["CILINDROS.11"].notna()
condicion11 = panama["CILINDROS.12"].notna()
panama.loc[condicion, "CILINDROS.1"] = panama.loc[condicion, "CILINDROS.2"]
panama.loc[condicion2, "CILINDROS.1"] = panama.loc[condicion2, "CILINDROS.3"]
panama.loc[condicion3, "CILINDROS.1"] = panama.loc[condicion3, "CILINDROS.4"]
panama.loc[condicion4, "CILINDROS.1"] = panama.loc[condicion4, "CILINDROS.5"]
panama.loc[condicion5, "CILINDROS.1"] = panama.loc[condicion5, "CILINDROS.6"]
panama.loc[condicion6, "CILINDROS.1"] = panama.loc[condicion6, "CILINDROS.7"]
panama.loc[condicion7, "CILINDROS.1"] = panama.loc[condicion7, "CILINDROS.8"]
panama.loc[condicion8, "CILINDROS.1"] = panama.loc[condicion8, "CILINDROS.9"]
panama.loc[condicion9, "CILINDROS.1"] = panama.loc[condicion9, "CILINDROS.10"]
panama.loc[condicion10, "CILINDROS.1"] = panama.loc[condicion10, "CILINDROS.11"]
panama.loc[condicion11, "CILINDROS.1"] = panama.loc[condicion11, "CILINDROS.12"]
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1"]]
# %% CILINDROS.1 CHECK
#condicion = panama["CILINDROS.1"].isna()
#panama[condicion][50:100]
# %% MODELO
# SEARCH FOR THE MODEL (MODELO) IN THE TEXT
# SEARCH FOR THE TEXT AFTER THE WORD MODELO
regex = r'MODELO\s+\b(\w+)\b'
regex2 = r'MODELO:\s+\b(\w+)\b'
regex3 = r'MODELO,\s+\b(\w+)\b'
regex4 = r'modelo\s+\b(\w+)\b'
regex5 = r'modelo:\s+\b(\w+)\b'
regex6 = r'modelo,\s+\b(\w+)\b'
regex7 = r'Modelo\s+\b(\w+)\b'
regex8 = r'Modelo:\s+\b(\w+)\b'
regex9 = r'Modelo,\s+\b(\w+)\b'
regex10 = r'MODEL\s+\b(\w+)\b'
listamodelo = r'''(HIACE|HI ACE|W41|COASTER|L300|H-1)'''
# APPLY EACH REGEX
panama["MODELO"] = panama["DESCRIPCION"].str.extract(regex, expand=False).str.strip()
panama["MODELO2"] = panama["DESCRIPCION"].str.extract(regex2, expand=False).str.strip()
panama["MODELO3"] = panama["DESCRIPCION"].str.extract(regex3, expand=False).str.strip()
panama["MODELO4"] = panama["DESCRIPCION"].str.extract(regex4, expand=False).str.strip()
panama["MODELO5"] = panama["DESCRIPCION"].str.extract(regex5, expand=False).str.strip()
panama["MODELO6"] = panama["DESCRIPCION"].str.extract(regex6, expand=False).str.strip()
panama["MODELO7"] = panama["DESCRIPCION"].str.extract(regex7, expand=False).str.strip()
panama["MODELO8"] = panama["DESCRIPCION"].str.extract(regex8, expand=False).str.strip()
panama["MODELO9"] = panama["DESCRIPCION"].str.extract(regex9, expand=False).str.strip()
panama["MODELO10"] = panama["DESCRIPCION"].str.extract(regex10, expand=False).str.strip()
panama["MODELO11"] = panama["DESCRIPCION"].str.extract(listamodelo, expand=False).str.strip()
# SPECIAL CASE: TRACTORS
tractores = (panama["SEGMENTO.1"] == "TRACTO") & (panama["MODELO"].isna())
for regex_serie in (r'SERIE\s+\b(\w+)\b', r'SERIE NO.\s+\b(\w+)\b', r'SERIE#\s+\b(\w+)\b'):
    faltantes = tractores & panama["MODELO"].isna()
    panama.loc[faltantes, "MODELO"] = panama.loc[faltantes, "DESCRIPCION"].str.extract(regex_serie, expand=False).str.strip()
# MERGE INTO A SINGLE COLUMN
condicion = panama["MODELO2"].notna()
condicion2 = panama["MODELO3"].notna()
condicion3 = panama["MODELO4"].notna()
condicion4 = panama["MODELO5"].notna()
condicion5 = panama["MODELO6"].notna()
condicion6 = panama["MODELO7"].notna()
condicion7 = panama["MODELO8"].notna()
condicion8 = panama["MODELO9"].notna()
condicion9 = panama["MODELO10"].notna()
condicion10 = panama["MODELO11"].notna()
panama.loc[condicion, "MODELO"] = panama.loc[condicion, "MODELO2"]
panama.loc[condicion2, "MODELO"] = panama.loc[condicion2, "MODELO3"]
panama.loc[condicion3, "MODELO"] = panama.loc[condicion3, "MODELO4"]
panama.loc[condicion4, "MODELO"] = panama.loc[condicion4, "MODELO5"]
panama.loc[condicion5, "MODELO"] = panama.loc[condicion5, "MODELO6"]
panama.loc[condicion6, "MODELO"] = panama.loc[condicion6, "MODELO7"]
panama.loc[condicion7, "MODELO"] = panama.loc[condicion7, "MODELO8"]
panama.loc[condicion8, "MODELO"] = panama.loc[condicion8, "MODELO9"]
panama.loc[condicion9, "MODELO"] = panama.loc[condicion9, "MODELO10"]
panama.loc[condicion10, "MODELO"] = panama.loc[condicion10, "MODELO11"]
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO"]]
# %% MODELO CHECK
#condicion = panama["MODELO"].isna()
#panama[condicion][500:550]
# %% VIN OR SERIAL NUMBERS
# SEARCH FOR THE TEXT AFTER THE WORD VIN
regex1 = r'VIN\s+\b(\w+)\b'
regex2 = r'VIN:\s+\b(\w+)\b'
regex3 = r'CHASIS\s+\b(\w+)\b'
regex4 = r'CHARSIS\s+\b(\w+)\b'
# APPLY EACH REGEX
panama["NUMERO CHASIS / VIN"] = panama["DESCRIPCION"].str.extract(regex1, expand=False).str.strip()
panama["NUMERO CHASIS / VIN2"] = panama["DESCRIPCION"].str.extract(regex2, expand=False).str.strip()
panama["NUMERO CHASIS / VIN3"] = panama["DESCRIPCION"].str.extract(regex3, expand=False).str.strip()
panama["NUMERO CHASIS / VIN4"] = panama["DESCRIPCION"].str.extract(regex4, expand=False).str.strip()
# MERGE INTO A SINGLE COLUMN
condicion = panama["NUMERO CHASIS / VIN2"].notna()
condicion2 = panama["NUMERO CHASIS / VIN3"].notna()
condicion3 = panama["NUMERO CHASIS / VIN4"].notna()
panama.loc[condicion, "NUMERO CHASIS / VIN"] = panama.loc[condicion, "NUMERO CHASIS / VIN2"]
panama.loc[condicion2, "NUMERO CHASIS / VIN"] = panama.loc[condicion2, "NUMERO CHASIS / VIN3"]
panama.loc[condicion3, "NUMERO CHASIS / VIN"] = panama.loc[condicion3, "NUMERO CHASIS / VIN4"]
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO", "NUMERO CHASIS / VIN"]]
# %% transmision
listatransmision = r'''(MANUAL|AUTOMATICO)'''
panama["TRANSMISION"] = panama["DESCRIPCION"].str.extract(listatransmision, expand=False).str.strip()
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO", "NUMERO CHASIS / VIN", "TRANSMISION"]]
# %% COMBUSTIBLE
listacombustible = r'''(DIESEL|GASOLINA)'''
panama["COMBUSTIBLE"] = panama["DESCRIPCION"].str.extract(listacombustible, expand=False).str.strip()
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO", "NUMERO CHASIS / VIN", "TRANSMISION", "COMBUSTIBLE"]]
# %% MOTOR
regex1 = r'MOTOR\s+\b(\w+)\b'
panama["NUMERO MOTOR"] = panama["DESCRIPCION"].str.extract(regex1, expand=False).str.strip()
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO", "NUMERO CHASIS / VIN", "TRANSMISION", "COMBUSTIBLE", "NUMERO MOTOR"]]
# %% CILINDROS
regex1 = r'(\w+)\s+CILINDROS'
regex2 = r'(\w+)\s+CILINDRO'
regex3 = r'(\d+)+CIL'
regex4 = r'(\d+)+CILS'
panama["CILINDROS"] = panama["DESCRIPCION"].str.extract(regex1, expand=False).str.strip()
panama["CILINDROS2"] = panama["DESCRIPCION"].str.extract(regex2, expand=False).str.strip()
panama["CILINDROS3"] = panama["DESCRIPCION"].str.extract(regex3, expand=False).str.strip()
panama["CILINDROS4"] = panama["DESCRIPCION"].str.extract(regex4, expand=False).str.strip()
condicion = panama["CILINDROS2"].notna()
condicion2 = panama["CILINDROS3"].notna()
condicion3 = panama["CILINDROS4"].notna()
panama.loc[condicion, "CILINDROS"] = panama.loc[condicion, "CILINDROS2"]
panama.loc[condicion2, "CILINDROS"] = panama.loc[condicion2, "CILINDROS3"]
panama.loc[condicion3, "CILINDROS"] = panama.loc[condicion3, "CILINDROS4"]
panama = panama[["FECHA", "DESCRIPCION", "CILINDRADA", "SEGMENTO.1", "MARCA", "CILINDROS.1", "MODELO", "NUMERO CHASIS / VIN", "CILINDROS"]]
# %%
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import pandas as pd
import numpy as np
import networkx as nx
from absl import logging as lg
from gensim.models.doc2vec import Doc2Vec
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import manhattan_distances
from scipy.spatial import distance_matrix as d_matrix
from yacos.essential import IO
class Similarity:
"""Static class to measuse the similarity between programs."""
__version__ = '1.0.0'
__d2v_model_llvm_seq = None
__d2v_model_syntax_seq = None
__d2v_model_syntax_token_kind = None
__d2v_model_syntax_token_kind_variable = None
__d2v_dir = 'yacos/doc2vec'
@staticmethod
def __populate_data(training_benchmarks,
training_directory,
test_benchmarks,
test_directory):
"""Create test and training data.
Parameters
----------
training_benchmarks : list
training_directory : str
        test_benchmarks : list
test_directory : str
Returns
-------
training_data : pandas.DataFrame
test_data : pandas.DataFrame
"""
training_data = {}
for training_benchmark in training_benchmarks:
index = training_benchmark.find('.')
suite_name = training_benchmark[:index]
bench_name = training_benchmark[index+1:]
benchmark_dir = os.path.join(training_directory,
suite_name)
data = IO.load_yaml_or_fail('{}/{}.yaml'.format(benchmark_dir,
bench_name))
if data:
training_data[training_benchmark] = data
if not training_data:
lg.error('Training features do not exist.')
sys.exit(1)
test_data = {}
for test_benchmark in test_benchmarks:
index = test_benchmark.find('.')
suite_name = test_benchmark[:index]
bench_name = test_benchmark[index+1:]
benchmark_dir = os.path.join(test_directory,
suite_name)
data = IO.load_yaml_or_fail('{}/{}.yaml'.format(benchmark_dir,
bench_name))
if data:
test_data[test_benchmark] = data
if not test_data:
            lg.error('Test features do not exist.')
sys.exit(1)
training_data = pd.DataFrame.from_dict(training_data, orient='index')
test_data = pd.DataFrame.from_dict(test_data, orient='index')
return training_data, test_data
@staticmethod
def __get_root(g):
"""Find the root node.
Parameters
----------
g : networkx
"""
root = None
for node in g.nodes(data=True):
if 'root' in node[1]:
root = node[0]
break
else:
lg.warning('Root node not found (using node 0 as root).')
return 0
return root
@staticmethod
def __node_match_strong(g1_node, g2_node):
return g1_node == g2_node
@staticmethod
def __node_match_weak(g1_node, g2_node):
g1_attribute = g1_node['attr'] if 'attr' in g1_node else 'not found'
g2_attribute = g2_node['attr'] if 'attr' in g2_node else 'not found'
return g1_attribute == g2_attribute
@staticmethod
def __edge_match(g1_edge, g2_edge):
return g1_edge == g2_edge
@staticmethod
def __load_doc2vec_model_syntax_seq():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_seq.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
Similarity.__d2v_model_syntax_seq = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_syntax_token_kind():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_token_kind.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
Similarity.__d2v_model_syntax_token_kind = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_syntax_token_kind_variable():
"""Load a doc2vec model."""
MODEL = 'd2v_syntax_token_kind_variable.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
        Similarity.__d2v_model_syntax_token_kind_variable = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def __load_doc2vec_model_llvm_seq():
"""Load a doc2vec model."""
MODEL = 'd2v_llvm_seq.model'
top_dir = os.path.join(os.environ.get('HOME'), '.local')
if not os.path.isdir(os.path.join(top_dir, 'yacos')):
lg.error('YaCoS data does not exist.')
sys.exit(1)
        Similarity.__d2v_model_llvm_seq = Doc2Vec.load(
os.path.join(top_dir, Similarity.__d2v_dir, MODEL)
)
@staticmethod
def euclidean_distance_from_data(training_data,
test_data):
"""Euclidean distance.
Parameters
----------
training_data : dict
test_data : dict
Returns
-------
training_data : list (rows)
test_data : list (rows)
distance : dict
"""
training_data = pd.DataFrame.from_dict(training_data, orient='index')
test_data = pd.DataFrame.from_dict(test_data, orient='index')
distance = euclidean_distances(test_data, training_data)
return training_data.index, test_data.index, distance
@staticmethod
def euclidean_distance(training_benchmarks,
training_directory,
test_benchmarks,
test_directory):
"""Euclidean distance.
Parameters
----------
training_benchmarks : list
training_directory : str
test_benchmarks : list
test_directory : list
Returns
-------
training_data : list (rows)
test_data : list (rows)
distance : dict
"""
training_data, test_data = Similarity.__populate_data(
training_benchmarks,
training_directory,
test_benchmarks,
test_directory
)
distance = euclidean_distances(test_data, training_data)
return training_data.index, test_data.index, distance
@staticmethod
def cosine_distance_from_data(training_data,
test_data):
"""Cosine distance.
Parameters
----------
training_data : dict
test_data : dict
Returns
-------
training_data : list (rows)
test_data : list (rows)
distance : dict
"""
training_data = | pd.DataFrame.from_dict(training_data, orient='index') | pandas.DataFrame.from_dict |
# script with function to prepare and support evaluation
import glob
import yaml
import pandas as pd
import os
# load simulation results and calculate RTT
# network and algorithm name are used to filter the results
def sim_delays(network, algorithm):
sim_results = glob.glob('../eval/{}/{}/{}*.yaml'.format(network, algorithm, network))
sim_delays = []
for result_file in sim_results:
with open(result_file, 'r') as f:
            result = yaml.safe_load(f)
# one-way = RTT/2
result['chain_rtt'] = result['metrics']['total_delay'] * 2
for delay in result['metrics']['delays']:
delay['rtt'] = delay['delay'] * 2
sim_delays.append(result)
return sim_delays
# load corresponding emulation results and calculate RTT
def emu_delays(network, algorithm):
emu_results = glob.glob('../eval/{}/{}/emulation/{}*.yaml'.format(network, algorithm, network))
emu_delays = []
for result_file in emu_results:
with open(result_file, 'r') as f:
            result = yaml.safe_load(f)
# chain delays = httping = TCP handshake = 2x RTT
result['chain_rtt'] = result['chain_delay']['delay'] / 2
# vnf delays = ping = RTT
for delay in result['delays']:
delay['rtt'] = delay['delay']
emu_delays.append(result)
return emu_delays
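# Illustrative sketch of the delay conventions used above (not part of the original evaluation):
# the simulator reports one-way chain delay, httping covers roughly two round trips
# (TCP handshake + request), and ping reports one RTT directly.
def _example_rtt_conversions(sim_one_way, httping_delay, ping_delay):
    sim_rtt = sim_one_way * 2          # one-way delay -> round-trip time
    emu_chain_rtt = httping_delay / 2  # httping measures ~2x RTT
    emu_vnf_rtt = ping_delay           # ping already reports an RTT
    return sim_rtt, emu_chain_rtt, emu_vnf_rtt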
# sort and match simulation and emulation delays; calc difference and ratio; return structured pandas dataframes
def match_sim_emu(sim_delays, emu_delays):
input_cols = ['network', 'num_nodes', 'num_edges', 'service', 'num_vnfs', 'sources', 'num_sources', 'algorithm']
chain_df = pd.DataFrame(columns=input_cols + ['sim_rtt', 'emu_rtt'])
vnf_df = | pd.DataFrame(columns=input_cols + ['src', 'dest', 'sim_rtt', 'emu_rtt']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Monday 3 December 2018
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
import feather
import time
from datetime import date
import sys
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import normalize
import somoclu
from delprocess.loadprofiles import resampleProfiles
from .metrics import mean_index_adequacy, davies_bouldin_score
from ..support import cluster_dir, results_dir
def progress(n, stats):
    """Print progress information for cluster n."""
    s = "%s : " % (n)
    s += "\nsilhouette: %(silhouette).3f " % stats
    s += "\ndbi: %(dbi).3f " % stats
    s += "\nmia: %(mia).3f " % stats
    print(s)
def clusterStats(cluster_stats, n, X, cluster_labels, preprocessing, transform, tic, toc):
stats = {'n_sample': 0,
'cluster_size': [],
'silhouette': 0.0,
'dbi': 0.0,
'mia': 0.0,
'all_scores': 0.0,
# 'cdi': 0.0,
't0': time.time(),
'batch_fit_time': 0.0,
'total_sample': 0}
cluster_stats[n] = stats
try:
cluster_stats[n]['total_sample'] += X.shape[0]
cluster_stats[n]['n_sample'] = X.shape[0]
cluster_stats[n]['silhouette'] = silhouette_score(X, cluster_labels, sample_size=10000)
cluster_stats[n]['dbi'] = davies_bouldin_score(X, cluster_labels)
cluster_stats[n]['mia'] = mean_index_adequacy(X, cluster_labels)
#cluster_stats[n_clusters][y]['cdi'] =cluster_dispersion_index(Xbatch, cluster_labels) DON'T RUN LOCALLY!! - need to change to chunked alogrithm once released
cluster_stats[n]['cluster_size'] = np.bincount(cluster_labels)
cluster_stats[n]['batch_fit_time'] = toc - tic
cluster_stats[n]['preprocessing'] = preprocessing
cluster_stats[n]['transform'] = transform
cluster_stats[n]['all_scores'] = cluster_stats[n]['dbi']*cluster_stats[n]['mia']/cluster_stats[n]['silhouette']
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
print(s)
    except Exception:
        print('Could not compute clustering stats for n = ' + str(n))
return cluster_stats
def saveResults(experiment_name, cluster_stats, cluster_centroids, som_dim, elec_bin, save=True):
"""
Saves cluster stats results and centroids for a single clustering iteration.
Called inside kmeans() and som() functions.
"""
for k, v in cluster_stats.items():
n = k
evals = pd.DataFrame(cluster_stats).T
evals['experiment_name'] = experiment_name
evals['som_dim'] = som_dim
evals['n_clust'] = n
evals['elec_bin'] = elec_bin
eval_results = evals.drop(labels='cluster_size', axis=1).reset_index(drop=True)
# eval_results.rename({'index':'k'}, axis=1, inplace=True)
eval_results[['dbi','mia','silhouette']] = eval_results[['dbi','mia','silhouette']].astype(float)
eval_results['date'] = date.today().isoformat()
# eval_results['best_clusters'] = None
centroid_results = pd.DataFrame(cluster_centroids)
centroid_results['experiment_name'] = experiment_name
centroid_results['som_dim'] = som_dim
centroid_results['n_clust'] = n
centroid_results['elec_bin'] = elec_bin
try:
centroid_results['cluster_size'] = evals['cluster_size'][n]
except:
centroid_results['cluster_size'] = np.nan
centroid_results.reset_index(inplace=True)
centroid_results.rename({'index':'k'}, axis=1, inplace=True)
centroid_results['date'] = date.today().isoformat()
#3 Save Results
if save is True:
os.makedirs(results_dir, exist_ok=True)
erpath = os.path.join(results_dir, 'cluster_results.csv')
if os.path.isfile(erpath):
eval_results.to_csv(erpath, mode='a', index=False, header=False)
else:
eval_results.to_csv(erpath, index=False)
os.makedirs(cluster_dir, exist_ok=True)
crpath = os.path.join(cluster_dir, experiment_name + '_centroids.csv')
if os.path.isfile(crpath):
centroid_results.to_csv(crpath, mode='a', index=False, header=False)
else:
centroid_results.to_csv(crpath, index=False)
print('Results saved for', experiment_name, str(som_dim), str(n))
return eval_results, centroid_results
def xBins(X, bin_type):
if bin_type == 'amd':
Xdd_A = X.sum(axis=1)
Xdd = Xdd_A*230/1000
XmonthlyPower = resampleProfiles(Xdd, interval='M', aggfunc='sum')
Xamd = resampleProfiles(XmonthlyPower, interval='A', aggfunc='mean').reset_index().groupby('ProfileID').mean()
Xamd.columns=['amd']
amd_bins = [0, 1, 50, 150, 400, 600, 1200, 2500, 4000]
bin_labels = ['{0:.0f}-{1:.0f}'.format(x,y) for x, y in zip(amd_bins[:-1], amd_bins[1:])]
Xamd['bins'] = pd.cut(Xamd.amd, amd_bins, labels=bin_labels, right=True, include_lowest=True)
Xbin_dict = dict()
for c in Xamd.bins.cat.categories:
Xbin_dict[c] = Xamd[Xamd.bins==c].index.values
del Xdd_A, Xdd, XmonthlyPower, Xamd
if bin_type == 'integral':
Xint = normalize(X).cumsum(axis=1)
Xintn = pd.DataFrame(Xint, index=X.index)
Xintn['max'] = X.max(axis=1)
clusterer = MiniBatchKMeans(n_clusters=8, random_state=10)
clusterer.fit(np.array(Xintn))
cluster_labels = clusterer.predict(np.array(Xintn))
labl = pd.DataFrame(cluster_labels, index=X.index)
Xbin_dict = dict()
for c in labl[0].unique():
Xbin_dict['bin'+str(c)] = labl[labl[0]==c].index.values
return Xbin_dict
def preprocessX(X, norm=None):
if norm == 'unit_norm': #Kwac et al 2013
Xnorm = normalize(X)
elif norm == 'zero-one': #Dent et al 2014
Xnorm = np.array(X.divide(X.max(axis=1), axis=0))
elif norm == 'demin': #Jin et al 2016
Xnorm = normalize(X.subtract(X.min(axis=1), axis=0))
elif norm == 'sa_norm': #Dekenah 2014
Xnorm = np.array(X.divide(X.mean(axis=1), axis=0))
else:
Xnorm = np.array(X)
#Xnorm.fillna(0, inplace=True)
Xnorm[np.isnan(Xnorm)] = 0
return Xnorm
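# Illustrative sketch (toy data, not from the experiments): effect of the normalisation options
# on a single short load profile.
def _example_preprocess():
    profiles = pd.DataFrame([[0.2, 1.0, 0.6, 0.2]])
    unit = preprocessX(profiles, norm='unit_norm')      # rows scaled to unit L2 norm
    zero_one = preprocessX(profiles, norm='zero-one')   # rows divided by their maximum
    sa = preprocessX(profiles, norm='sa_norm')          # rows divided by their mean
    return unit, zero_one, sa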
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
    This function applies the MiniBatchKMeans algorithm from sklearn to inputs X for each value in range_n_clusters.
    If preprocessing is set, X is normalised with the corresponding scheme in preprocessX().
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
from copy import copy
from functools import wraps
from time import time
import skimage.filters
import funcs
import numpy as np
import pandas as pd
import seaborn as sns
import uncertainties as un
from funcs.post_processing.images.soot_foil import deltas as pp_deltas
from matplotlib import patches
from matplotlib import pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
from scipy.stats import ks_2samp, t, ttest_ind, ttest_ind_from_stats
from skimage import io, transform
from uncertainties import unumpy as unp
d_drive = funcs.dir.d_drive
DF_SF_SPATIAL = pd.read_csv(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"spatial_calibrations.csv",
)
)
SF_DATE = "2020-12-27"
SF_SHOT = 3
SF_IMG_DIR = os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
SF_DATE,
f"Shot {SF_SHOT:02d}",
)
SF_SPATIAL_SHOT_MASK = (DF_SF_SPATIAL["date"] == SF_DATE) & (
DF_SF_SPATIAL["shot"] == SF_SHOT
)
SF_DELTA_MM = DF_SF_SPATIAL[SF_SPATIAL_SHOT_MASK]["delta_mm"]
SF_DELTA_PX = DF_SF_SPATIAL[SF_SPATIAL_SHOT_MASK]["delta_px"]
PLOT_FILETYPE = "png"
# ibm color blind safe palette
# https://lospec.com/palette-list/ibm-color-blind-safe
# https://davidmathlogic.com/colorblind/#%23648FFF-%23785EF0-%23DC267F-%23FE6100-%23FFB000
COLOR_SC = "#fe6100"
COLOR_SF = "#648fff"
SAVE_LOC = os.path.join(d_drive, "Measurement-Paper", "images")
DPI = 200
def hex2rgb(hex_color):
hex_color = hex_color.replace("#", "")
r = int(hex_color[:2], 16)
g = int(hex_color[2:4], 16)
b = int(hex_color[4:], 16)
return r, g, b
def rgb2hex(r, g, b):
out = f"#{hex(r)[2:]}{hex(g)[2:]}{hex(b)[2:]}"
return out
def hex_add(c0, c1):
"""
c0 + c1
Parameters
----------
c0
c1
Returns
-------
"""
r0, g0, b0 = hex2rgb(c0)
r1, g1, b1 = hex2rgb(c1)
r_out = min(255, r0 + r1)
g_out = min(255, g0 + g1)
b_out = min(255, b0 + b1)
out = rgb2hex(r_out, g_out, b_out)
return out
def hex_sub(c0, c1):
"""
c0 - c1
Parameters
----------
c0
c1
Returns
-------
"""
r0, g0, b0 = hex2rgb(c0)
r1, g1, b1 = hex2rgb(c1)
r_out = max(0, r0 - r1)
g_out = max(0, g0 - g1)
b_out = max(0, b0 - b1)
out = rgb2hex(r_out, g_out, b_out)
return out
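# Illustrative sketch: the helpers above allow plot colors to be lightened or darkened by
# channel-wise arithmetic; the gray offset below is an arbitrary example value.
def _example_color_math():
    lighter_sf = hex_add(COLOR_SF, "#202020")  # shift the soot-foil blue toward white
    darker_sc = hex_sub(COLOR_SC, "#202020")   # shift the schlieren orange toward black
    return lighter_sf, darker_sc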
def timed(func):
@wraps(func)
def _timed(*args, **kwargs):
name = func.__name__
if name != "main":
sys.stderr.write(f"{func.__name__} ")
t0 = time()
out = func(*args, **kwargs)
t1 = time()
if name != "main":
sys.stderr.write(f"took {t1 - t0:6f} sec\n")
else:
sys.stderr.write(f"Done! {t1 - t0:6f} sec\n")
return out
return _timed
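# Illustrative sketch: any helper below can be wrapped with @timed to report its wall time on
# stderr when called; the body here is a placeholder.
@timed
def _example_timed_step():
    from time import sleep
    sleep(0.1)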
@timed
def set_plot_format():
common_size = 7.5
sns.set_color_codes("deep")
sns.set_context(
"paper",
rc={
"font.size": common_size,
"axes.titlesize": common_size + 1.5,
"axes.titleweight": "bold",
"axes.labelsize": common_size,
"xtick.labelsize": common_size,
"ytick.labelsize": common_size,
},
)
sns.set_style(
{
"font.family": "serif",
"font.serif": "Computer Modern",
}
)
# plt.rcParams["axes.titleweight"] = "bold"
plt.rcParams["figure.dpi"] = DPI
def sf_imread(
img_path,
plot=True,
):
"""
Thin wrapper around `skimage.io.imread` that rotates the image if it is
to be used for plotting, but does not if it is to be used for measurements.
Parameters
----------
img_path : str
Path to image
plot : bool
Determines whether or not image will be rotated 90 degrees
Returns
-------
np.array
"""
img_in = io.imread(img_path)
if plot:
img_in = transform.rotate(img_in, -90) # show images going left-right
return img_in
# noinspection PyTypeChecker
def get_scale_bar(
delta_px,
delta_mm,
cell_size,
text_color="#000",
box_color="#fff",
box_alpha=1,
rotation="vertical",
):
"""
Thin wrapper around ScaleBar that does a bit of standard formatting for
my needs.
Parameters
----------
delta_px : float
Calibration delta (px)
delta_mm : float
Calibration delta (mm)
cell_size : float
Fixed value to display in scale bar
text_color : str
Text color (hex)
box_color: str
Background box color (hex)
box_alpha : float
Box alpha -- NOTE: does not apply to border >:(
rotation : str
Which direction to place the scale bar: "vertical" or "horizontal"
Returns
-------
ScaleBar
"""
return ScaleBar(
delta_mm / delta_px,
"mm",
location=3,
fixed_value=cell_size,
scale_formatter=(lambda x, u: f"{x:.1f} {u}"),
border_pad=0.2,
color=text_color,
box_color=box_color,
box_alpha=box_alpha,
rotation=rotation,
)
@timed
def get_schlieren_data(estimator):
"""
Read in schlieren data from assorted .h5 stores and calculate cell sizes
for individual shots.
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
"""
# read in data
df_schlieren_tube = pd.DataFrame()
df_schlieren_all_frames = pd.DataFrame()
df_schlieren_frames = pd.DataFrame()
for group in ("fffff", "hhhhh", "ggggg"):
with pd.HDFStore(
f"/d/Data/Processed/Data/data_{group}.h5",
"r",
) as store:
df_schlieren_tube = pd.concat((df_schlieren_tube, store.data))
with pd.HDFStore(
f"/d/Data/Processed/Data/schlieren_{group}.h5",
"r",
) as store:
df_schlieren_all_frames = pd.concat(
(df_schlieren_all_frames, store.data)
)
# fix jacked up measurement
with pd.HDFStore(
"/d/Data/Processed/Data/tube_data_2020-08-07.h5",
"r",
) as store:
df_schlieren_tube[
(
(df_schlieren_tube["date"] == "2020-08-07")
& (df_schlieren_tube["shot"] == 3)
)
] = store.data.iloc[0].values
# After some analysis it looks like I was dumb and used u_delta_px as
# u_loc_px. This gives an overly large estimate of uncertainty for
# schlieren. Fix that before continuing with the analysis.
df_schlieren_all_frames.loc[:, "u_delta_px"] = (
df_schlieren_all_frames["u_loc_px"].copy()
)
df_schlieren_all_frames.loc[:, "u_loc_px"] = df_schlieren_all_frames[
"u_loc_px"
].div(np.sqrt(2))
# calculate cell size measurements
df_schlieren_tube = df_schlieren_tube[
np.isclose(df_schlieren_tube["phi_nom"], 1)
& np.isclose(df_schlieren_tube["dil_mf_nom"], 0.2)
& (df_schlieren_tube["fuel"] == "CH4")
& (df_schlieren_tube["oxidizer"] == "N2O")
& (df_schlieren_tube["diluent"] == "N2")
]
df_schlieren_tube["cell_size"] = np.NaN
df_schlieren_tube["u_cell_size"] = np.NaN
u_delta_bias = np.sqrt(2) / 2
deltas = unp.uarray(
df_schlieren_all_frames["delta_px"],
df_schlieren_all_frames["u_delta_px"], # precision only
) + un.ufloat(0, u_delta_bias)
spatials = unp.uarray(
df_schlieren_all_frames["spatial_centerline"],
df_schlieren_all_frames["u_spatial_centerline"],
)
cell_sizes = deltas * spatials
df_schlieren_all_frames["cell_size"] = unp.nominal_values(cell_sizes)
df_schlieren_all_frames["u_cell_size"] = unp.std_devs(cell_sizes)
for (date, shot), _ in df_schlieren_tube.groupby(["date", "shot"]):
_df_this_shot = df_schlieren_all_frames[
(
(df_schlieren_all_frames["date"] == date)
& (df_schlieren_all_frames["shot"] == shot)
)
].dropna()
df_schlieren_frames = pd.concat((df_schlieren_frames, _df_this_shot))
if len(_df_this_shot):
_deltas = unp.uarray(
_df_this_shot["delta_px"],
_df_this_shot["u_delta_px"],
)
_mm_per_px = unp.uarray(
_df_this_shot["spatial_centerline"],
_df_this_shot["u_spatial_centerline"],
)
_meas = estimator(_deltas * _mm_per_px) * 2
# noinspection PyUnresolvedReferences
df_schlieren_tube.loc[
(
(df_schlieren_tube["date"] == date)
& (df_schlieren_tube["shot"] == shot)
),
["cell_size", "u_cell_size"],
] = (_meas.nominal_value, _meas.std_dev)
df_schlieren_tube = df_schlieren_tube[
~pd.isna(df_schlieren_tube["cell_size"])
]
return df_schlieren_frames, df_schlieren_tube
@timed
def build_schlieren_images(
cmap,
df_meas,
image_width=None,
image_height=None,
save=False,
limits_x=(10, 110),
limits_y=(10, 210),
):
"""
Generates images of:
* Raw schlieren image
* Schlieren image with triple point locations identified
Images will be rendered with an aspect ration of 0.5; only one of
`image_width`, `image_height` should be given.
Parameters
----------
cmap : str
Colormap to use for schlieren frame
df_meas : pd.DataFrame
DataFrame of schlieren measurements
image_width : float or None
Image width (in)
image_height : float or None
Image height (in)
save : bool
Whether or not to save images
limits_x : tuple
X limits of trimmed image
limits_y : tuple
Y limits of trimmed image
Returns
-------
"""
aspect_ratio = 0.5 # w/h
if image_width is None and image_height is None:
raise ValueError("image_width or image_height must be given")
if image_width is None:
image_width = image_height * aspect_ratio
elif image_height is None:
image_height = image_width / aspect_ratio
date = "2020-08-07"
shot = 3
frame = 0
tube_data_h5_suffix = "fffff"
with pd.HDFStore(
f"/d/Data/Processed/Data/data_{tube_data_h5_suffix}.h5", "r"
) as store:
schlieren_key_date = date.replace("-", "_")
key = (
f"/schlieren/d{schlieren_key_date}/" + f"shot{shot:02d}/"
f"frame_{frame:02d}"
)
schlieren_raw = np.fliplr(store[key])
# jankily shoehorn spatial calibration into existing function
mm_per_px = df_meas[
(df_meas["date"] == date)
& (df_meas["shot"] == shot)
]["spatial_centerline"].iloc[0]
schlieren_scalebar = get_scale_bar(
1,
mm_per_px,
cell_size=25.4,
)
# trim image to ROI
limits_x = sorted(limits_x)
limits_y = sorted(limits_y)
schlieren_raw = (
schlieren_raw[np.arange(*limits_y), :][:, np.arange(*limits_x)]
)
schlieren_raw /= schlieren_raw.max()
schlieren_raw = skimage.filters.unsharp_mask(
schlieren_raw,
radius=1.5,
amount=3,
)
    df_meas = df_meas[
        (df_meas["loc_px"] >= limits_y[0])
        & (df_meas["loc_px"] <= limits_y[1])
    ].copy()
df_meas["loc_px"] -= limits_y[0]
# raw frame
name = "schlieren_frame_raw"
fig, ax = plt.subplots(figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax.imshow(schlieren_raw, cmap=cmap)
ax.axis("off")
# ax.set_title("Raw")
ax.grid(False)
ax.add_artist(
copy(schlieren_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# frame with triple point measurements
name = "schlieren_frame_measurements"
fig, ax = plt.subplots(figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax.imshow(schlieren_raw, cmap=cmap)
ax.axis("off")
# ax.set_title("Measurements")
ax.grid(False)
for loc_px in df_meas[
(df_meas["date"] == date)
& (df_meas["shot"] == shot)
& (df_meas["frame"] == frame)
]["loc_px"]:
plt.axhline(
loc_px,
c=COLOR_SC,
lw=0.5,
)
ax.add_artist(
copy(schlieren_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def calculate_schlieren_cell_size(
df_schlieren_frames,
iqr_fencing=False,
estimator=np.mean,
):
"""
Parameters
----------
df_schlieren_frames : pd.DataFrame
DataFrame containing schlieren data
iqr_fencing : bool
Remove outliers using IQR fencing?
estimator : func
Estimator function to use
Returns
-------
Tuple[float, float, unp.uarray, int]
* Measured nominal cell size (mm)
* Measured cell size uncertainty (mm)
* Individual deltas with uncertainties (mm)
* Number of measurements
"""
df_schlieren_frames = df_schlieren_frames.copy().dropna()
if iqr_fencing:
# remove outliers
meas_mean = df_schlieren_frames["cell_size"].mean()
meas_std = df_schlieren_frames["cell_size"].std()
mask = (
meas_mean - 1.5 * meas_std <= df_schlieren_frames["cell_size"]
) & (df_schlieren_frames["cell_size"] <= meas_mean + 1.5 * meas_std)
del meas_std, meas_mean # make sure we use reduced dataset!
else:
# leave em
mask = np.ones_like(df_schlieren_frames["cell_size"], dtype=bool)
meas = unp.uarray(
df_schlieren_frames["cell_size"][mask],
df_schlieren_frames["u_cell_size"][mask].values,
)
n_meas = len(meas)
nominal_values = unp.nominal_values(meas)
# cell_size_meas = np.sum(meas) / n_meas
cell_size_meas = 2 * estimator(meas)
cell_size_uncert_population = (
nominal_values.std() / np.sqrt(n_meas) * t.ppf(0.975, n_meas - 1)
)
# noinspection PyUnresolvedReferences
cell_size_uncert_schlieren = np.sqrt(
np.sum(
np.square(
[
cell_size_uncert_population,
cell_size_meas.std_dev,
]
)
)
)
uncertainty = {
"instrument": cell_size_meas.std_dev,
"population": cell_size_uncert_population,
"total": cell_size_uncert_schlieren,
}
# noinspection PyUnresolvedReferences
return (
cell_size_meas.nominal_value,
uncertainty,
meas,
n_meas,
)
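# Illustrative numeric sketch of the uncertainty combination above (made-up numbers): the
# population term is the 95% confidence half-width of the mean, combined in quadrature with
# the instrument (measurement) uncertainty.
def _example_uncertainty_combination(sample_std=1.2, n=30, u_instrument=0.4):
    u_population = sample_std / np.sqrt(n) * t.ppf(0.975, n - 1)
    u_total = np.sqrt(u_population ** 2 + u_instrument ** 2)
    return u_population, u_total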
@timed
def plot_schlieren_measurement_distribution(
schlieren_meas,
cell_size_meas,
cell_size_uncert,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of schlieren measurements
Parameters
----------
schlieren_meas : np.array
Array of individual schlieren nominal measurements (mm)
cell_size_meas : float
Nominal mean cell size measurement (mm)
cell_size_uncert : float
Uncertainty in cell size (mm)
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_measurement_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.distplot(
schlieren_meas,
hist=False,
# rug=True,
ax=ax,
color=COLOR_SC,
)
ax_ylim = ax.get_ylim()
plt.fill_between(
[cell_size_meas + cell_size_uncert, cell_size_meas - cell_size_uncert],
ax_ylim[0],
ax_ylim[1],
alpha=0.25,
color=COLOR_SC,
ec=None,
zorder=-1,
)
ax.axvline(
cell_size_meas,
c=COLOR_SC,
ls="--",
alpha=0.7,
zorder=-1,
)
ax.set_ylim(ax_ylim)
ax.set_xlabel("Measured Cell Size (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Schlieren Cell Size Measurement Distribution")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def plot_all_schlieren_deltas_distribution(
df_schlieren_frames,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren deltas in the dataset
Parameters
----------
df_schlieren_frames : pd.DataFrame
Dataframe containing all schlieren frame deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_all_deltas_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
deltas = (
df_schlieren_frames["spatial_centerline"]
* df_schlieren_frames["delta_px"]
).dropna()
sns.kdeplot(
deltas,
ax=ax,
color=COLOR_SC,
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Schlieren Triple Point Delta Distribution")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
return deltas
@timed
def plot_all_soot_foil_deltas_distribution(
soot_foil_meas,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren deltas in the dataset
Parameters
----------
soot_foil_meas : list
List of all soot foil deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "soot_foil_all_deltas_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.kdeplot(
soot_foil_meas,
ax=ax,
color=COLOR_SF,
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Soot Foil Triple Point Delta Distribution")
ax.grid(False)
plt.xlim([0, 100])
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def plot_both_delta_distributions(
df_schlieren_frames,
soot_foil_meas,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren and soot foil deltas in the dataset
Parameters
----------
df_schlieren_frames : pd.DataFrame
Dataframe containing all schlieren frame deltas
soot_foil_meas : list
List of all soot foil deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "all_deltas_distributions"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
deltas = (
df_schlieren_frames["spatial_centerline"]
* df_schlieren_frames["delta_px"]
).dropna()
sns.kdeplot(
deltas,
ax=ax,
color=COLOR_SC,
label="Schlieren",
clip=[0, 100],
)
sns.kdeplot(
soot_foil_meas,
ax=ax,
color=COLOR_SF,
label="Soot Foil",
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
plt.xlim([0, 100])
# ax.set_title("Triple Point Delta Distributions")
ax.grid(False)
plt.legend(frameon=False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# noinspection DuplicatedCode
@timed
def plot_schlieren_measurement_convergence(
schlieren_meas,
schlieren_uncert,
schlieren_meas_all,
n_schlieren_meas,
plot_width,
plot_height,
save=False,
):
"""
Plots convergence of schlieren measurements vs. number of measurements.
Parameters
----------
schlieren_meas : float
Actual measured schlieren value
schlieren_uncert : float
Uncertainty in measured value
schlieren_meas_all : np.array
Array of individual schlieren nominal measurements (mm)
n_schlieren_meas : int
Number of schlieren measurements
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_measurement_convergence"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
n_meas = np.arange(1, n_schlieren_meas + 1)
schlieren_meas_all = pd.Series(schlieren_meas_all)
running_mean = schlieren_meas_all.rolling(
n_schlieren_meas,
min_periods=0,
).median()
ax.plot(
n_meas,
np.abs(running_mean - schlieren_meas) * 100 / schlieren_meas,
c=COLOR_SC,
)
ax.axhline(
schlieren_uncert / schlieren_meas * 100,
c="k",
alpha=0.5,
zorder=-1,
lw=0.5,
ls=(0, (5, 1, 1, 1)),
)
ax.set_xlim([2, len(running_mean)])
ax.set_xlabel("Number of Triple Point Deltas")
ax.set_ylabel("Absolute Difference\nFrom Final (%)")
# ax.set_title("Schlieren Cell Size Measurement Convergence")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# noinspection DuplicatedCode
@timed
def plot_soot_foil_measurement_convergence(
nominal_measurement,
uncert,
all_measurements,
plot_width,
plot_height,
save=False,
):
"""
Plots convergence of soot foil measurements vs. number of measurements.
Parameters
----------
nominal_measurement : float
Actual measured value
uncert : float
Uncertainty in measured value
all_measurements : np.array
Array of individual nominal measurements (mm)
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "soot_foil_measurement_convergence"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
n_meas = len(all_measurements)
meas_range = np.arange(1, n_meas + 1)
all_measurements = pd.Series(all_measurements)
running_mean = all_measurements.rolling(
n_meas,
min_periods=0,
).median()
uncert_pct = uncert / nominal_measurement * 100
ax.plot(
meas_range,
np.abs(running_mean - nominal_measurement) * 100 / nominal_measurement,
c=COLOR_SF,
)
ax.axhline(
uncert_pct,
c="k",
alpha=0.5,
zorder=-1,
lw=0.5,
ls=(0, (5, 1, 1, 1)),
)
ax.set_xlim([2, len(running_mean)])
ax.set_xlabel("Number of Triple Point Deltas")
ax.set_ylabel("Absolute Difference\nFrom Final (%)")
plt.ticklabel_format(
style="sci",
axis="x",
scilimits=(0, 0),
useMathText=True,
)
# ax.set_title("Soot Foil Cell Size Measurement Convergence")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
return uncert_pct
@timed
def build_soot_foil_images(
cmap,
image_height,
save=False,
):
"""
Generates images of:
* Raw soot foil image next to traced soot foil
* Zoomed in trace with arrows to demonstrate measurements
Parameters
----------
cmap : str
Colormap to use for schlieren frame
image_height : float or None
Image height (in)
save : bool
Whether or not to save images
Returns
-------
"""
# settings
aspect_ratio = 2 # w/h
image_width = aspect_ratio * image_height
sf_scalebar = get_scale_bar(
SF_DELTA_PX,
SF_DELTA_MM,
cell_size=25.4,
)
# read in foil images
sf_img = sf_imread(os.path.join(SF_IMG_DIR, "square.png"))
sf_img_lines_thk = sf_imread(os.path.join(SF_IMG_DIR, "lines_thk.png"))
# display foil images
name = "soot_foil_images_main"
fig, ax = plt.subplots(1, 2, figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax[0].imshow(sf_img, cmap=cmap)
ax[0].axis("off")
# ax[0].set_title("Soot Foil")
ax[1].imshow(sf_img_lines_thk, cmap=cmap)
ax[1].axis("off")
# ax[1].set_title("Traced Cells")
for a in ax:
a.add_artist(
copy(sf_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# read in zoomed lines
sf_img_lines_z = sf_imread(os.path.join(SF_IMG_DIR, "lines_zoomed.png"))
sf_img_lines_z = np.rot90(
np.rot90(sf_img_lines_z)
) # don't want to redo this
# plot zoomed lines
name = "soot_foil_lines_zoomed"
fig, ax = plt.subplots(figsize=(image_height, image_height))
fig.canvas.set_window_title(name)
ax.imshow(sf_img_lines_z, cmap=cmap)
plt.axis("off")
# plt.title("Traced Cells\n(Close-up)")
lines_scale = 900 / 330 # scaled up for quality
arrow_x = 160 * lines_scale
arrow_length = np.array([36, 32, 86, 52, 88, 35, 50]) * lines_scale
arrow_y_top = np.array([-10, 20, 46, 126, 172, 254, 282]) * lines_scale
n_arrows = len(arrow_length)
for i in range(n_arrows):
if i == 0:
arrowstyle = "-|>"
elif i == n_arrows - 1:
arrowstyle = "<|-"
else:
arrowstyle = "<|-|>"
arrow = patches.FancyArrowPatch(
(arrow_x, arrow_y_top[i]),
(arrow_x, arrow_y_top[i] + arrow_length[i]),
arrowstyle=arrowstyle,
mutation_scale=5,
linewidth=0.75,
color=COLOR_SF,
)
plt.gca().add_artist(arrow)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# regular vs irregular
irreg_scalebar = get_scale_bar(
2268,
300,
cell_size=25.4,
)
irregular_image_path = os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
"2020-10-26",
"Shot 01",
"square.png",
)
img_irregular = sf_imread(irregular_image_path, plot=True)
img_regular = sf_imread(os.path.join(SF_IMG_DIR, "square_regular.png"))
# display foil images
name = "soot_foil_irregular_cells"
fig, ax = plt.subplots(1, 2, figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax[0].imshow(img_regular, cmap=cmap)
ax[0].axis("off")
# ax[0].set_title("Regular")
ax[0].add_artist(copy(sf_scalebar))
ax[1].imshow(img_irregular, cmap=cmap)
ax[1].axis("off")
# ax[1].set_title("Irregular")
ax[1].add_artist(copy(irreg_scalebar))
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def soot_foil_px_cal_uncertainty(
plot_width,
plot_height,
save=False,
):
"""
Calculate soot foil pixel location uncertainty and plot measurement
distribution from repeatability test.
NOTE: this function modifies DF_SF_SPATIAL, therefore this should be run
before calculations referencing soot foil uncertainty!
Parameters
----------
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save images
Returns
-------
"""
# add pixel delta calibration precision uncertainty
# estimate using IMG_1983 (2020-12-27 Shot 03)
px_cal_deltas = np.array(
[
2344, # this is what is saved in the .xcf
2347,
2345,
2345,
2345,
2344,
2344,
2345,
2344,
2345,
]
)
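    # The half-width below is the 95 % confidence interval of the mean repeat
    # reading: std / sqrt(n) scaled by the two-sided Student-t critical value
    # with n - 1 degrees of freedom (np.ndarray.std() defaults to ddof=0).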
u_px_cal_deltas = (
px_cal_deltas.std()
/ np.sqrt(len(px_cal_deltas))
* t.ppf(0.975, len(px_cal_deltas) - 1)
)
# calculate and apply new calibration pixel uncertainty
# existing measurement accounts for sqrt2 from delta
# this applies directly without that because it is a direct delta
# measurement
DF_SF_SPATIAL["u_delta_px"] = np.sqrt(
np.sum(
np.square(
np.array(
[
DF_SF_SPATIAL["u_delta_px"], # bias (preexisting)
u_px_cal_deltas, # precision (new)
]
)
)
)
)
# no need to do this for calibration mm uncertainty because it's a direct
# ruler
# reading, not a measurement of an existing quantity with a ruler
# (i.e. bias only)
name = "soot_foil_px_cal_uncertainty_distribution"
fig = plt.figure(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.distplot(
px_cal_deltas,
hist=False,
color=COLOR_SF,
)
ax_ylim = plt.ylim()
plt.fill_between(
[
px_cal_deltas.mean() + u_px_cal_deltas,
px_cal_deltas.mean() - u_px_cal_deltas,
],
ax_ylim[0],
ax_ylim[1],
alpha=0.25,
color=COLOR_SF,
ec=None,
zorder=-1,
)
plt.axvline(
px_cal_deltas.mean(),
c=COLOR_SF,
ls="--",
alpha=0.7,
zorder=-1,
)
plt.ylim(ax_ylim)
# plt.title(
# "Soot Foil Pixel Calibration Distance\nRepeatability Distribution"
# )
plt.grid(False)
plt.xlabel("Ruler Distance (px)")
plt.ylabel("Probability\nDensity (1/px)")
sns.despine()
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
def find_row_px_loc(row):
row_locs = np.where(row == 255)[0]
double_check = row_locs[
np.abs(np.diff([row_locs, np.roll(row_locs, -1)], axis=0)).flatten() > 1
]
if len(double_check):
meas = double_check[0]
else:
meas = row_locs[0]
return meas
def get_all_image_px_locs(img):
return np.apply_along_axis(find_row_px_loc, 1, img)
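# Applied row by row, this gives one traced-line pixel column per image row;
# the spread of these columns across repeated tracings of the same foil is
# used below as the pixel-location precision uncertainty.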
def soot_foil_px_delta_uncertainty():
# add measurement pixel location precision uncertainty
# estimate using IMG_1983 (2020-12-27 Shot 03)
images = funcs.post_processing.images.schlieren.find_images_in_dir(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
"2020-12-27",
"Shot 03",
"uncertainty",
),
".png",
)
img_size = io.imread(images[0]).shape[0] # get image size
n_repeatability_images = len(images)
repeatability_px_locs = (
np.ones(
(
img_size,
n_repeatability_images,
)
)
* np.NaN
)
for i, img_loc in enumerate(images):
img = io.imread(img_loc)
repeatability_px_locs[:, i] = get_all_image_px_locs(img)
# use max std of all rows as uncertainty estimate
u_px_delta_precision = (
np.std(
repeatability_px_locs,
axis=1,
).max()
/ np.sqrt(n_repeatability_images)
* t.ppf(
0.975,
n_repeatability_images - 1,
)
) * np.sqrt(2) # accounts for propagation in delta
u_px_delta_bias = 0.5 * np.sqrt(2) # accounts for propagation in delta
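    # Both terms carry the sqrt(2) factor because each delta is the difference
    # of two independent pixel locations: adding equal uncertainties in
    # quadrature gives sqrt(u**2 + u**2) = sqrt(2) * u.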
# calculate and apply new measurement pixel location precision uncertainty
uncert_total = np.sqrt(
np.sum(np.square(np.array([u_px_delta_bias, u_px_delta_precision])))
)
uncert = {
"bias": u_px_delta_bias,
"precision": u_px_delta_precision,
"total": uncert_total
}
return uncert
@timed
# noinspection PyUnresolvedReferences
def calculate_soot_foil_cell_size(
# n_schlieren_meas,
iqr_fencing,
estimator=np.mean,
use_cache=True,
save_cache=False,
):
"""
Calculates the mean cell size from soot foil images
Parameters
----------
# n_schlieren_meas : int
Number of schlieren measurements, which is used to trim down the data
set after outliers have been removed -- NOTE: this is being left in
in case it needs to be used again later, however the randomly selected
batch of measurements from the first time this was run has been
preserved and will be used for the sake of continuity.
iqr_fencing : bool
Remove outliers using IQR fencing?
estimator : func
Estimator function to use
use_cache : bool
Use cached data?
save_cache : bool
Overwrite cached data?
Returns
-------
Tuple[np.array, float, float, pd.DataFrame]
* Per-foil measurements (mm)
* Mean cell size (mm)
* Cell size uncertainty (mm)
"""
cache_file = os.path.join(
d_drive,
"Data",
"Processed",
"Data",
"soot_foil_measurement_study.h5",
)
uncert_delta_px = soot_foil_px_delta_uncertainty()
if use_cache:
with pd.HDFStore(cache_file, "r") as store:
all_meas = store.data["measurements"].values
all_total_uncerts = store.data["total_uncertainties"].values
all_cal_px_uncerts = store.data["u_cal_px"].values
all_cal_mm_uncerts = store.data["u_cal_mm"].values
else:
date_shot = (
# remove 4 at random
# np.random.choice(range(19), 4, False)
# Out[3]: array([16, 4, 5, 6])
# date, shot
("2020-11-12", 0),
("2020-11-13", 8),
("2020-11-23", 3),
# ("2020-11-23", 4),
# ("2020-11-23", 6),
# ("2020-11-23", 7),
("2020-11-24", 0),
("2020-11-24", 3),
("2020-11-24", 7),
("2020-11-25", 0),
("2020-12-20", 8),
("2020-12-21", 9),
("2020-12-27", 0),
("2020-12-27", 1),
("2020-12-27", 2),
# ("2020-12-27", 3),
("2020-12-27", 6),
("2020-12-27", 7),
("2020-12-27", 8),
)
u_d_px = uncert_delta_px["total"]
all_meas = []
all_total_uncerts = []
all_dates = []
all_shots = []
all_cal_mm_uncerts = []
all_cal_px_uncerts = []
all_n_deltas = np.ones(len(date_shot)) * np.NaN
for idx, (date, shot) in enumerate(date_shot):
cal_mm, cal_px, u_cal_mm, u_cal_px = DF_SF_SPATIAL[
(DF_SF_SPATIAL["date"] == date) &
(DF_SF_SPATIAL["shot"] == shot)
][["delta_mm", "delta_px", "u_delta_mm", "u_delta_px"]].values[0]
d_px = pp_deltas.get_px_deltas_from_lines(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
f"{date}",
f"Shot {shot:02d}",
"composite.png",
),
apply_uncertainty=False,
)
all_n_deltas[idx] = len(d_px)
all_cal_mm_uncerts.append(u_cal_mm)
all_cal_px_uncerts.append(u_cal_px)
# apply uncertainties
d_px = unp.uarray(d_px, u_d_px)
cal_mm = un.ufloat(cal_mm, u_cal_mm)
cal_px = un.ufloat(cal_px, u_cal_px)
# calculate!
d_mm = d_px * cal_mm / cal_px
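            # unp.uarray/un.ufloat propagate the standard deviations through
            # this arithmetic, so nominal_values() and std_devs() below yield
            # the deltas in mm together with their combined pixel and
            # calibration uncertainties.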
all_meas.extend(list(unp.nominal_values(d_mm)))
all_total_uncerts.extend(list(unp.std_devs(d_mm)))
n_current_meas = len(d_mm)
all_dates.extend(list([date]*n_current_meas))
all_shots.extend(list([shot]*n_current_meas))
if save_cache:
df_meas = pd.DataFrame([
pd.Series(all_dates, name="date"),
pd.Series(all_shots, name="shot"),
pd.Series(all_meas, name="measurements"),
pd.Series(all_total_uncerts, name="total_uncertainties"),
pd.Series(all_cal_px_uncerts, name="u_cal_px"),
pd.Series(all_cal_mm_uncerts, name="u_cal_mm"),
]).T
with pd.HDFStore(cache_file, "w") as store:
store.put("data", df_meas)
measurements = unp.uarray(
all_meas,
all_total_uncerts,
)
meas_nominal = unp.nominal_values(measurements)
if iqr_fencing:
# remove outliers
mean = meas_nominal.mean()
std = meas_nominal.std()
meas_mask = (meas_nominal <= mean + std * 1.5) & (
meas_nominal >= mean - std * 1.5
)
measurements = measurements[meas_mask]
meas_nominal = meas_nominal[meas_mask]
del mean, std # don't accidentally reuse these!
# scale to match number of samples with schlieren
# reduced_indices = sorted(np.random.choice(
# np.arange(len(measurements)),
# n_schlieren_meas,
# replace=False,
# ))
# reduced_indices = [0, 1, 3, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
#
# measurements = measurements[reduced_indices]
# meas_nominal = meas_nominal[reduced_indices]
# date_shot_index = pd.MultiIndex.from_tuples(date_shot)[reduced_indices]
# read in data
    with pd.HDFStore("/d/Data/Processed/Data/data_soot_foil.h5", "r") as store:
import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
init_cols = [
'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
'lvl', 'flow', 'temp', 'month']
trans_cols = [
'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
trans_df = trans_df.reset_index().drop('index', axis=1)
dfc = trans_df.copy()
lstrip = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
rivlakeid = dfc['riv_or_lake'].map(lambda x: x.lstrip(lstrip).rstrip(')'))
trans_df['riv_or_lake'] = trans_df['riv_or_lake'].map(lambda x: x.rstrip(' ()1234567890 '))
trans_df['riv_or_lake_id'] = rivlakeid
trans_df['month'] = trans_df['month'].fillna(method='ffill').astype(int)
trans_df['day'] = trans_df['day'].fillna(method='ffill').astype(int)
trans_df['year'] = trans_df['hydroy']
trans_df.loc[(trans_df['month'] == 11) | (trans_df['month'] == 12), 'year'] = trans_df['year'].astype(int) - 1
trans_df['date'] = pd.to_datetime(trans_df[['year', 'month', 'day']])
trans_df = trans_df[Labels.trans_cols]
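    # The values below appear to be the source's sentinel codes for missing
    # readings (level, flow, temperature), so map them to NaN.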
trans_df.loc[trans_df['lvl'] == 9999, 'lvl'] = np.nan
trans_df.loc[trans_df['flow'] == 99999.999, 'flow'] = np.nan
trans_df.loc[trans_df['temp'] == 99.9, 'temp'] = np.nan
return trans_df
def getframe(year: int, month: int, stationid=None, station=None):
core.makedir(dirname='temp')
zipname = f'codz_{year}_{core.strnumb(month)}.zip'
csvname = f'codz_{year}_{core.strnumb(month)}.csv'
url = f'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/dobowe/{year}/{zipname}'
r = requests.get(url)
with open(f'temp/{zipname}', 'wb') as file:
file.write(r.content)
with zp.ZipFile(f'temp/{zipname}', 'r') as zip_ref:
zip_ref.extractall(path='temp')
df = pd.read_csv(f'temp/{csvname}', encoding='windows-1250', header=None)
df.columns = Labels.init_cols
if stationid is not None:
df = df.loc[df['station_id'] == int(stationid)]
elif station is not None:
df = df.loc[df['station_name'] == station]
os.remove(f'temp/{zipname}')
os.remove(f'temp/{csvname}')
return df
def getyear(year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int):
raise Exception('year argument must be an integer')
elif year not in range(1951, 2021):
        raise Exception('year argument not in available range (1951-2020)')
else:
year_df = pd.DataFrame([], columns=Labels.init_cols)
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
year_df = pd.concat([year_df, df], ignore_index=True)
year_df = transform(year_df)
if save:
core.makedir('Saved')
if stationid is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
year_df.to_csv(f'Saved/hydro_daily_{year}_all.csv', index=False, encoding='utf-8')
return year_df.reset_index().drop('index', axis=1)
def getrange(first_year: int, last_year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(first_year, int) or not isinstance(last_year, int):
raise Exception('first_year and last_year arguments must be integers')
elif first_year not in range(1951, 2021) or last_year not in range(1951, 2021):
raise Exception('year argument out of available range (1951-2020)')
else:
range_df = pd.DataFrame([], columns=Labels.trans_cols)
for year in range(first_year, last_year + 1):
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
range_df = pd.concat([range_df, df], ignore_index=True)
range_df = transform(range_df)
if save:
core.makedir('Saved')
if stationid is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_all.csv', index=False, encoding='utf-8')
return range_df.reset_index().drop('index', axis=1)
def getmonth(year: int, month: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int) or not isinstance(month, int):
raise Exception('year and month arguments must be integers')
elif month not in range(1, 13):
raise Exception('month argument not in range (1-12)')
elif year not in range(1951, 2021):
raise Exception('year argument not in available range (1951-2020)')
else:
month_df = getframe(year, month, stationid, station)
if month_df.empty:
raise Exception('there is no station with chosen name or id ')
else:
month_df.columns = Labels.init_cols
month_df = transform(month_df)
if save:
core.makedir('Saved')
if stationid is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_all.csv', index=False, encoding='utf-8')
return month_df
def err(stationid, station):
if not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
elif not isinstance(station, str) and station is not None:
raise Exception('station argument must be a string')
def metadata(stationid: int, data: str) -> list:
if stationid is None:
raise Exception('missing stationid argument')
elif not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
    meta = pd.read_csv('metadata/hydro_stations.csv', encoding='utf-8')
from common.util import ReviewUtil
import pandas as pd
from collections import Counter
import numpy as np
from scipy.stats import norm
import os
class ZYJTemporalAnalysis:
def __init__(self, input_dir: str, threshold: float = 0.9999, num_day_thres: float = 100.):
self.input_path = input_dir
self.threshold = threshold
self.num_day_thres = num_day_thres
@staticmethod
def get_risks(df, prod_id):
if df.empty or df.shape[0] < 100:
            return pd.DataFrame()
review_date = df["creationTime"].apply(lambda x: x.split(" ")[0]).tolist()
date_freqs = Counter(review_date).items()
(date, freqs) = zip(*date_freqs)
np_freqs = np.array(freqs)
m = np.mean(np_freqs)
sd = np.std(np_freqs)
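        # Score each day's review count against five times the mean daily
        # count in standard-deviation units, then map the score through the
        # standard normal CDF; probabilities near 1 mark unusually heavy days.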
r = (np_freqs - 5 * m) / sd
prob = norm(0, 1).cdf(r)
mus = [m] * len(date)
sds = [sd] * len(date)
prod_ids = [prod_id] * len(date)
analysis_df = pd.DataFrame({"date": date, "count": freqs, "prob": prob, "mu": mus, "sd": sds, "prod_id": prod_ids})
return analysis_df
@staticmethod
def is_outlier(points, thresh=3.5):
if len(points.shape) == 1:
points = points[:, None]
mu = np.mean(points, axis=0)
diff = np.sum((points - mu) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.mean(diff)
modified_z_score = (0.6745 * diff + 0.0001) / (med_abs_deviation + 0.0001)
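        # Note: the usual modified z-score is built from the median and the
        # median absolute deviation; this variant substitutes the mean and the
        # mean absolute deviation, with a small epsilon guarding against
        # division by zero. Points scoring above `thresh` are flagged below.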
return modified_z_score > thresh
@staticmethod
def get_water_army_reviews2(df):
df['creationTime'] = pd.to_datetime(df["creationTime"])
df['referenceTime'] = pd.to_datetime(df["referenceTime"])
diff = df['referenceTime'] - df['creationTime']
df['date_diff'] = diff.apply(lambda x: x.days)
df['ratingDate'] = df['creationTime'].apply(lambda x: x.date())
try:
count_iterable = Counter(df['ratingDate'])
df_counter = pd.DataFrame.from_dict(count_iterable, orient='index').reset_index()
mask = ZYJTemporalAnalysis.is_outlier(df_counter[0])
outlier_dates = set(df_counter['index'][mask])
suspicious_list = df[df['ratingDate'].isin(outlier_dates) & (df['date_diff'] < 30)]['id'].values.tolist()
if len(suspicious_list) > 20:
print(suspicious_list)
return suspicious_list
else:
return list()
except KeyError:
return list()
@staticmethod
def get_water_army_reviews(df):
num_top_date_delta = 5
        df['referenceTime'] = pd.to_datetime(df["referenceTime"])
import os
from io import BytesIO
import zipfile
import time
import warnings
import json
from pathlib import Path
import argparse
import requests
import pandas as pd
import geopandas as gpd
import fiona
DATA_DIR = Path(os.path.dirname(__file__), "../data")
RAW_DIR = Path(DATA_DIR, "raw")
PROCESSED_DIR = Path(DATA_DIR, "processed")
def load_gdf(path, epsg=27700):
gdf = gpd.read_file(path)
gdf.to_crs(epsg=epsg, inplace=True)
return gdf
def download_la_shape(lad20cd="E08000021", overwrite=False):
save_path = Path(PROCESSED_DIR, lad20cd, "la_shape", "la.shp")
if os.path.exists(save_path) and not overwrite:
return gpd.read_file(save_path)
os.makedirs(save_path.parent, exist_ok=True)
# From https://geoportal.statistics.gov.uk/datasets/ons::local-authority-districts-december-2020-uk-bgc/about
base = "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/Local_Authority_Districts_December_2020_UK_BGC/FeatureServer/0"
query = (
f"query?where=LAD20CD%20%3D%20%27{lad20cd}%27&outFields=*&outSR=27700&f=json"
)
url = f"{base}/{query}"
la = query_ons_records(url, save_path=None)
la = columns_to_lowercase(la)
la = la[["geometry", "lad20cd", "lad20nm"]]
la.to_file(save_path)
return la
def lad20cd_to_lad11cd(lad20cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20cd == lad20cd]["lad11cd"].unique()
def lad11cd_to_lad20cd(lad11cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad11cd == lad11cd]["lad20cd"].unique()
def lad20nm_to_lad20cd(lad20nm, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20nm == lad20nm]["lad20cd"].iloc[0]
def lad20cd_to_lad20nm(lad20cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20cd == lad20cd]["lad20nm"].iloc[0]
def lad11nm_to_lad11cd(lad11nm, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad11nm == lad11nm]["lad11cd"].iloc[0]
def download_oa_shape(lad11cd="E08000021", lad20cd=None, overwrite=False):
if isinstance(lad11cd, str):
lad11cd = [lad11cd]
if lad20cd is None:
lad20cd = lad11cd_to_lad20cd(lad11cd[0])[0]
save_path = Path(PROCESSED_DIR, lad20cd, "oa_shape", "oa.shp")
if os.path.exists(save_path) and not overwrite:
return gpd.read_file(save_path)
os.makedirs(save_path.parent, exist_ok=True)
oa = []
for la in lad11cd:
# From https://geoportal.statistics.gov.uk/datasets/ons::output-areas-december-2011-boundaries-ew-bgc-1/about
url = f"https://ons-inspire.esriuk.com/arcgis/rest/services/Census_Boundaries/Output_Area_December_2011_Boundaries/FeatureServer/2/query?where=lad11cd%20%3D%20'{la}'&outFields=*&outSR=27700&f=json"
oa.append(query_ons_records(url, save_path=None))
oa = pd.concat(oa)
oa = columns_to_lowercase(oa)
oa = oa[["oa11cd", "geometry"]]
oa.to_file(save_path)
return oa
def download_oa_mappings(overwrite=False):
save_path = Path(RAW_DIR, "oa_mappings.csv")
if os.path.exists(save_path) and not overwrite:
return pd.read_csv(save_path, dtype=str)
# 2011
# https://geoportal.statistics.gov.uk/datasets/ons::output-area-to-lower-layer-super-output-area-to-middle-layer-super-output-area-to-local-authority-district-december-2011-lookup-in-england-and-wales/about
url = "https://opendata.arcgis.com/api/v3/datasets/6ecda95a83304543bc8feedbd1a58303_0/downloads/data?format=csv&spatialRefId=4326"
df2011 = pd.read_csv(url)
df2011.drop("ObjectId", axis=1, inplace=True)
# 2020
# https://geoportal.statistics.gov.uk/datasets/ons::output-area-to-lower-layer-super-output-area-to-middle-layer-super-output-area-to-local-authority-district-december-2020-lookup-in-england-and-wales/about
url = "https://opendata.arcgis.com/api/v3/datasets/65664b00231444edb3f6f83c9d40591f_0/downloads/data?format=csv&spatialRefId=4326"
df2020 = pd.read_csv(url)
df2020.drop("FID", axis=1, inplace=True)
merged = pd.merge(df2011, df2020, how="outer")
merged = columns_to_lowercase(merged)
merged.to_csv(save_path, index=False)
return merged
def download_centroids(overwrite=False):
save_path = Path(RAW_DIR, "centroids.csv")
if os.path.exists(save_path) and not overwrite:
return pd.read_csv(save_path)
# From https://geoportal.statistics.gov.uk/datasets/ons::output-areas-december-2011-population-weighted-centroids-1/about
url = "https://opendata.arcgis.com/api/v3/datasets/b0c86eaafc5a4f339eb36785628da904_0/downloads/data?format=csv&spatialRefId=27700"
df = pd.read_csv(url)
df = columns_to_lowercase(df)
df = df[["oa11cd", "x", "y"]]
df.to_csv(save_path, index=False)
return df
def download_populations_region(url):
r = requests.get(url)
zip_file = zipfile.ZipFile(BytesIO(r.content))
file_name = None
for name in zip_file.namelist():
if ".xlsx" in name:
file_name = name
break
if not file_name:
raise ValueError("No .xlsx found in zip archive")
xl_file = zip_file.open(file_name)
df = pd.read_excel(
xl_file, sheet_name="Mid-2019 Persons", skiprows=4, thousands=","
)
df_total = df[["OA11CD", "All Ages"]]
df_total.rename(columns={"All Ages": "population"}, inplace=True)
df_total = columns_to_lowercase(df_total)
df_total = df_total[["oa11cd", "population"]]
df_ages = df.drop(["All Ages", "LSOA11CD"], axis=1)
df_ages.rename(columns={"90+": 90}, inplace=True)
df_ages = columns_to_lowercase(df_ages)
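    # df_ages keeps one column per single year of age, with the open-ended
    # "90+" bucket renamed to the integer 90 for easier numeric handling.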
return df_total, df_ages
def download_populations(overwrite=False):
save_path_total = Path(RAW_DIR, "population_total.csv")
save_path_ages = Path(RAW_DIR, "population_ages.csv")
if (
os.path.exists(save_path_total)
and os.path.exists(save_path_ages)
and not overwrite
):
return pd.read_csv(save_path_total), pd.read_csv(save_path_ages)
# From https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthenortheastregionofengland
region_urls = [
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthelondonregionofengland/mid2019sape22dt10a/sape22dt10amid2019london.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesintheyorkshireandthehumberregionofengland/mid2019sape22dt10c/sape22dt10cmid2019yorkshireandthehumber.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthesouthwestregionofengland/mid2019sape22dt10g/sape22dt10gmid2019southwest.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesintheeastmidlandsregionofengland/mid2019sape22dt10f/sape22dt10fmid2019eastmidlands.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthesoutheastregionofengland/mid2019sape22dt10i/sape22dt10imid2019southeast.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesintheeastregionofengland/mid2019sape22dt10h/sape22dt10hmid2019east.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthewestmidlandsregionofengland/mid2019sape22dt10e/sape22dt10emid2019westmidlands.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthenorthwestregionofengland/mid2019sape22dt10b/sape22dt10bmid2019northwest.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthenortheastregionofengland/mid2019sape22dt10d/sape22dt10dmid2019northeast.zip",
"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinwales/mid2019sape22dt10j/sape22dt10jmid2019wales.zip",
]
df_total = []
df_ages = []
for i, r in enumerate(region_urls):
print("Dowloading region", i + 1, "out of", len(region_urls), ":", r)
region_total, region_ages = download_populations_region(r)
df_total.append(region_total)
df_ages.append(region_ages)
df_total = pd.concat(df_total)
df_ages = pd.concat(df_ages)
df_total.to_csv(save_path_total, index=False)
df_ages.to_csv(save_path_ages, index=False)
return df_total, df_ages
def download_workplace(overwrite=False):
save_path = Path(RAW_DIR, "workplace.csv")
if overwrite:
warnings.warn(
"Not possible to download workplace data directly. Go to "
"https://www.nomisweb.co.uk/query/construct/summary.asp?mode=construct&version=0&dataset=1300"
)
workplace = pd.read_csv(save_path, thousands=",")
workplace = columns_to_lowercase(workplace)
return workplace
def download_uo_sensors(overwrite=False):
save_path = Path(RAW_DIR, "uo_sensors", "uo_sensors.shp")
if os.path.exists(save_path) and not overwrite:
return gpd.read_file(save_path)
query = "http://uoweb3.ncl.ac.uk/api/v1.1/sensors/json/?theme=Air+Quality" # &bbox_p1_x=-1.988472&bbox_p1_y=54.784364&bbox_p2_x=-1.224922&bbox_p2_y=55.190148"
response = requests.get(query)
sensors = json.loads(response.content)["sensors"]
    df = pd.DataFrame(sensors)
"""
Load the VQA question JSON files and attach the matching image captions
"""
import json
import pandas as pd
path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'
with open(path) as question:
question = json.load(question)
# question['questions'][0]
# question['questions'][1]
# question['questions'][2]
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
del df_addcap['file_path']
########################################################################################################################
"""
pandas to json
"""
df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')
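# orient='table' wraps the records in a JSON Table Schema payload
# ({"schema": ..., "data": [...]}), which is why later reads of the caption
# files index into the 'data' key.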
with open('./datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
########################################################################################################################
########################################################################################################################
"""
answer + cap
"""
path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'
path = './datasets/vqa/v2_mscoco_val2014_annotations.json'
with open(path) as answer:
answer = json.load(answer)
answer['annotations'][0]
df_ans = pd.DataFrame(answer['annotations'])
df_ans[:0]
del df_ans['question_type']
del df_ans['answers']
del df_ans['answer_type']
del df_ans['image_id']
df_ans[df_ans['question_id']==458752000]
df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id')
df_addcap2[:0]
df_addcap2['multiple_choice_answer']
# del df_addcap['file_path']
df_addcap2.to_json('./datasets/caption/val_qacap.json', orient='table')
with open('./datasets/caption/train_qacap.json') as train_qacap:
train_qacap = json.load(train_qacap)
########################################################################################################################
"""val test도 마찬가지"""
path = './datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json'
with open(path) as question:
question = json.load(question)
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
df_addcap[:0]
del df_addcap['file_path']
df_addcap.to_json('./datasets/caption/val_cap.json', orient='table')
#test
path = './datasets/vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json'
with open(path) as question:
question = json.load(question)
df = pd.DataFrame(question['questions'])
df
df['image_id'] = df.image_id.astype(int)
caption_path = './datasets/caption/vis_st_test.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_cap['image_id'] = df_cap.image_id.astype(int)
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
df_addcap[:0]
del df_addcap['file_path']
df_addcap.to_json('./datasets/caption/test_cap.json', orient='table')
########################################################################################################################
from core.data.ans_punct import prep_ans
import numpy as np
import en_vectors_web_lg, random, re, json
import json
from core.data.data_utils import ques_load
stat_ques_list = \
json.load(open('./datasets/caption/train_cap.json', 'r'))['data'] + \
json.load(open('./datasets/caption/val_cap.json', 'r'))['data'] + \
json.load(open('./datasets/caption/test_cap.json', 'r'))['data']
def tokenize(stat_ques_list, use_glove):
token_to_ix = {
'PAD': 0,
'UNK': 1,
}
spacy_tool = None
pretrained_emb = []
if use_glove:
spacy_tool = en_vectors_web_lg.load()
pretrained_emb.append(spacy_tool('PAD').vector)
pretrained_emb.append(spacy_tool('UNK').vector)
for ques in stat_ques_list:
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
ques['question'].lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
for ques in stat_ques_list:
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
ques['caption'].lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
pretrained_emb = np.array(pretrained_emb)
return token_to_ix, pretrained_emb
token_to_ix, pretrained_emb = tokenize(stat_ques_list, True)
#######################################################################################################################
# with open('./datasets/vqa/v2_mscoco_train2014_annotations.json') as answer:
# answer = json.load(answer)
#
# answer['annotations'][2]
"""
If the model uses the answers, the comparison has to be done between train and val.
The test set does not provide answers, so a model that relies on answers cannot be used at test time.
"""
####
import cal_sim
import pandas as pd
with open('datasets/caption/train_cap.json') as train_cap:
train_cap = json.load(train_cap)
with open('datasets/caption/val_cap.json') as val_cap:
val_cap = json.load(val_cap)
with open('datasets/caption/test_cap.json') as test_cap:
test_cap = json.load(test_cap)
df_train = pd.DataFrame(train_cap['data'])
df_val = pd.DataFrame(val_cap['data'])
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
                         frequencies.get_freq_code('S')[0])
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# (raises tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# (raises tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
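# helper: store a frame whose index is built by the given constructor and check the round trip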
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
import numpy as np
import pandas as pd
import matplotlib
from importlib import reload
import matplotlib.pyplot as plt
import elements
elements = reload(elements)
from elements.event import Event
import os
from scipy.fft import fft, fftfreq, ifft
#%%
#meta data
meta_event = pd.read_csv('data/meta_data.csv')
#List of events
plt.ion()
whole_events = [i.split('.')[0] for i in os.listdir('data/csv')]
#just testing some data
id = whole_events[0]
start= 0
end = -1
e = Event(id, start, end)
keys = ['Time (s)', ' Va', ' Vb', ' Vc', ' Ia', ' Ib', ' Ic', ' In']
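# keys[6] is ' Ic' (presumably the phase C current channel)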
yf, yf_mag_real, xf, start_index, N, T = e.fft_analyzer(keys[6])
fig = e.show_detail()
print(meta_event.loc[meta_event['EventId']==int(id)].values)
#%%
#get the fft for each event as the input features
whole_events = [i.split('.')[0] for i in os.listdir('data/csv')]
number_of_freqs = 200
Nf = number_of_freqs * 7 # number of features
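# the factor of 7 presumably corresponds to the measured channels (Va, Vb, Vc, Ia, Ib, Ic, In)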
features = {}
max_voltage = 0
max_current = 0
bad_data_index= [122, 123, 124, 125, 127, 128]
unique_causes = meta_event['Cause'].unique()
bad_events_id = []
causes = pd.DataFrame(columns=['id', 'cause', 'label'])  # a list keeps the column order deterministic (a set does not)
#%%
#saving figures just for visual reference
causes = pd.read_pickle('data/causes.pkl')
for ev in whole_events[0:1]:
path = "figures/eventshape/{}.png".format(ev)
e = Event(ev, start, end)
fig = e.show_detail()
cause = causes.loc[causes['id'] == ev]['cause'].values[0]
plt.title("{}".format(cause))
plt.show()
fig.savefig(path)
#%%
#save figures by their known group
causes = pd.read_pickle('data/causes.pkl')
causes = causes.groupby('cause')
all_causes = causes.groups.keys()
for c in all_causes:
path = 'figures/knownclusters/{}'.format(c)
os.mkdir(path)
for ev in causes.get_group(c)['id']:
e = Event(ev,0,-1)
fig = e.show_detail()
cause = c
plt.title("{}".format(cause))
plt.show()
save_path = path + '/{}.png'.format(ev)
fig.savefig(save_path)
#%%
#known event true figures
def show_event(ev, cl):
causes = pd.read_pickle('data/causes.pkl')
e = Event(ev, 0, -1, 'resampled')
path = 'figures/eval_clusters/{}/{}.png'.format(cl, ev)
selected_data = e.data.loc[:, e.data.columns != 'Time (s)']
f, (ax1, ax2) = plt.subplots(2, 1)
for i in selected_data.keys()[0:3]:
ax1.plot(selected_data[i], label=i)
for i in selected_data.keys()[3:]:
ax2.plot(selected_data[i], label=i)
ax1.set_title(ev)
# plt.legend()
f.savefig(path)
plt.show()
#%%
# read the main dataset
true_clusters_known = pd.read_pickle('data/known_true_clusters_ids.pkl')
all_clusters_data = []
cl = 'Equipment'
for cl in true_clusters_known.keys():
# os.mkdir('figures/eval_clusters/{}'.format(cl))
for ev in true_clusters_known[cl].dropna():
show_event(ev, cl)
#%%
#known event true figures
def show_event_res(ev, cl):
causes = pd.read_pickle('data/causes.pkl')
e = Event(ev, 0, -1, 'resampled')
path = 'figures/eval_res/{}/{}.png'.format(cl, ev)
selected_data = e.res().loc[:, e.res().columns != 'Time (s)']
f, (ax1, ax2) = plt.subplots(2, 1)
for i in selected_data.keys()[0:3]:
ax1.plot(selected_data[i], label=i)
for i in selected_data.keys()[3:]:
ax2.plot(selected_data[i], label=i)
ax1.set_title(ev)
# plt.legend()
f.savefig(path)
plt.show()
#%%
# read the main dataset with res
true_clusters_known = pd.read_pickle('data/known_true_clusters_ids.pkl')
all_clusters_data = []
cl = 'Tree'
for cl in true_clusters_known.keys():
# os.mkdir('figures/eval_res/{}'.format(cl))
for ev in true_clusters_known[cl].dropna():
show_event_res(ev, cl)
#%%
bad_events_horizon = {
'Animal': ['21831','21832','21833','21834','21840','21842','21843'],
'Equipment': ['21835','21836','21837','21841','21850','21862','21863','21865','21873'],
'Lightning': ['21856','21857','21858','21859','21860','21861'],
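# NOTE: 'Animal' appears twice in this dict literal; the second entry silently overwrites the first,
# so the intended label for the ids below is unclear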
'Animal': ['2771','21838','21839','21844','21845','21846','21847','21848','21851','21852','21853','21854']
}
flat_list = [item for sublist in list(bad_events_horizon.values()) for item in sublist]
# matplotlib.use("Qt5Agg")
plt.ion()
# def onclick(event):
# global pause
# pause = not pause
#%%
#known event true figures
def show_bad_event(ev):
causes = pd.read_pickle('data/causes.pkl')
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
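# the next line drops the first simulated year and the last two (the 3 extra years added above), presumably as padding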
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
# generate simulated day-of-week flags, assuming the simulation starts on a Monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
#Generate a datelist
datelist=pd.date_range(pd.datetime(2017,1,1),periods=365).tolist()
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to degree days
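# HDD = max(0, 65 - T) and CDD = max(0, T - 65), with T in degrees F (65 F base temperature)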
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
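# the binary masks keep wind speed only on days with the corresponding (cooling or heating) degree-day load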
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
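# each missing hourly value is filled with the reading from the same hour one day later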
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
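# M columns: weighted avg temp, daily peak load, weekday flag, then per-city HDD, CDD, HDD-wind and CDD-wind
# (M_sim has the same layout but omits the observed peak column)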
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
#multivariate regression
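# piecewise approach: fit a separate linear model for each weighted-temperature bin,
# predicting daily peak load from weekday, HDD, CDD and degree-day wind features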
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make in-sample predictions on the historical (training) data
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
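# report R^2 (squared Pearson correlation) and p-value between predicted and observed daily peaks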
a=st.pearsonr(peaks,BPA_p)
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
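# --- Illustrative sketch (not used by this script): the nested degree-day loops above
# can be written as vectorized NumPy operations. Temperatures are assumed to already be
# in degrees F, as in AvgT / CAISO_sim_T_F; numpy is assumed to be imported as np earlier.
def degree_days(temps_F, wind, base=65.0):
    """Return (HDD, CDD, HDD_wind, CDD_wind) for a (days x cities) temperature array."""
    import numpy as np
    hdd = np.maximum(0.0, base - temps_F)   # heating degree days
    cdd = np.maximum(0.0, temps_F - base)   # cooling degree days
    hdd_wind = wind * (hdd > 0)             # wind speed kept only on heating days
    cdd_wind = wind * (cdd > 0)             # wind speed kept only on cooling days
    return hdd, cdd, hdd_wind, cdd_wind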
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
#
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
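# --- Illustrative sketch (not part of the workflow above): the per-zone blocks in this
# script repeat the same "bin by weighted temperature, fit one LinearRegression per bin,
# predict with the matching bin" pattern. A helper like this could express it once.
# The column layout (col 0 = weighted temperature, col 1 = peak load, features from col 2)
# mirrors the M matrices used here; for M_sim the features start at column 1 instead.
def fit_binned_regressions(M, edges):
    """Fit one LinearRegression per temperature bin; returns a list of (lo, hi, model)."""
    from sklearn import linear_model
    models = []
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (M[:, 0] >= lo) & (M[:, 0] < hi)
        reg = linear_model.LinearRegression()
        if mask.sum() > 0:  # skip empty bins, as the script does with its len(y) checks
            reg.fit(M[mask, 2:], M[mask, 1])
        models.append((lo, hi, reg))
    return models

def predict_binned(models, row, feature_start=2):
    """Pick the model whose bin contains row[0] and predict from row[feature_start:]."""
    import numpy as np
    for lo, hi, reg in models:
        if lo <= row[0] < hi:
            return reg.predict(row[feature_start:].reshape(1, -1))
    return np.nan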
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,PGEV_p)
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s) #
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
ResidualsLoad = R[0:3*365,:]
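# --- Illustrative sketch: with the residuals stacked column-wise in R, the per-zone RMSE
# values computed one at a time above can be obtained in a single expression. Zone order
# follows the column_stack call above (BPA, SDGE, SCE, PGE_V, PGE_B); not used by the script.
def per_zone_rmse(residual_matrix):
    import numpy as np
    return np.sqrt(np.mean(residual_matrix ** 2, axis=0))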
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
y = df_data.loc[:,'Path46']
#multivariate regression
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
Path46_p = predicted
# Residuals
residuals = predicted - y.values
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
H = df_data
#df_data.to_excel('Synthetic_demand_pathflows/cX.xlsx')
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path8'}, inplace=True)
df_data.rename(columns={4:'Path14'}, inplace=True)
df_data.rename(columns={5:'Path3'}, inplace=True)
df_data.rename(columns={6:'BPA_wind'}, inplace=True)
df_data.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data.rename(columns={8:'Weekday'}, inplace=True)
df_data.rename(columns={9:'Salem_HDD'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
NWPaths_p= np.zeros((len(cX),num_lines))
NWPaths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name='jan_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='feb_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='mar_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='apr_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='may_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jun_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jul_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='aug_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='sep_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='oct_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='nov_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='dec_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
name='jan_reg_NW' + str(line)
locals()[name].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
name='feb_reg_NW' + str(line)
locals()[name].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
name='mar_reg_NW' + str(line)
locals()[name].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
name='apr_reg_NW' + str(line)
locals()[name].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
name='may_reg_NW' + str(line)
locals()[name].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
name='jun_reg_NW' + str(line)
locals()[name].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
name='jul_reg_NW' + str(line)
locals()[name].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
name='aug_reg_NW' + str(line)
locals()[name].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
name='sep_reg_NW' + str(line)
locals()[name].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
name='oct_reg_NW' + str(line)
locals()[name].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
name='nov_reg_NW' + str(line)
locals()[name].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
name='dec_reg_NW' + str(line)
locals()[name].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jan_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='feb_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='mar_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='apr_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='may_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jun_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jul_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='aug_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='sep_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='oct_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='nov_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='dec_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
NWPaths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
NWPaths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsNWPaths = export_residuals
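# --- Illustrative sketch (alternative, not used above): the month-by-month models created
# through locals()['jan_reg_NW' + line] etc. can be held in a plain dict keyed by
# (line, month), which avoids dynamic variable names. The feature slice ('BPA_wind':)
# mirrors the loop above; everything else here is an assumption.
def fit_monthly_models(df_data, lines, feature_start='BPA_wind'):
    from sklearn import linear_model
    models = {}
    for line in lines:
        for month in range(1, 13):
            sub = df_data.loc[df_data['Month'] == month, :]
            reg = linear_model.LinearRegression()
            reg.fit(sub.loc[:, feature_start:], sub.loc[:, line])
            models[(line, month)] = reg
    return models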
###############################
# Other CA PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/OtherCA_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path61'}, inplace=True)
df_data.rename(columns={4:'Path42'}, inplace=True)
df_data.rename(columns={5:'Path24'}, inplace=True)
df_data.rename(columns={6:'Path45'}, inplace=True)
df_data.rename(columns={7:'BPA_wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
OtherCA_Paths_p= np.zeros((len(cX),num_lines))
OtherCA_Paths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
OtherCA_Paths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
OtherCA_Paths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsOtherCA_Paths = export_residuals
##########################
# PATH 65 & 66
##########################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/Path65_66_regression_data.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path65'}, inplace=True)
df_data.rename(columns={4:'Path66'}, inplace=True)
df_data.rename(columns={5:'Wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path65','Path66']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
Path65_66_p = np.zeros((len(cX),num_lines))
Path65_66_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'Wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'Wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'Wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'Wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'Wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'Wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'Wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'Wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'Wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'Wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'Wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'Wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
Path65_66_p[:,line_index] = predicted
Path65_66_y[:,line_index] = y.values
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
#
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
#R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
Residuals65_66 = export_residuals[730:,:]
#####################################################################
# Residual Analysis
#####################################################################
R = np.column_stack((ResidualsLoad,ResidualsNWPaths,ResidualsOtherCA_Paths,Residuals46,Residuals65_66))
rc = np.shape(R)
cols = rc[1]
mus = np.zeros((cols,1))
stds = np.zeros((cols,1))
R_w = np.zeros(np.shape(R))
sim_days = len(R_w)
#whiten residuals
for i in range(0,cols):
mus[i] = np.mean(R[:,i])
stds[i] = np.std(R[:,i])
R_w[:,i] = (R[:,i] - mus[i])/stds[i]
#Vector autoregressive model on residuals
model = VAR(R_w)
results = model.fit(1)
sim_residuals = np.zeros((sim_days,cols))
errors = np.zeros((sim_days,cols))
p = results.params
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,sim_days)
ys = np.zeros((cols,1))
# Generate cross correlated residuals
for i in range(0,sim_days):
for j in range(1,cols+1):
name='y' + str(j)
locals()[name]= p[0,j-1] + p[1,j-1]*y_seeds[0]+ p[2,j-1]*y_seeds[1]+ p[3,j-1]*y_seeds[2]+ p[4,j-1]*y_seeds[3]+ p[5,j-1]*y_seeds[4]+ p[6,j-1]*y_seeds[5]+ p[7,j-1]*y_seeds[6]+ p[8,j-1]*y_seeds[7]+ p[9,j-1]*y_seeds[8]+ p[10,j-1]*y_seeds[9]+ p[11,j-1]*y_seeds[10]+ p[12,j-1]*y_seeds[11]+ p[13,j-1]*y_seeds[12]+ p[14,j-1]*y_seeds[13]+ p[15,j-1]*y_seeds[14]+E[i,j-1]
for j in range(1,cols+1):
name='y' + str(j)
y_seeds[j-1]=locals()[name]
sim_residuals[i,:] = [y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15]
for i in range(0,cols):
sim_residuals[:,i] = sim_residuals[:,i]*stds[i]*(1/np.std(sim_residuals[:,i])) + mus[i]
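# --- Illustrative sketch: the element-wise VAR(1) update above is equivalent to one matrix
# expression per day. `params` is assumed to be the (1 + n) x n coefficient array returned by
# statsmodels (row 0 = intercepts, columns = equations), as in `results.params` above.
def simulate_var1(params, last_obs, shocks):
    """Simulate whitened residuals: y_t = c + y_{t-1} A + e_t."""
    import numpy as np
    params = np.asarray(params)
    c = params[0, :]            # intercepts
    A = params[1:, :]           # lag-1 coefficients (n x n)
    out = np.zeros_like(shocks)
    y_prev = np.asarray(last_obs, dtype=float)
    for t in range(shocks.shape[0]):
        y_prev = c + y_prev @ A + shocks[t, :]
        out[t, :] = y_prev
    return out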
#validation
Y = np.column_stack((np.reshape(BPA_y[0:3*365],(1095,1)),np.reshape(SDGE_y[0:3*365],(1095,1)),np.reshape(SCE_y[0:3*365],(1095,1)),np.reshape(PGEV_y[0:3*365],(1095,1)),np.reshape(PGEB_y[0:3*365],(1095,1)),NWPaths_y,OtherCA_Paths_y,np.reshape(Path46_y[730:],(1095,1)),np.reshape(Path65_66_y[730:,:],(1095,2))))
combined_BPA = np.reshape(sim_residuals[:,0],(1095,1)) + np.reshape(BPA_p[0:3*365],(1095,1))
combined_SDGE = np.reshape(sim_residuals[:,1],(1095,1)) + np.reshape(SDGE_p[0:3*365],(1095,1))
combined_SCE = np.reshape(sim_residuals[:,2],(1095,1)) + np.reshape(SCE_p[0:3*365],(1095,1))
combined_PGEV = np.reshape(sim_residuals[:,3],(1095,1)) + np.reshape(PGEV_p[0:3*365],(1095,1))
combined_PGEB = np.reshape(sim_residuals[:,4],(1095,1)) + np.reshape(PGEB_p[0:3*365],(1095,1))
combined_Path8 = np.reshape(sim_residuals[:,5],(1095,1)) + np.reshape(NWPaths_p[:,0],(1095,1))
combined_Path14 = np.reshape(sim_residuals[:,6],(1095,1)) + np.reshape(NWPaths_p[:,1],(1095,1))
combined_Path3 = np.reshape(sim_residuals[:,7],(1095,1)) + np.reshape(NWPaths_p[:,2],(1095,1))
combined_Path61 = np.reshape(sim_residuals[:,8],(1095,1)) + np.reshape(OtherCA_Paths_p[:,0],(1095,1))
combined_Path42 = np.reshape(sim_residuals[:,9],(1095,1)) + np.reshape(OtherCA_Paths_p[:,1],(1095,1))
combined_Path24 = np.reshape(sim_residuals[:,10],(1095,1)) + np.reshape(OtherCA_Paths_p[:,2],(1095,1))
combined_Path45 = np.reshape(sim_residuals[:,11],(1095,1)) + np.reshape(OtherCA_Paths_p[:,3],(1095,1))
combined_Path46 = np.reshape(sim_residuals[:,12],(1095,1)) + np.reshape(Path46_p[730:],(1095,1))
combined_Path65 = np.reshape(sim_residuals[:,13],(1095,1)) + np.reshape(Path65_66_p[730:,0],(1095,1))
combined_Path66 = np.reshape(sim_residuals[:,14],(1095,1)) + np.reshape(Path65_66_p[730:,1],(1095,1))
combined = np.column_stack((combined_BPA,combined_SDGE,combined_SCE,combined_PGEV,combined_PGEB,combined_Path8,combined_Path14,combined_Path3,combined_Path61,combined_Path42,combined_Path24,combined_Path45,combined_Path46,combined_Path65,combined_Path66))
rc = np.shape(Y)
cols = rc[1]
names = ['BPA','SDGE','SCE','PGEV','PGEB','Path8','Path14','Path3','Path61','Path42','Path24','Path45','Path46','Path65','Path66']
#for n in names:
#
# n_index = names.index(n)
#
# plt.figure()
# plt.plot(combined[:,n_index],'r')
# plt.plot(Y[:,n_index],'b')
# plt.title(n)
#
##########################################################################################################################################################
#Simulating demand and path
#########################################################################################################################################################
#Sim Residual
simulation_length=len(sim_weather)
syn_residuals = np.zeros((simulation_length,cols))
errors = np.zeros((simulation_length,cols))
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,simulation_length)
ys = np.zeros((cols,1))
for i in range(0,simulation_length):
for n in range(0,cols):
ys[n] = p[0,n]
for m in range(0,cols):
ys[n] = ys[n] + p[m+1,n]*y_seeds[m]
ys[n] = ys[n] + E[i,n]
for n in range(0,cols):
y_seeds[n] = ys[n]
syn_residuals[i,:] = np.reshape([ys],(1,cols))
for i in range(0,cols):
syn_residuals[:,i] = syn_residuals[:,i]*stds[i]*(1/np.std(syn_residuals[:,i])) + mus[i]
##################################################
# PATH NW
##################################################
#This only uses BPA wind and hydro
col_nw_T =['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_nw_W =['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_nw_T)
NW_sim_T=sim_weather[col_nw_T].values
NW_sim_W=sim_weather[col_nw_W].values
NW_sim_T_F=(NW_sim_T * 9/5) +32
NW_sim_W =NW_sim_W *2.23694
HDD_sim = np.zeros((simulation_length,num_cities))
CDD_sim = np.zeros((simulation_length,num_cities))
for i in range(0,simulation_length):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-NW_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,NW_sim_T_F[i,j] - 65))
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(NW_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(NW_sim_W,binary_HDD_sim)
#Need Month, Day, Year, Path8, Path14, Path3, BPA_wind, BPA_hydro
sim_BPA_hydro = pd.read_csv('PNW_hydro/FCRPS/Path_dams.csv',header=None)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:biovectors]
# language: python
# name: conda-env-biovectors-py
# ---
# # Get Publication Times for Pubmed Abstracts
# +
import csv
from pathlib import Path
import time
import pandas as pd
import requests
import tqdm
# -
# Write the api caller function
def call_entrez(pubmed_ids):
while True:
try:
url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&retmode=json&id="
id_str = ",".join(map(str, pubmed_ids))
response = requests.get(f"{url}{id_str}")
assert response.status_code == 200
response = response.json()
return response["result"]
except Exception as e:
print(e)
print("Had an error will try again in thirty minutes!!")
time.sleep(1800)
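# A possible way to batch calls to `call_entrez` (illustrative sketch, not executed in this
# notebook): request IDs in chunks to stay well under E-utilities URL and rate limits.
# The chunk size of 200 and the 0.5 s pause are assumptions, not NCBI-documented values.
def fetch_summaries(pubmed_ids, chunk_size=200):
    results = {}
    ids = list(pubmed_ids)
    for start in range(0, len(ids), chunk_size):
        chunk = ids[start:start + chunk_size]
        # merge the esummary 'result' dicts; the per-chunk 'uids' key is simply overwritten
        results.update(call_entrez(chunk))
        time.sleep(0.5)  # be polite to the API; adjust as needed
    return results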
pmid_df = pd.read_csv("output/pmid.tsv", sep="\t", names=["pmid"])
print(pmid_df.shape)
pmid_df.head()
# +
if Path("output/pmid_to_pub_date.tsv").exists():
# Start from checkpoint incase something goes wrong
parsed_ids = pd.read_csv("output/pmid_to_pub_date.tsv", sep="\t")
parsed_ids_set = set(parsed_ids.pmid.tolist())
remaining_ids = set(pmid_df.pmid.tolist()) - parsed_ids_set
pmid_df = pd.DataFrame(remaining_ids, columns=["pmid"])
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", | Series([False, False, True, True]) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score
from imblearn.over_sampling import BorderlineSMOTE
from collections import Counter
import gc as gc
from sklearn.feature_selection import RFE
#-------------------------------------------------------------------------------------------------------------------------
def kfold_smote_RFE(features_num, classifier, folds, df_train_filtered_std, y_train, smote='y'):
"""K_fold training/validation for RFE with LightGBM/RandomForest/XGBoost/CATBoost,
with SMOTE train re-sampling,
features_num-> select the number of features for RFE"""
# get a list of models to evaluate
def get_models():
models = dict()
for i in range(2, features_num+1):
models[str(i)] = RFE(estimator=classifier, n_features_to_select=i)
return models
# data from each fold
fold_results = list()
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_filtered_std, y_train)):
train_x, train_y = df_train_filtered_std.iloc[train_idx], y_train.iloc[train_idx]
valid_x, valid_y = df_train_filtered_std.iloc[valid_idx], y_train.iloc[valid_idx]
# summarize class distribution
counter = Counter(train_y)
print('\n-----------------------------------------------------')
print('Fold %2d, original distribution: ' % (n_fold + 1))
print(counter)
if smote=='y':
# transform the dataset
oversample = BorderlineSMOTE()
train_x, train_y = oversample.fit_resample(train_x, train_y)
# summarize the new class distribution
counter = Counter(train_y)
print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
print(counter)
# get the models to evaluate
models = get_models()
# evaluate the models and store results
models_results, names = list(), list()
for name, model in models.items():
# Print the number of features of the model
print('\nFeatures:%s' % (name))
# fit RFE
model.fit(train_x, train_y)
# validation per model
probas = model.predict_proba(valid_x)[:, 1]
# ROC-AUC per model
AUC = roc_auc_score(valid_y, probas)
# Collecting results
models_results.append(AUC)
names.append(name)
# summarize all features
for i in range(train_x.shape[1]):
print('Column: %d, Selected %s, Rank: %.3f' % (i, model.support_[i], model.ranking_[i]))
# Print AUC score
print(f'\nAUC: {AUC}')
print('\nModels results')
print(models_results)
fold_results.append(models_results)
print('\nFolds results')
print(fold_results)
fold_results = np.asarray(fold_results)
# plot model performance for comparison
plt.figure(figsize=(15,10))
plt.boxplot(fold_results, labels=range(2,features_num+1), showmeans=True)
plt.title('RECURSIVE FEATURE ELIMINATION'
f'\n\ntrain re-sampling (SMOTE):"{smote}"',fontsize=20)
plt.xlabel('Numbers of features selected',fontsize=15)
plt.ylabel('Crossvalidation AUC',fontsize=15)
plt.ylim((0.5, 0.8))
# save
plt.savefig(f'projets\\07_loan_customer_scoring\\production\\savefig\\model_test_{smote_case}\\feature_selection\\{class_weigh_case}\\feature_selection_RFE_feature_number.png', transparent=True)
plt.show()
return fold_results
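# Illustrative usage sketch for kfold_smote_RFE, wrapped in a function so nothing runs on
# import. The LightGBM estimator, fold count, and feature count are assumptions; the function
# also expects module-level plt, smote_case and class_weigh_case to exist for the savefig call.
def _example_rfe_usage(df_train_filtered_std, y_train):
    from sklearn.model_selection import StratifiedKFold
    from lightgbm import LGBMClassifier  # any estimator exposing feature_importances_ works with RFE
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    clf = LGBMClassifier(n_estimators=200, random_state=42)
    return kfold_smote_RFE(features_num=10, classifier=clf, folds=folds,
                           df_train_filtered_std=df_train_filtered_std,
                           y_train=y_train, smote='y')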
#-------------------------------------------------------------------------------------------------------------------------
# Classification with kfold available for several algorithms
def kfold_classif(classifier, folds, df_train_std, target_train, df_val_std, target_val, custom_loss, fbeta, fbeta_number=0, logistic_regression=False, train_resampling='n', eval_set=False, scorer='auc', early_stopping_rounds=None, verbose=200):
"""K_fold training/validation for DecisionTree/RandomForest/LightGBM/XGBoost/CATBoost/LogisticRegression,
train_resampling-> borderline smote re-sampling on the train part,
fbetanumber-> for function to optimize"""
""""num_iteration=clf.best_iteration_ can be added in the predict_proba() when callable """
# Create arrays and dataframes to store results
crossvalid_probas = np.zeros(df_train_std.shape[0])
valid_probas = np.zeros(df_val_std.shape[0])
fold_AUC_list = []
feature_importance_df = pd.DataFrame()
feats = [f for f in df_train_std.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
# Modification of columns
df_train_std_2 = df_train_std[feats]
df_val_std_2 = df_val_std[feats]
df_train_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_train_std_2.columns]
df_val_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_val_std_2.columns]
# define thresholds
thresholds = np.arange(0, 1, 0.001)
# apply threshold to positive probabilities to create labels
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
def custom_cost_function(testy, yhat):
# get the fn and the fp from the confusion matrix
tn, fp, fn, tp = confusion_matrix(testy, yhat).ravel()
# function
y = 10*fn + fp
return y
# data from each fold
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_std_2, target_train)):
train_x, train_y = df_train_std_2.iloc[train_idx], target_train.iloc[train_idx]
valid_x, valid_y = df_train_std_2.iloc[valid_idx], target_train.iloc[valid_idx]
# Re-sampling
if train_resampling=='y':
# summarize class distribution
counter = Counter(train_y)
print('Fold %2d, original distribution: ' % (n_fold + 1))
print(counter)
# transform the dataset
oversample = BorderlineSMOTE()
train_x, train_y = oversample.fit_resample(train_x, train_y)
# summarize the new class distribution
counter = Counter(train_y)
print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
print(counter)
# classifier instance
clf = classifier
# fitting
if eval_set==True:
clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
eval_metric=scorer, verbose=verbose, early_stopping_rounds=early_stopping_rounds)
if eval_set==False:
clf.fit(train_x, train_y)
# validation
crossvalid_probas[valid_idx] = clf.predict_proba(valid_x)[:, 1]
# ROC-AUC
AUC = roc_auc_score(valid_y, crossvalid_probas[valid_idx])
fold_AUC_list.append(AUC)
# showing results from each fold
print('Fold %2d AUC : %.6f' % (n_fold + 1, AUC))
# Collecting results
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
# Classifier case
if logistic_regression==True:
fold_importance_df["importance"] = clf.coef_[0]
if logistic_regression==False:
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
fold_importance_df["val_fold_AUC"] = AUC
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
feature_importance_df.sort_values(by='importance', ascending=False, inplace=True)
#validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
valid_probas += clf.predict_proba(df_val_std)[:, 1] / folds.n_splits
del train_x, train_y, valid_x, valid_y
gc.collect()
# Final performance
mean_crossvalid_fold_ROC_AUC = sum(fold_AUC_list)/len(fold_AUC_list)
print('Mean cross-validation ROC-AUC score %.6f' % mean_crossvalid_fold_ROC_AUC)
#validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
validation_ROC_AUC = roc_auc_score(target_val, valid_probas)
print('Validation ROC-AUC score %.6f' % validation_ROC_AUC)
# Optimising the threshold
if (fbeta==True)&(fbeta_number!=0):
# evaluate each threshold with f-beta loss function
scores = [fbeta_score(target_val.values, to_labels(valid_probas, t), average='weighted', beta=fbeta_number) for t in thresholds]
# get best threshold
ix = np.argmax(scores)
print(f'Threshold=%.3f, F-{fbeta_number} score_max=%.5f' % (thresholds[ix], scores[ix]))
best_score = scores[ix]
threshold = thresholds[ix]
if custom_loss=='y':
# evaluate each threshold with custom loss function
scores = [custom_cost_function(target_val.values, to_labels(valid_probas, t)) for t in thresholds]
# get best threshold
ix = np.argmin(scores)
print(f'Threshold=%.3f, Custom loss function (10*fn + fp) score_min=%.5f' % (thresholds[ix], scores[ix]))
best_score = scores[ix]
threshold = thresholds[ix]
return clf, feature_importance_df, mean_crossvalid_fold_ROC_AUC, validation_ROC_AUC, best_score, threshold
#-------------------------------------------------------------------------------------------------------------------------
# One hot encoder (with recovery of the generated column labels)
from sklearn.preprocessing import OneHotEncoder as SklearnOneHotEncoder
import pandas as pd
import numpy as np
class OneHotEncoder(SklearnOneHotEncoder):
def __init__(self, **kwargs):
super(OneHotEncoder, self).__init__(**kwargs)
self.fit_flag = False
def fit(self, X, **kwargs):
out = super().fit(X)
self.fit_flag = True
return out
def transform(self, X, **kwargs):
sparse_matrix = super(OneHotEncoder, self).transform(X)
new_columns = self.get_new_columns(X=X)
d_out = pd.DataFrame(sparse_matrix.toarray(), columns=new_columns, index=X.index)
return d_out
def fit_transform(self, X, **kwargs):
self.fit(X)
return self.transform(X)
def get_new_columns(self, X):
new_columns = []
for i, column in enumerate(X.columns):
j = 0
while j < len(self.categories_[i]):
new_columns.append(f'{column}_<{self.categories_[i][j]}>')
j += 1
return new_columns
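def _demo_one_hot_encoder():
    """Illustrative usage sketch added for clarity (not in the original module).

    Builds a tiny, made-up DataFrame and shows the column names produced by the
    wrapper above; it only assumes scikit-learn and pandas, already imported.
    """
    demo = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': ['S', 'M', 'S']})
    encoder = OneHotEncoder()
    encoded = encoder.fit_transform(demo)
    # Each new column keeps the source name plus the category, e.g. 'color_<blue>'
    print(encoded.columns.tolist())
    return encoded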
#-------------------------------------------------------------------------------------------------------------------------
# Target Encoding or One Hot Encoding (creates one new column)
def encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Transform a dataframe column using an already fitted encoder.

    Pass the fitted model, the name of the new column, and a fixed key column
    used for the merge.
    """
    # Build a working dataframe with the target column, keyed on fix_column as index,
    # so the row order is not lost if .transform re-indexes the data:
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = trained_model.transform(dataframe_work[column])
dataframe_work.drop(column, axis=1, inplace=True)
    # The key column was moved to the index and comes back after reset_index:
    dataframe_work.reset_index(inplace=True)
    # Merge back with the original dataframe on the shared fix_column key:
dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
return dataframe
# Label Encoding or One Hot Encoding (creates one new column)
def label_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Transform a dataframe column using an already fitted label encoder.

    Pass the fitted model, the name of the new column, and a fixed key column
    used for the merge.
    """
    # Build a working dataframe with the target column, keyed on fix_column as index,
    # so the row order is not lost if .transform re-indexes the data:
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = dataframe_work[column].apply(lambda x: trained_model.transform([x])[0] if pd.notna(x) else np.NaN)
dataframe_work.drop(column, axis=1, inplace=True)
    # The key column was moved to the index and comes back after reset_index:
    dataframe_work.reset_index(inplace=True)
    # Merge back with the original dataframe on the shared fix_column key:
dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
return dataframe
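def _demo_label_encoding_merge():
    """Illustrative usage sketch added for clarity (not in the original module).

    Applies label_encoding_transform_with_merge with a scikit-learn LabelEncoder
    on a small, made-up dataframe; the encoder choice and the data are assumptions.
    """
    from sklearn.preprocessing import LabelEncoder
    demo = pd.DataFrame({'id': [10, 11, 12], 'city': ['Paris', 'Lyon', 'Paris']})
    le = LabelEncoder().fit(demo['city'])
    out = label_encoding_transform_with_merge(demo, column='city', fix_column='id',
                                              trained_model=le, column_new_name='city_enc')
    # 'city_enc' now holds the integer codes learned by the encoder
    print(out)
    return out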
# Target Encoding or One Hot Encoding (creates one new column)
def target_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Transform a dataframe column using an already fitted target encoder.

    Pass the fitted model, the name of the new column, and a fixed key column
    used for the merge.
    """
    # Build a working dataframe with the target column, keyed on fix_column as index,
    # so the row order is not lost if .transform re-indexes the data:
dataframe_work = pd.DataFrame(dataframe[[column,fix_column]], columns=[column,fix_column])
dataframe_work.set_index([fix_column], inplace = True)
# Transform
dataframe_work[column_new_name] = trained_model.transform(dataframe_work[column])
dataframe_work.drop(column, axis=1, inplace=True)
    # The key column was moved to the index and comes back after reset_index:
    dataframe_work.reset_index(inplace=True)
    # Merge back with the original dataframe on the shared fix_column key:
    dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
    return dataframe
'''Some helper functions for data ETL including:
- Load features from dataframe
- Normalization and denormalize
- Load dataset, pytorch dataset
- Load adjacent matrix, load graph network
- Preprocess dataset
'''
import numpy as np
import pandas as pd
import torch
from datetime import datetime
import dgl
###
# Function: load features from a given CSV file into a numpy array
def load_features(feat_path, dtype=np.float32):
feat_df = pd.read_csv(feat_path)
feat = np.array(feat_df, dtype=dtype)
return feat
###
# Function: normalize data using min max approach
def min_max_normalization(array):
return 2*(array-np.min(array))/(np.max(array)-np.min(array))-1
###
# Function: denormalize the array given min max
def denormalize(array, min, max):
return (array+1) * (max - min)/2 + min
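# Illustrative check added for clarity (not in the original module): the two
# helpers above are inverses of each other; the array below is synthetic.
def _demo_min_max_round_trip():
    arr = np.array([3.0, 7.0, 11.0])
    scaled = min_max_normalization(arr)                       # -> [-1., 0., 1.]
    restored = denormalize(scaled, np.min(arr), np.max(arr))  # -> [3., 7., 11.]
    return scaled, restored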
###
# Function: get csv file and return pandas dataframe
# Input: path to csv file
# Output: pandas dataframe
def load_dataset(feat_path, dtype=np.float32):
    feat_df = pd.read_csv(feat_path)
    return feat_df
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
            index=RangeIndex(stop=2),
        )
        tm.assert_frame_equal(result, expected)
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index= | pd.Int64Index([1]) | pandas.Int64Index |
import pandas as pd
import numpy as np
import os
# Function to import multiple files into a dictionary - use for global country and region data.
def get_data(path, name):
'''Function to read in data files from csv and import it into a dictionary of dataframes - used for global country and region data.'''
filenames = os.listdir(path)
filenames = [f for f in filenames if f.lower().endswith(".csv")]
filenames.sort()
dataframes = {}
df_names = []
for filename in filenames:
df_name = name + "_" + filename[0:2]# the name for the dataframe
df_names.append(df_name)
file = "{}/{}".format(path,filename)
dataframes[df_name] = pd.read_csv(file)
print('Read in data completed.')
return dataframes
# Function to make the date column the proper data type and add a month column.
def insert_month(df):
'''Changes object date to datetime object date and creates a column with month.'''
df['date'] = pd.to_datetime(df.loc[:,'date'])
df["month"]= | pd.DatetimeIndex(df['date']) | pandas.DatetimeIndex |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_testing_acc(x, y):
df = pd.DataFrame({'x': x, 'y': y, 'z': y})
f1 = plt.figure(1)
plt.plot('x','y',data=df, marker='o', color='blue')
plt.title("Testing accuracy vs Number of Batches (100s)")
plt.xlabel("Number of Batches")
plt.ylabel("Test Accuracy")
f1.show()
def plot_training_acc(x, y):
df = pd.DataFrame({'x': x, 'y': y, 'z': y})
f2= plt.figure(2)
plt.plot('x','y',data=df, marker='o', color='green')
plt.title("Training Accuracy vs Number of Batches (100s)")
plt.xlabel("Number of Batches")
plt.ylabel("Train Accuracy")
f2.show()
def plot_training_vs_test(x, y):
df = pd.DataFrame({'x': x, 'y': y, 'z': y})
f3= plt.figure(3)
plt.plot('x','y',data=df, marker='o', color='mediumvioletred')
plt.title("Testing Accuracy vs Training Accuracy")
plt.xlabel("Training Accuracy")
plt.ylabel("Testing Accuracy")
f3.show()
#only care about labels 0, 1, 2, 4
#can have a line plot of compton events
def plot_labels_accuracy_compton(batches, heights):
label_0_height = heights[0]
label_1_height = heights[1]
label_2_height = heights[2]
label_4_height = heights[4]
df_0 = pd.DataFrame({'x0': batches, 'y0': label_0_height})
    df_1 = pd.DataFrame({'x1': batches, 'y1': label_1_height})
# -*- coding: utf-8 -*-
import functools
import os
from collections import Counter
from multiprocessing import Pool as ThreadPool
from random import sample
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from apps.kemures.kernel.config.global_var import MAX_THREAD
class AnalyticsOverview:
def __init__(self):
# Load data
self.__song_msd_df = pd.read_csv(os.getcwd() + '/datasets/oneMillionSongs/clean_set/songs.csv')
song_by_track_df = pd.read_csv(os.getcwd() + '/datasets/oneMillionSongs/clean_set/unique_tracks.txt',
sep='<SEP>', names=['track_id', 'id', 'title', 'artist'])
gender_df = pd.read_csv(os.getcwd() + '/datasets/oneMillionSongs/clean_set/msd-MAGD-genreAssignment.cls',
sep='\t', names=['track_id', 'gender'])
self.__users_preferences_df = pd.read_csv(
os.getcwd() + '/datasets/oneMillionSongs/clean_set/train_triplets.txt',
sep='\t', names=['user_id', 'song_id', 'play_count'])
# Remove Duplicate
song_by_track_df = song_by_track_df.drop(['title', 'artist'], axis=1)
song_by_track_df = song_by_track_df.drop_duplicates(['id'])
# Join
self.__song_msd_df = pd.merge(self.__song_msd_df, song_by_track_df, how='left', left_on='id',
right_on='id')
        merg = pd.merge(self.__song_msd_df, gender_df, how='inner', left_on='track_id', right_on='track_id')
# get the latest version of pandas_profiling
from pathlib import Path
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
if __name__ == "__main__":
# data set location http://eforexcel.com/wp/downloads-18-sample-csv-files-data-sets-for-testing-sales/
df=pd.read_csv("/home/prasad/Downloads/sales_records.csv")
# Prepare missing values
df = df.replace("\\?", np.nan, regex=True)
# changing date fields from categorical to date
df['Order Date'] = pd.to_datetime(df['Order Date'],infer_datetime_format=True )
    df['Ship Date'] = pd.to_datetime(df['Ship Date'], infer_datetime_format=True)
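    # Illustrative next step added for clarity (the original script is truncated
    # here): a typical pandas_profiling flow would build and save a report, e.g.:
    # profile = ProfileReport(df, title="Sales records profiling report", explorative=True)
    # profile.to_file(Path("sales_records_profile.html"))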
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
options.beginning_epoch = 0
else:
if not os.path.exists(filename):
raise ValueError('The training.tsv file of the resumed experiment does not exist.')
truncated_tsv = pd.read_csv(filename, sep='\t')
truncated_tsv.set_index(['epoch', 'iteration'], inplace=True)
truncated_tsv.drop(options.beginning_epoch, level=0, inplace=True)
truncated_tsv.to_csv(filename, index=True, sep='\t')
# Create writers
writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
writer_valid = SummaryWriter(os.path.join(log_dir, 'validation'))
# Initialize variables
best_valid_accuracy = -1.0
best_valid_loss = np.inf
epoch = options.beginning_epoch
model.train() # set the model to training mode
train_loader.dataset.train()
early_stopping = EarlyStopping('min', min_delta=options.tolerance, patience=options.patience)
mean_loss_valid = None
t_beginning = time()
while epoch < options.epochs and not early_stopping.step(mean_loss_valid):
logger.info("Beginning epoch %i." % epoch)
model.zero_grad()
evaluation_flag = True
step_flag = True
tend = time()
total_time = 0
for i, data in enumerate(train_loader, 0):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, train_output = model(imgs)
kl_loss = kl_divergence(z, mu, std)
loss = criterion(train_output, labels) + kl_loss
else:
train_output = model(imgs)
loss = criterion(train_output, labels)
# Back propagation
loss.backward()
del imgs, labels
if (i + 1) % options.accumulation_steps == 0:
step_flag = False
optimizer.step()
optimizer.zero_grad()
del loss
# Evaluate the model only when no gradients are accumulated
if options.evaluation_steps != 0 and (i + 1) % options.evaluation_steps == 0:
evaluation_flag = False
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = i + epoch * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], i))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], i))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
tend = time()
logger.debug('Mean time per batch loading: %.10f s'
% (total_time / len(train_loader) * train_loader.batch_size))
# If no step has been performed, raise Exception
if step_flag:
raise Exception('The model has not been updated once in the epoch. The accumulation step may be too large.')
# If no evaluation has been performed, warn the user
elif evaluation_flag and options.evaluation_steps != 0:
warnings.warn('Your evaluation steps are too big compared to the size of the dataset. '
'The model is evaluated only once at the end of the epoch.')
# Always test the results and save them once at the end of the epoch
model.zero_grad()
logger.debug('Last checkpoint at the end of the epoch %d' % epoch)
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = (epoch + 1) * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], len(train_loader)))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], len(train_loader)))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
accuracy_is_best = results_valid["balanced_accuracy"] > best_valid_accuracy
loss_is_best = mean_loss_valid < best_valid_loss
best_valid_accuracy = max(results_valid["balanced_accuracy"], best_valid_accuracy)
best_valid_loss = min(mean_loss_valid, best_valid_loss)
save_checkpoint({'model': model.state_dict(),
'epoch': epoch,
'valid_loss': mean_loss_valid,
'valid_acc': results_valid["balanced_accuracy"]},
accuracy_is_best, loss_is_best,
model_dir)
# Save optimizer state_dict to be able to reload
save_checkpoint({'optimizer': optimizer.state_dict(),
'epoch': epoch,
'name': options.optimizer,
},
False, False,
model_dir,
filename='optimizer.pth.tar')
epoch += 1
os.remove(os.path.join(model_dir, "optimizer.pth.tar"))
os.remove(os.path.join(model_dir, "checkpoint.pth.tar"))
def evaluate_prediction(y, y_pred):
"""
Evaluates different metrics based on the list of true labels and predicted labels.
Args:
y: (list) true labels
y_pred: (list) corresponding predictions
Returns:
(dict) ensemble of metrics
"""
true_positive = np.sum((y_pred == 1) & (y == 1))
true_negative = np.sum((y_pred == 0) & (y == 0))
false_positive = np.sum((y_pred == 1) & (y == 0))
false_negative = np.sum((y_pred == 0) & (y == 1))
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
}
return results
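# Illustrative call (not part of the original source); with these toy arrays the returned
# dict has accuracy 0.75, sensitivity 0.5, specificity 1.0 and balanced_accuracy 0.75:
#   y_true = np.array([0, 1, 1, 0])
#   y_pred = np.array([0, 1, 0, 0])
#   metrics = evaluate_prediction(y_true, y_pred)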
def test(model, dataloader, use_cuda, criterion, mode="image", use_labels=True):
"""
Computes the predictions and evaluation metrics.
Args:
model: (Module) CNN to be tested.
dataloader: (DataLoader) wrapper of a dataset.
use_cuda: (bool) if True a gpu is used.
criterion: (loss) function to calculate the loss.
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
use_labels (bool): If True the true_label will be written in output DataFrame and metrics dict will be created.
Returns
(DataFrame) results of each input.
(dict) ensemble of metrics + total loss on mode level.
"""
model.eval()
dataloader.dataset.eval()
if mode == "image":
columns = ["participant_id", "session_id", "true_label", "predicted_label"]
elif mode in ["patch", "roi", "slice"]:
columns = ['participant_id', 'session_id', '%s_id' % mode, 'true_label', 'predicted_label', 'proba0', 'proba1']
else:
raise ValueError("The mode %s is invalid." % mode)
softmax = torch.nn.Softmax(dim=1)
results_df = pd.DataFrame(columns=columns)
total_loss = 0
total_kl_loss = 0
total_time = 0
tend = time()
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
t0 = time()
total_time = total_time + t0 - tend
if use_cuda:
inputs, labels = data['image'].cuda(), data['label'].cuda()
else:
inputs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, outputs = model(inputs)
kl_loss = kl_divergence(z, mu, std)
total_kl_loss += kl_loss.item()
else:
outputs = model(inputs)
if use_labels:
loss = criterion(outputs, labels)
total_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
if mode == "image":
row = [[sub, data['session_id'][idx], labels[idx].item(), predicted[idx].item()]]
else:
normalized_output = softmax(outputs)
row = [[sub, data['session_id'][idx], data['%s_id' % mode][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]]
row_df = pd.DataFrame(row, columns=columns)
results_df = pd.concat([results_df, row_df])
del inputs, outputs, labels
tend = time()
results_df.reset_index(inplace=True, drop=True)
if not use_labels:
results_df = results_df.drop("true_label", axis=1)
metrics_dict = None
else:
metrics_dict = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
metrics_dict['total_loss'] = total_loss
metrics_dict['total_kl_loss'] = total_kl_loss
torch.cuda.empty_cache()
return results_df, metrics_dict
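# Typical call (illustrative): results_df, metrics = test(model, valid_loader, options.gpu, criterion)
# metrics['total_loss'] is the loss summed over all batches; dividing it by
# len(valid_loader) * valid_loader.batch_size recovers the mean loss logged during training.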
def sort_predicted(model, data_df, input_dir, model_options, criterion, keep_true,
batch_size=1, num_workers=0, gpu=False):
from .data import return_dataset, get_transforms
from torch.utils.data import DataLoader
from copy import copy
if keep_true is None:
return data_df
_, all_transforms = get_transforms(model_options.mode, model_options.minmaxnormalization)
dataset = return_dataset(mode=model_options.mode, input_dir=input_dir,
data_df=data_df, preprocessing=model_options.preprocessing,
train_transformations=None, all_transformations=all_transforms,
params=model_options)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
test_options = copy(model_options)
test_options.gpu = gpu
results_df, _ = test(model, dataloader, gpu, criterion, model_options.mode, use_labels=True)
sorted_df = data_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
results_df = results_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
if keep_true:
return sorted_df[results_df.true_label == results_df.predicted_label].reset_index(drop=True)
else:
return sorted_df[results_df.true_label != results_df.predicted_label].reset_index(drop=True)
#################################
# Voting systems
#################################
def mode_level_to_tsvs(output_dir, results_df, metrics, fold, selection, mode, dataset='train', cnn_index=None):
"""
Writes the outputs of the test function in tsv files.
Args:
output_dir: (str) path to the output directory.
results_df: (DataFrame) the individual results per patch.
metrics: (dict or DataFrame) the performances obtained on a series of metrics.
fold: (int) the fold for which the performances were obtained.
selection: (str) the metrics on which the model was selected (best_acc, best_loss)
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
dataset: (str) the dataset on which the evaluation was performed.
cnn_index: (int) provide the cnn_index only for a multi-cnn framework.
"""
if cnn_index is None:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection)
else:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index,
selection)
os.makedirs(performance_dir, exist_ok=True)
results_df.to_csv(os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode)), index=False,
sep='\t')
if metrics is not None:
metrics["%s_id" % mode] = cnn_index
if isinstance(metrics, dict):
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
elif isinstance(metrics, pd.DataFrame):
metrics.to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
else:
raise ValueError("Bad type for metrics: %s. Must be dict or DataFrame." % type(metrics).__name__)
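# The files written above follow the pattern
# <output_dir>/fold-<fold>/cnn_classification[/cnn-<cnn_index>]/<selection>/<dataset>_<mode>_level_{prediction,metrics}.tsv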
def concat_multi_cnn_results(output_dir, fold, selection, mode, dataset, num_cnn):
"""Concatenate the tsv files of a multi-CNN framework"""
prediction_df = pd.DataFrame()
metrics_df = pd.DataFrame()
for cnn_index in range(num_cnn):
cnn_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index)
performance_dir = os.path.join(cnn_dir, selection)
cnn_pred_path = os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode))
cnn_metrics_path = os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode))
cnn_pred_df = pd.read_csv(cnn_pred_path, sep='\t')
prediction_df = pd.concat([prediction_df, cnn_pred_df])
os.remove(cnn_pred_path)
if os.path.exists(cnn_metrics_path):
cnn_metrics_df = pd.read_csv(cnn_metrics_path, sep='\t')
metrics_df = pd.concat([metrics_df, cnn_metrics_df])
os.remove(cnn_metrics_path)
# Clean unused files
if len(os.listdir(performance_dir)) == 0:
os.rmdir(performance_dir)
if len(os.listdir(cnn_dir)) == 0:
os.rmdir(cnn_dir)
prediction_df.reset_index(drop=True, inplace=True)
if len(metrics_df) == 0:
metrics_df = None
else:
metrics_df.reset_index(drop=True, inplace=True)
mode_level_to_tsvs(output_dir, prediction_df, metrics_df, fold, selection, mode, dataset)
def retrieve_sub_level_results(output_dir, fold, selection, mode, dataset, num_cnn):
"""Retrieve performance_df for single or multi-CNN framework.
If the results of the multi-CNN were not concatenated it will be done here."""
result_tsv = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection,
'%s_%s_level_prediction.tsv' % (dataset, mode))
if os.path.exists(result_tsv):
performance_df = pd.read_csv(result_tsv, sep='\t')
else:
concat_multi_cnn_results(output_dir, fold, selection, mode, dataset, num_cnn)
performance_df = pd.read_csv(result_tsv, sep='\t')
return performance_df
def soft_voting_to_tsvs(output_dir, fold, selection, mode, dataset='test', num_cnn=None,
selection_threshold=None, logger=None, use_labels=True):
"""
Writes soft voting results in tsv files.
Args:
output_dir: (str) path to the output directory.
fold: (int) Fold number of the cross-validation.
selection: (str) criterion on which the model is selected (either best_loss or best_acc)
mode: (str) input used by the network. Chosen from ['patch', 'roi', 'slice'].
dataset: (str) name of the dataset for which the soft-voting is performed. If different from training or
validation, the weights of soft voting will be computed on validation accuracies.
num_cnn: (int) if given load the patch level results of a multi-CNN framework.
selection_threshold: (float) all patches for which the classification accuracy is below the
threshold are removed.
logger: (logging object) writer to stdout and stderr
use_labels: (bool) If True the labels are added to the final tsv
"""
if logger is None:
logger = logging
# Choose which dataset is used to compute the weights of soft voting.
if dataset in ['train', 'validation']:
validation_dataset = dataset
else:
validation_dataset = 'validation'
test_df = retrieve_sub_level_results(output_dir, fold, selection, mode, dataset, num_cnn)
validation_df = retrieve_sub_level_results(output_dir, fold, selection, mode, validation_dataset, num_cnn)
performance_path = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection)
os.makedirs(performance_path, exist_ok=True)
df_final, metrics = soft_voting(test_df, validation_df, mode, selection_threshold=selection_threshold,
use_labels=use_labels)
df_final.to_csv(os.path.join(performance_path, '%s_image_level_prediction.tsv' % dataset),
index=False, sep='\t')
if use_labels:
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(performance_path, '%s_image_level_metrics.tsv' % dataset), index=False, sep='\t')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Purpose: Perform Incremental PCA on QPESUMS data.
Description:
--input: directory that contains QPESUMS data as *.npy (6*275*162)
--output: the prefix of output files.
--filter: a file containing the list of timestamps used to filter the input data for processing.
--n_components: number of components to output.
--batch_size: the size of each data batch for incremental processing, default=100.
--randomseed: integer used as the random seed, default="1234543".
"""
import os, sys, csv, logging, argparse, pickle, h5py
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA, IncrementalPCA
import joblib
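# Example invocation (illustrative; the script name and paths are placeholders):
#   python qpesums_ipca.py --input ./qpesums_npy --output ipca_qpesums \
#       --n_components 20 --batch_size 100 --log_flag 1 --logfile ipca.log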
''' Input processing '''
# Scan QPESUMS data in *.npy: 6*275*162
def search_dbz(srcdir):
import pandas as pd
fileinfo = []
for subdir, dirs, files in os.walk(srcdir, followlinks=True):
for f in files:
if f.endswith('.npy'):
# Parse file name for time information
furi = os.path.join(subdir, f)
finfo = f.split('.')
ftime = finfo[0]
#logging.debug([furi] + finfo[1:3])
fileinfo.append([furi, ftime])
results = pd.DataFrame(fileinfo, columns=['furi', 'timestamp'])
results = results.sort_values(by=['timestamp']).reset_index(drop=True)
return(results)
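# Illustrative output (file names are hypothetical): search_dbz('./qpesums_npy') returns a
# DataFrame with columns ['furi', 'timestamp'] sorted by timestamp, with one row per file
# such as ('./qpesums_npy/201806140010.npy', '201806140010').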
# Read uris containing QPESUMS data in the format of 6*275*162
def loadDBZ(flist, to_log=False):
''' Load a list of dbz files (in npy format) into one numpy array. '''
xdata = []
for f in flist:
tmp = np.load(f)
# Append new record
if tmp is not None: # Append the flattened data array if it is not None
xdata.append(tmp.flatten())
x = np.array(xdata, dtype=np.float32)
# Convert to log space if specified
if to_log:
x = np.log(x+1)
# done
return(x)
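# The returned array has shape (len(flist), 6*275*162); with to_log=True the values are log(1 + dbz).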
''' Perform Incremental PCA '''
def fit_ipca_partial(finfo, nc=20, bs=100, log_flag=False):
nrec = finfo.shape[0]
# Initialize the IncrementalPCA object
ipca = IncrementalPCA(n_components=nc, batch_size=bs)
# Check whether the last batch size is smaller than n_components
flag_merge_last_batch = False
if np.mod(nrec, bs)<nc:
logging.warning('The last batch is smaller than n_component, merge the last two batches.')
flag_merge_last_batch = True
# Setup batch counter
n_batch = int(np.floor(nrec/bs))
if not flag_merge_last_batch:
n_batch = n_batch + 1
logging.debug('Number of batches: '+str(n_batch))
# Loop through the first (n_batch-1) batch
for i in range(n_batch-1):
# Read batch data
i1 = i * bs
i2 = i1 + bs
# Load batch data
dbz = loadDBZ(finfo['furi'].iloc[i1:i2], to_log=log_flag)
logging.debug('Batch dimension: '+ str(dbz.shape))
# Partial fit with batch data
ipca.partial_fit(dbz)
# In case there is only one batch
if n_batch==1:
i2 = 0
# Fit the last batch
dbz = loadDBZ(finfo['furi'].iloc[i2:nrec], to_log=log_flag)
logging.debug('Final batch dimension: '+ str(dbz.shape))
ipca.partial_fit(dbz)
# done
return(ipca)
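# Minimal usage sketch (illustrative; paths and parameters are placeholders):
#   finfo = search_dbz('./qpesums_npy')
#   ipca = fit_ipca_partial(finfo, nc=20, bs=100, log_flag=True)
#   print(ipca.explained_variance_ratio_.cumsum())  # cumulative variance explained by the PCs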
''' Project data into PCs '''
def transform_dbz(ipca, finfo, to_log):
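# Project each QPESUMS frame listed in finfo onto the fitted IncrementalPCA components;
# empty files are replaced by zero vectors so the output length always matches finfo.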
dbz = []
# Loop through finfo
for i in range(0,finfo.shape[0]):
f = finfo.iloc[i,:]
#logging.debug('Reading data from: ' + f['furi'])
tmp = np.load(f['furi']).flatten()
# Append new record
if tmp is None: # Copy the previous record if new record is empty
logging.warning('File empty: '+f['furi'])
dbz.append(np.zeros(ipca.n_components))
else:
# Convert to log space if specified
if to_log:
tmp = np.log(tmp+1)
tmp = tmp.reshape(1,len(tmp))
tmp = ipca.transform(tmp).flatten()
dbz.append(tmp)
# Save changes of the storage file
logging.debug('Data dimension after projection: ' + str(np.array(dbz).shape))
return(dbz)
def writeToCsv(output, fname, header=None):
# Overwrite the output file:
with open(fname, 'w', newline='', encoding='utf-8-sig') as csvfile:
writer = csv.writer(csvfile, delimiter=',',quotechar='"', quoting=csv.QUOTE_ALL)
if header is not None:
writer.writerow(header)
for r in output:
writer.writerow(r)
return(0)
#-----------------------------------------------------------------------
# Main function
#-----------------------------------------------------------------------
def main():
# Configure Argument Parser
parser = argparse.ArgumentParser(description='Retrieve DBZ data for further processing.')
parser.add_argument('--input', '-i', help='the directory containing all the DBZ data.')
parser.add_argument('--output', '-o', default='output', help='the output file.')
parser.add_argument('--filter', '-f', default=None, help='the filter file with time-stamps.')
parser.add_argument('--n_components', '-n', default=20, type=int, help='number of components to output.')
parser.add_argument('--batch_size', '-b', default=100, type=int, help='size of each data batch.')
parser.add_argument('--transform', '-t', default=0, type=int, choices=range(0, 2), help='transform data with PCA.')
parser.add_argument('--log_flag', '-g', default=1, type=int, choices=range(0, 2), help="convert to log-scale")
parser.add_argument('--randomseed', '-r', help="integer as the random seed", default="1234543")
parser.add_argument('--logfile', '-l', default=None, help='the log file.')
args = parser.parse_args()
# Set up logging
if args.logfile is not None:
logging.basicConfig(level=logging.DEBUG, filename=args.logfile, filemode='w')
else:
logging.basicConfig(level=logging.DEBUG)
# Scan files for reading
finfo = search_dbz(args.input)
logging.debug('Total data size: '+str(finfo.shape[0]))
# Apply filter if specified
if args.filter is not None:
logging.debug('Read filter file: '+args.filter)
flt = pd.read_csv(args.filter)
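# Assumed continuation (the snippet is truncated here); a sketch using only the helpers
# defined above, with the filter column name and output file names as placeholders:
#   finfo = finfo[finfo['timestamp'].isin(flt['timestamp'].astype(str))].reset_index(drop=True)
#   ipca = fit_ipca_partial(finfo, nc=args.n_components, bs=args.batch_size, log_flag=bool(args.log_flag))
#   joblib.dump(ipca, args.output + '.ipca.mod')
#   if args.transform:
#       projections = transform_dbz(ipca, finfo, to_log=bool(args.log_flag))
#       header = ['timestamp'] + ['pc%02d' % i for i in range(args.n_components)]
#       writeToCsv([[t] + list(p) for t, p in zip(finfo['timestamp'], projections)],
#                  args.output + '.csv', header=header)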