# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import unittest
import functools
import itertools
import types
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy.stats
from skbio import Sequence, DNA, RNA, Protein, TabularMSA
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
from skbio.util._testing import ReallyEqualMixin
from skbio.metadata._testing import (MetadataMixinTests,
PositionalMetadataMixinTests)
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_index_equal
class TabularMSASubclass(TabularMSA):
"""Used for testing purposes."""
pass
class TestTabularMSAMetadata(unittest.TestCase, ReallyEqualMixin,
MetadataMixinTests):
def setUp(self):
self._metadata_constructor_ = functools.partial(TabularMSA, [])
class TestTabularMSAPositionalMetadata(unittest.TestCase, ReallyEqualMixin,
PositionalMetadataMixinTests):
def setUp(self):
def factory(axis_len, positional_metadata=None):
return TabularMSA([DNA('A' * axis_len)],
positional_metadata=positional_metadata)
self._positional_metadata_constructor_ = factory
class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
def test_from_dict_empty(self):
self.assertEqual(TabularMSA.from_dict({}), TabularMSA([], index=[]))
def test_from_dict_single_sequence(self):
self.assertEqual(TabularMSA.from_dict({'foo': DNA('ACGT')}),
TabularMSA([DNA('ACGT')], index=['foo']))
def test_from_dict_multiple_sequences(self):
msa = TabularMSA.from_dict(
{1: DNA('ACG'), 2: DNA('GGG'), 3: DNA('TAG')})
# Sort because order is arbitrary.
msa.sort()
self.assertEqual(
msa,
TabularMSA([DNA('ACG'), DNA('GGG'), DNA('TAG')], index=[1, 2, 3]))
def test_from_dict_invalid_input(self):
# Basic test to make sure error-checking in the TabularMSA constructor
# is being invoked.
with self.assertRaisesRegex(
ValueError, 'must match the number of positions'):
TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
def test_constructor_invalid_dtype(self):
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
TabularMSA([Sequence('')])
with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*int'):
TabularMSA([42, DNA('')])
def test_constructor_not_monomorphic(self):
with self.assertRaisesRegex(TypeError,
'matching type.*RNA.*DNA'):
TabularMSA([DNA(''), RNA('')])
with self.assertRaisesRegex(TypeError,
'matching type.*float.*Protein'):
TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
def test_constructor_unequal_length(self):
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 0'):
TabularMSA([Protein(''), Protein('P')])
with self.assertRaisesRegex(
ValueError,
'must match the number of positions.*1 != 3'):
TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
def test_constructor_non_iterable(self):
with self.assertRaises(TypeError):
TabularMSA(42)
def test_constructor_minter_and_index_both_provided(self):
with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
index=['a', 'b'])
def test_constructor_invalid_minter_callable(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
def test_constructor_missing_minter_metadata_key(self):
with self.assertRaises(KeyError):
TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
minter='foo')
def test_constructor_unhashable_minter_metadata_key(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
def test_constructor_index_length_mismatch_iterable(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
def test_constructor_index_length_mismatch_index_object(self):
with self.assertRaisesRegex(ValueError,
'sequences.*2.*index length.*0'):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
def test_constructor_invalid_index_scalar(self):
with self.assertRaises(TypeError):
TabularMSA([DNA('ACGT'), DNA('TGCA')], index=42)
def test_constructor_non_unique_labels(self):
msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
assert_index_equal(msa.index, pd.Int64Index([1, 1]))
def test_constructor_empty_no_index(self):
# sequence empty
msa = TabularMSA([])
self.assertIsNone(msa.dtype)
self.assertEqual(msa.shape, (0, 0))
assert_index_equal(msa.index, pd.RangeIndex(0))
with self.assertRaises(StopIteration):
next(iter(msa))
# position empty
seqs = [DNA(''), DNA('')]
msa = TabularMSA(seqs)
self.assertIs(msa.dtype, DNA)
self.assertEqual(msa.shape, (2, 0))
assert_index_equal(msa.index, pd.RangeIndex(2))
# Functions for OCR block identification script
#
import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser
from glob import glob
from pathlib import Path
from psycopg2.extras import RealDictCursor
from time import localtime, strftime
from fuzzywuzzy import fuzz
import pandas as pd
from datetime import date
import settings  # project-specific settings module (DB credentials, confidence threshold, etc.); assumed available
logger1 = logging.getLogger(__name__)  # logger used throughout this script; handler configuration assumed elsewhere
#Insert query
insert_q = "INSERT INTO ocr_interpreted_blocks (document_id, block_id, data_type, interpreted_value, verbatim_value, data_source, match_score) VALUES (%(document_id)s, %(block_id)s, %(data_type)s, %(interpreted_value)s, %(verbatim_value)s, %(data_source)s, %(match_score)s) ON CONFLICT (document_id, block_id, data_type) DO UPDATE SET interpreted_value = %(interpreted_value)s, verbatim_value = %(verbatim_value)s"
#OCR Database
conn = psycopg2.connect(host = settings.ocr_host, database = settings.ocr_db, user = settings.ocr_user, password = settings.ocr_password, connect_timeout = 60)
conn.autocommit = True
db_cursor = conn.cursor(cursor_factory=RealDictCursor)
#GIS database
conn2 = psycopg2.connect(host = settings.gis_host, database = settings.gis_db, user = settings.gis_user, password = settings.gis_password, connect_timeout = 60)
db_cursor2 = conn2.cursor(cursor_factory=RealDictCursor)
#Delete previous id's
db_cursor.execute("DELETE FROM ocr_interpreted_blocks WHERE document_id IN (SELECT document_id FROM ocr_documents WHERE project_id = %(project_id)s)", {'project_id': settings.project_id})
logger1.debug(db_cursor.query.decode("utf-8"))
#Get entries with confidence value over the threshold from settings
db_cursor.execute("SELECT document_id, block, string_agg(word_text, ' ') as block_text, avg(confidence) as block_confidence FROM (SELECT * FROM ocr_entries WHERE confidence > %(confidence)s AND document_id IN (SELECT document_id FROM ocr_documents WHERE project_id = %(project_id)s) order by word) b GROUP BY document_id, block, word_line", {'confidence': settings.confidence, 'project_id': settings.project_id})
ocr_blocks = db_cursor.fetchall()
logger1.debug(db_cursor.query.decode("utf-8"))
#Iterate for dates
from_year = 1800
def check_year(block_text):
"""
Check if block of text is a year
"""
from_year = 1800
today = date.today()
cur_year = today.strftime("%Y")
interpreted_value = ""
alpha_block = re.sub(r'\W+ ,-/', '', block_text).strip()
if len(alpha_block) < 5 or len(re.sub(r'\W+', '', block_text).strip()) < 5:
alpha_block_yr = re.sub(r'\W+', '', alpha_block).strip()
if len(alpha_block_yr) == 4:
#Could be a year
try:
for y in range(from_year, int(cur_year)):
if int(alpha_block_yr) == y:
interpreted_value = "{}".format(alpha_block_yr)
return interpreted_value
except:
pass
return None
#Iterate blocks
for ocr_block in ocr_blocks:
logger1.info("Block text: {}".format(ocr_block['block_text']))
#Identify year
#This year
today = date.today()
cur_year = today.strftime("%Y")
interpreted_value = ""
alpha_block = re.sub(r'\W+ ,-/', '', ocr_block['block_text']).strip()
if len(alpha_block) < 5 or len(re.sub(r'\W+', '', ocr_block['block_text']).strip()) < 5:
#Too short to parse
alpha_block_yr = re.sub(r'\W+', '', alpha_block).strip()
if len(alpha_block_yr) == 4:
#Year
try:
for y in range(from_year, int(cur_year)):
if int(alpha_block_yr) == y:
interpreted_value = "{}".format(alpha_block_yr)
db_cursor.execute(insert_q, {'document_id': ocr_block['document_id'], 'block_id': ocr_block['block'], 'data_type': 'Date (year)', 'interpreted_value': interpreted_value, 'verbatim_value': alpha_block, 'data_source': '', 'match_score': 0})
logger1.info('Date (year): {}'.format(interpreted_value))
break
except:
continue
else:
continue
if alpha_block in settings.collector_strings:
#Codeword that indicates this is a collector
continue
if "No." in alpha_block:
#Codeword that indicates this is not a date
continue
if alpha_block[-1] == "\'":
#Ends in quote, so it should be an elevation, not a date
elev_text = alpha_block.split(' ')
elev_text = elev_text[len(elev_text) - 1].strip()
interpreted_value = "{}\'".format(re.findall(r'\d+', elev_text))
db_cursor.execute(insert_q, {'document_id': ocr_block['document_id'], 'block_id': ocr_block['block'], 'data_type': 'elevation', 'interpreted_value': interpreted_value, 'verbatim_value': elev_text, 'data_source': '', 'match_score': 0})
logger1.info('Elevation: {}'.format(interpreted_value))
continue
if alpha_block[-1] == "m" or alpha_block[-1] == "masl":
#Ends in quote, so it should be an elevation, not a date
elev_text = alpha_block.split(' ')
elev_text = elev_text[len(elev_text) - 1].strip()
interpreted_value = "{}m".format(re.findall(r'\d+', elev_text))
db_cursor.execute(insert_q, {'document_id': ocr_block['document_id'], 'block_id': ocr_block['block'], 'data_type': 'elevation', 'interpreted_value': interpreted_value, 'verbatim_value': elev_text, 'data_source': '', 'match_score': 0})
logger1.info('Elevation: {}'.format(interpreted_value))
continue
for i in range(from_year, int(cur_year)):
if interpreted_value == "":
if str(i) in ocr_block['block_text']:
#Check if can directly parse the date
for d_format in ['DMY', 'YMD', 'MDY']:
if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
interpreted_value = this_date.strftime("%Y-%m-%d")
verbatim_value = alpha_block
continue
#Check if there is a month in roman numerals
roman_month = {"I": "Jan", "II": "Feb", "III": "Mar", "IV": "Apr", "V": "May", "VI": "Jun", "VII": "Jul", "VIII": "Aug", "IX": "Sep", "X": "Oct", "XI": "Nov", "X11": "Dec"}
for m in roman_month:
if m in ocr_block['block_text']:
#Possible year and month found
this_text = ocr_block['block_text'].replace(m, roman_month[m])
alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
#Try to parse date
for d_format in ['DMY', 'YMD', 'MDY']:
if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
interpreted_value = this_date.strftime("%Y-%m-%d")
verbatim_value = alpha_block
continue
if interpreted_value == "":
for i in range(99):
if interpreted_value == "":
if i < 10:
i = "0{}".format(i)
else:
i = str(i)
if "-{}".format(i) in ocr_block['block_text'] or "\'{}".format(i) in ocr_block['block_text'] or " {}".format(i) in ocr_block['block_text'] or "/{}".format(i) in ocr_block['block_text']:
#Check if can directly parse the date
alpha_block = re.sub(r'\W+ ,-/', '', ocr_block['block_text']).strip()
for d_format in ['DMY', 'YMD', 'MDY']:
if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
if int(this_date.strftime("%Y")) > int(cur_year):
#If it interprets year 64 as 2064, shift back one century
this_date_year = int(this_date.strftime("%Y")) - 100
else:
this_date_year = this_date.strftime("%Y")
interpreted_value = "{}-{}".format(this_date_year, this_date.strftime("%m-%d"))
verbatim_value = alpha_block
break
#Check if there is a month in roman numerals
roman_month = {"I": "Jan", "II": "Feb", "III": "Mar", "IV": "Apr", "V": "May", "VI": "Jun", "VII": "Jul", "VIII": "Aug", "IX": "Sep", "X": "Oct", "XI": "Nov", "X11": "Dec"}
for m in roman_month:
if m in ocr_block['block_text']:
#Possible year and month found
this_text = ocr_block['block_text'].replace(m, roman_month[m])
alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
#Try to parse date
for d_format in ['DMY', 'YMD', 'MDY']:
if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
if int(this_date.strftime("%Y")) > int(cur_year):
#If it interprets year 64 as 2064, shift back one century
this_date_year = int(this_date.strftime("%Y")) - 100
else:
this_date_year = this_date.strftime("%Y")
interpreted_value = "{}-{}".format(this_date_year, this_date.strftime("%m-%d"))
verbatim_value = alpha_block
break
if interpreted_value != "":
#Remove interpreted values in other fields
db_cursor.execute(insert_q, {'document_id': ocr_block['document_id'], 'block_id': ocr_block['block'], 'data_type': 'Date (Y-M-D)', 'interpreted_value': interpreted_value, 'verbatim_value': verbatim_value, 'data_source': '', 'match_score': 0})
logger1.info('Date: {}'.format(interpreted_value))
continue
#Get sub-state/province localities from GIS database
db_cursor2.execute("SELECT name_2 || ', ' || name_1 || ', ' || name_0 as name, 'locality:sub-state' as name_type, uid FROM gadm2")
sub_states = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
#Get state/provinces from GIS database
db_cursor2.execute("SELECT name_1 || ', ' || name_0 as name, 'locality:state' as name_type, uid FROM gadm1")
states = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
#Get countries from GIS database
db_cursor2.execute("SELECT name_0 as name, 'locality:country' as name_type, uid FROM gadm0")
countries = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
#Get counties, state
db_cursor2.execute("SELECT name_2 || ' Co., ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States' AND type_2 = 'County'")
counties = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
counties_list = pd.DataFrame(counties)
db_cursor2.execute("SELECT name_2 || ' ' || type_2 || ', ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States'")
counties = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
counties_list = counties_list.append(counties, ignore_index=True)
db_cursor2.execute("SELECT DISTINCT g.name_2 || ', ' || s.abbreviation as name, 'locality:county' as name_type, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
counties = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
counties_list = counties_list.append(counties, ignore_index=True)
db_cursor2.execute("SELECT DISTINCT g.name_2 || ' Co., ' || s.abbreviation as name, 'locality:county' as name_type, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
counties = db_cursor2.fetchall()
logger1.debug(db_cursor2.query.decode("utf-8"))
counties_list = counties_list.append(counties, ignore_index=True)
#Close GIS database connection
db_cursor2.close()
conn2.close()
#Iterate for localities
for ocr_block in ocr_blocks:
logger1.info("Block text: {}".format(ocr_block['block_text']))
#Countries
localities_match = pd.DataFrame(counties_list)
import re
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
FunctionTransformer,
LabelBinarizer,
LabelEncoder,
OneHotEncoder,
OrdinalEncoder,
StandardScaler,
)
def transfrom_array_to_df_onehot(pl,nparray,onehot=True,overal_imp=False):
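# Rebuilds a labelled DataFrame from the numpy array produced by a fitted
# ColumnTransformer pipeline `pl`: collects the column lists of the first
# (numeric), second (ordinal) and third (categorical) transformers, expands
# one-hot encoded column names when onehot=True, and appends
# "*_imput_indicator" columns when overal_imp=True.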
col_list=[]
col_list_int = pl["preprocessor"].transformers_[0][2] #changes col location
#print(col_list_int)
ordinal_col=pl["preprocessor"].transformers[1][2]
original_col=pl["preprocessor"].transformers[2][2]
col_list=col_list_int+ordinal_col
if onehot:
encoded_col=pl["preprocessor"].transformers_[2][1].named_steps["OneHotEnconding"].get_feature_names_out()
#print(len(encoded_col))
new_enconded_list=[]
for idx,col in enumerate(original_col):
for n_col in encoded_col:
#print(idx,col)
# print("x"+str(idx))
if "x"+str(idx)+"_" in n_col:
# print(col,n_col)
new_enconded_list.append(col+"_"+n_col.split("_")[-1])
col_list=col_list+new_enconded_list
print(col_list)
#print(len(col_list))
else:
col_list=col_list+original_col
if overal_imp==True:
imputed_cols_idx=pl["imputer"].indicator_.features_
imputed_indicator=[col_list[i] for i in imputed_cols_idx]
# print(imputed_indicator)
# print(len(imputed_indicator))
for imp_col in imputed_indicator:
col_list.append(imp_col+"_imput_indicator")
df1 = pd.DataFrame(nparray, columns=col_list)
import datetime as dt
import sqlite3
from os import listdir
import numpy as np
import pandas as pd
import math
"""
ISSUES 12-15
NCATS_DEMOGRAPHICS and visit details - not even there
X Diagnoses ok
ACT Labs doesn't show up after "full list"
ACT Laboratory Tests no show at all
ACT Meds can't drill into
X Procedures ok
X COVID-19 broken
Visit details not there
"""
"""
New version loads totalnum reports into a SQLite3 db from basedir (below) with the name format report_[siteid]_[foo].csv.
Columns must be (in order) c_fullname, agg_date, agg_count. (Case insensitive on column names however.)
Date format for agg_date (as enforced by the totalnum report script), should be YYYY-MM-DD, but the python parser can handle others.
Bigfullnamefile must be a file with all possible paths (e.g., from the concept dimension) with columns: c_fullname, c_name.
hlevel and "domain" are inferred.
SQLite db uses a totalnum_int column in the totalnums table and puts this for reference in bigfullname.
By <NAME>, PhD 05-2020
"""
""" Here's how I get the ontology data for the master list:
select distinct concept_path, name_char from concept_dimension
select distinct c_fullname, c_name, c_visualattributes, c_tooltip from act_covid
and c_visualattributes not like '%H%' and c_synonym_cd!='Y'
(only the first two columns are needed)
To do this for the whole ACT ontology, use my act_master_vw (separate script) and:
select distinct c_fullname, c_name, c_hlevel, c_visualattributes, c_tooltip from act_master_vw
where c_visualattributes not like '%H%' and c_synonym_cd!='Y'
"""
# Thanks https://stackoverflow.com/questions/2298339/standard-deviation-for-sqlite
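# Implements Welford's online algorithm: M holds the running mean and S the running
# sum of squared deviations; k starts at 1 and is incremented once per value, so
# finalize() returns sqrt(S / (n - 1)), i.e. the sample standard deviation.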
class StdevFunc:
def __init__(self):
self.M = 0.0
self.S = 0.0
self.k = 1
def step(self, value):
if value is None:
return
tM = self.M
self.M += (value - tM) / self.k
self.S += (value - tM) * (value - self.M)
self.k += 1
def finalize(self):
if self.k < 3:
return None
return math.sqrt(self.S / (self.k-2))
basedir = "/Users/jeffklann/HMS/Projects/ACT/totalnum_data/reports"
bigfullnamefile = '/Users/jeffklann/HMS/Projects/ACT/totalnum_data/ACT_paths_full.csv' # ACT_covid_paths_v3.csv
conn = sqlite3.connect(basedir + '/totalnums.db')
conn.create_aggregate("stdev", 1, StdevFunc)
""" SQL code that creates views and additional tables on the totalnum db for analytics
"""
def postProcess():
sql = r"""
-- Create a pre-joined view for faster coding
drop view if exists totalnums_recent_joined;
create view totalnums_recent_joined as
select c_hlevel,domain,c_visualattributes,f.fullname_int,c_fullname,c_name,agg_date,agg_count,site from
bigfullname f left join totalnums_recent t on f.fullname_int=t.fullname_int;
-- Create a view with old column names
drop view if exists totalnums_oldcols;
create view totalnums_oldcols as
SELECT fullname_int, agg_date AS refresh_date, agg_count AS c, site
FROM totalnums;
drop view if exists totalnums_recent;
-- Set up view for most recent totalnums
create view totalnums_recent as
select t.* from totalnums t inner join
(select fullname_int, site, max(agg_date) agg_date from totalnums group by fullname_int, site) x
on x.fullname_int=t.fullname_int and x.site=t.site and x.agg_date=t.agg_date;
-- Get denominator: any pt in COVID ontology (commented out is any lab test which works better if the site has lab tests)
drop view if exists anal_denom;
create view anal_denom as
select site, agg_count denominator from totalnums_recent where fullname_int in
(select fullname_int from bigfullname where c_fullname='\ACT\UMLS_C0031437\SNOMED_3947185011\');--UMLS_C0022885\')
-- View total / denominator = pct
drop view if exists totalnums_recent_pct;
create view totalnums_recent_pct as
select fullname_int, agg_date, cast(cast(agg_count as float) / denominator * 100 as int) pct, tot.site from totalnums_recent tot inner join anal_denom d on tot.site=d.site;
-- Site outliers: compute avg and stdev.
-- I materialize this (rather than a view) because SQLite doesn't have a stdev function.
drop table if exists outliers_sites;
create table outliers_sites as
select agg_count-stdev-average,* from totalnums_recent r inner join
(select * from
(select fullname_int,avg(agg_count) average, stdev(agg_count) stdev, count(*) num_sites from totalnums_recent r where agg_count>-1 group by fullname_int)
where num_sites>1) stat on stat.fullname_int=r.fullname_int;
-- Site outliers: compute avg and stdev.
-- I materialize this (rather than a view) because SQLite doesn't have a stdev function.
drop table if exists outliers_sites_pct;
create table outliers_sites_pct as
select pct-stdev-average,* from totalnums_recent_pct r inner join
(select * from
(select fullname_int,avg(pct) average, stdev(pct) stdev, count(*) num_sites from totalnums_recent_pct r where pct>=0 group by fullname_int)
where num_sites>1) stat on stat.fullname_int=r.fullname_int;
-- Add some fullnames for summary measures and reporting
drop table if exists toplevel_fullnames;
create table toplevel_fullnames as
select fullname_int from bigfullname where c_fullname like '\ACT\Diagnosis\ICD10\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Diagnosis\ICD9\V2_2018AA\A18090800\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\CPT4\V2_2018AA\A23576389\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\HCPCS\V2_2018AA\A13475665\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\ICD10\V2_2018AA\A16077350\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Lab\LOINC\V2_2018AA\%' and c_hlevel=7 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Medications\MedicationsByVaClass\V2_09302018\%' and c_hlevel=5 and c_visualattributes not like 'L%';
create index toplevel_fullnames_f on toplevel_fullnames(fullname_int);
"""
cur = conn.cursor()
cur.executescript(sql)
cur.close()
def buildDb():
# Build the main totalnums db
files = [f for f in listdir(basedir) if ".csv" in f[-4:]]
totals = []
# Load the files
for f in files:
print(basedir + '/' + f)
tot = totalnum_load(basedir + '/' + f)
totals.append(tot)
# 11-20 - support both utf-8 and cp1252
print(bigfullnamefile)
bigfullname = None
try:
bigfullname = pd.read_csv(bigfullnamefile,index_col='c_fullname',delimiter=',',dtype='str')
except UnicodeDecodeError:
bigfullname = pd.read_csv(bigfullnamefile,index_col='c_fullname',delimiter=',',dtype='str',encoding='cp1252')
import pytest
import os
import pandas as pd
from lkmltools.bq_writer import BqWriter
from google.cloud.storage.blob import Blob
from google.cloud.bigquery.job import WriteDisposition
from google.cloud.bigquery.job import LoadJobConfig
def test_write_to_csv():
writer = BqWriter()
df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
target_bucket_name = "tmp_bucket"
bucket_folder = "tmp_bucket"
expected_filename = (
target_bucket_name + os.sep + bucket_folder + os.sep + BqWriter.FILENAME
)
if os.path.exists(expected_filename):
os.remove(expected_filename)
filename = writer._write_to_csv(
df, target_bucket_name, bucket_folder, local_filename=None
)
assert (
filename
== target_bucket_name + os.sep + bucket_folder + os.sep + BqWriter.FILENAME
)
assert os.path.exists(filename)
if os.path.exists(expected_filename):
os.remove(expected_filename)
def test_write_to_csv2():
writer = BqWriter()
df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
# multivariate multihead multistep
from keras.layers import Flatten
from keras.layers import ConvLSTM2D
from keras.layers.merge import concatenate
from numpy import array
from numpy import hstack
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Reshape
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import os
import gc
import joblib
import functions as func
from sklearn.model_selection import RandomizedSearchCV
from keras.utils import to_categorical
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer
from sklearn.preprocessing import StandardScaler
import time
from sklearn.metrics import fbeta_score
from sklearn.metrics import r2_score
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.optimizers import Adam
from keras.constraints import maxnorm
from keras.layers import Dropout
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
def R2_measure(y_true, y_pred):
return r2_score(y_true, y_pred)
def f2_measure(y_true, y_pred):
return fbeta_score(y_true, y_pred, labels=[1, 2], beta=2, average='micro')
def split_sequences(data, n_steps, n_step_out):
data = data.values
X, y = list(), list()
for i in range(len(data)):
end_ix = i + n_steps*6
if end_ix > len(data):
break
Kx = np.empty((1, 12))
for index in np.arange(i, i+(n_steps*6), step=6, dtype=int):
eachhour = index + 6
if eachhour > len(data) or i+(n_steps*6) > len(data):
break
a = data[index: eachhour, : (-1*n_step_out)]
hourlymean_x = np.round(np.mean(a, axis=0), decimals=2)
hourlymean_y = data[eachhour-1, (-1*n_step_out):]
hourlymean_x = hourlymean_x.reshape((1, hourlymean_x.shape[0]))
if index != i:
Kx = np.append(Kx, hourlymean_x, axis=0)
else:
Kx = hourlymean_x
X.append(Kx)
y.append(hourlymean_y)
# print(np.array(X).shape)
return np.array(X), np.array(y)
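# Illustration (hypothetical data): given 10-minute rows whose last n_step_out
# columns are targets, split_sequences() returns hourly-mean feature windows of
# shape (samples, n_steps, n_features) and targets of shape (samples, n_step_out):
# demo = pd.DataFrame(np.random.rand(36, 5))  # 6 h of 10-min rows: 3 features + 2 targets
# X_demo, y_demo = split_sequences(demo, n_steps=3, n_step_out=2)
# print(X_demo.shape, y_demo.shape)  # -> (19, 3, 3) (19, 2)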
def temporal_horizon(df, pd_steps, target):
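# Builds lagged target columns Target_<target>_t<k> (k counted in 10-minute steps)
# for horizons of 1 to 72 hours (each hour = 6 ten-minute rows); note that the
# pd_steps argument is immediately overwritten by the loop below.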
for pd_steps in [1, 3, 6, 12, 24, 36, 48, 60, 72]:
pd_steps = pd_steps * 6
target_values = df[[target]]
target_values = target_values.drop(
target_values.index[0: pd_steps], axis=0)
target_values.index = np.arange(0, len(target_values[target]))
df['Target_'+target+'_t'+str(pd_steps)] = target_values
df = df.drop(df.index[len(df.index)-(72*6): len(df.index)], axis=0)
return df
def create_reg_LSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
# print('n_Steps: ' + str(n_steps_in*totalsets) +
# 'n_features: '+str(n_features))
model.add(LSTM(neurons, activation=activation, return_sequences=True,
kernel_constraint=maxnorm(weight_constraint)))
model.add(Dropout(dropout_rate))
# , return_sequences=True))
model.add(LSTM(neurons, activation=activation))
# model.add(Dense(neurons, activation=activation)) # Adding new layer
model.add(Dense(n_steps_out))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
print('model: ' + str(model))
return model
def create_reg_NN_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = Sequential()
model.add(Dense(neurons, activation=activation,
kernel_constraint=maxnorm(weight_constraint), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(Dropout(dropout_rate))
model.add(Dense(neurons, activation=activation))
model.add(Dense(neurons, activation=activation)) # adding new layer
model.add(Dense(n_steps_out))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
print('model: ' + str(model))
return model
def create_reg_endecodeLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(LSTM(neurons, activation=activation,
input_shape=(n_steps_in*totalsets, n_features)))
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def create_reg_CNNenLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(Conv1D(64, 1, activation=activation,
input_shape=(n_steps_in*totalsets, n_features)))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(100, activation=activation)))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def create_reg_ConvLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
# reshape from [samples, timesteps] into [samples, timesteps, rows, columns, features(channels)]
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in, totalsets, n_features, 1), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(ConvLSTM2D(64, (1, 3), activation=activation,
input_shape=(n_steps_in, totalsets, n_features, 1)))
model.add(Flatten())
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(100, activation=activation)))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def algofind(model_name, input_dim, cat, n_steps_in, n_features, n_steps_out):
if cat == 0:
if model_name == 'endecodeLSTM':
model = KerasRegressor(build_fn=create_reg_endecodeLSTM_model, input_dim=input_dim,
epochs=1, batch_size=64, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'LSTM':
model = KerasRegressor(build_fn=create_reg_LSTM_model, input_dim=input_dim,
epochs=1, batch_size=64, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'CNNLSTM':
model = KerasRegressor(build_fn=create_reg_CNNenLSTM_model, input_dim=input_dim,
epochs=1, batch_size=64, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'ConvEnLSTM':
model = KerasRegressor(build_fn=create_reg_ConvLSTM_model, input_dim=input_dim,
epochs=1, batch_size=64, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'NN':
model = KerasRegressor(build_fn=create_reg_NN_model, input_dim=input_dim,
epochs=1, batch_size=64, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'DT':
model = MultiOutputRegressor(DecisionTreeRegressor())
elif model_name == 'RF':
model = MultiOutputRegressor(RandomForestRegressor())
elif model_name == 'LR':
model = Pipeline(
[('poly', MultiOutputRegressor(PolynomialFeatures()))])
# ,('fit', MultiOutputRegressor(Ridge()))])
elif model_name == 'SVC':
model = MultiOutputRegressor(SVR())
return model
totalsets = 1
def main():
method = 'OrgData'
# , 'DOcategory', 'pHcategory'] # ysi_blue_green_algae (has negative values for leavon... what does negative mean!?)
# , 'dissolved_oxygen', 'ph']
targets = ['dissolved_oxygen', 'ph'] # 'ysi_blue_green_algae'
models = ['LSTM']
path = 'Sondes_data/train_Summer/'
files = [f for f in os.listdir(path) if f.endswith(
".csv") and f.startswith('leavon')] # leavon
for model_name in models:
for target in targets:
print(target)
if target.find('category') > 0:
cat = 1
directory = 'Results/bookThree/output_Cat_' + \
model_name+'/oversampling_cv_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'std_test_score': 'std_test_score', 'mean_test_score': 'mean_test_score', 'params': 'params', 'bestscore': 'bestscore', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta', 'imfeatures': 'imfeatures', 'configs': 'configs', 'scores': 'scores'}
else:
cat = 0
directory = 'Results/bookThree/output_Reg_' + \
model_name+'/oversampling_cv_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'std_test_score': 'std_test_score', 'mean_test_score': 'mean_test_score', 'params': 'params', 'bestscore': 'bestscore', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mse': 'mse', 'rmse': 'rmse', 'R2': 'R2', 'imfeatures': 'imfeatures', 'configs': 'configs', 'scores': 'scores'}
if not os.path.exists(directory):
os.makedirs(directory)
for file in files:
result_filename = 'results_'+target + \
'_'+file+'_'+str(time.time())+'.csv'
dfheader = pd.DataFrame(data=data, index=[0])
dfheader.to_csv(directory+result_filename, index=False)
PrH_index = 0
for n_steps_in in [36, 48, 60]:
print(model_name)
print(str(n_steps_in))
dataset = pd.read_csv(path+file)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
from scipy.stats import pearsonr
ghg_year = 2015 # 2017
dict_cat = 'category_8' # replacement for new_cats
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
lookup = pd.read_csv(wd + 'data/raw/Geography/Conversion_Lookups/UK_full_lookup_2001_to_2011.csv')\
[['MSOA11CD', 'MSOA01CD', 'RGN11NM']].drop_duplicates()
lookup = lookup.loc[(lookup['RGN11NM'] != 'Northern Ireland') &
(lookup['RGN11NM'] != 'Wales') &
(lookup['RGN11NM'] != 'Scotland')]
lookup['London'] = False; lookup.loc[lookup['RGN11NM'] =='London', 'London'] = True
emissions = {}
for year in [ghg_year]:
year_difference = years[1] - years[0]
year_str = str(year) + '-' + str(year + year_difference - 1)
emissions[year] = pd.read_csv(wd + 'data/processed/GHG_Estimates/' + geog + '_' + year_str + '.csv', index_col=0)
# income data
income = {}
income[2017] = pd.read_csv(wd + 'data/raw/Income_Data/equivalised_income_2017-18.csv', header=4, encoding='latin1')
income[2015] = pd.read_csv(wd + 'data/raw/Income_Data/equivalised_income_2015-16.csv', skiprows=5, header=None, encoding='latin1')
income[2015].columns = income[2017].columns
# import household numbers to adjust income
temp = pd.read_csv(wd + 'data/raw/Geography/Census_Populations/no_households_england.csv').set_index('geography code')[['Household Composition: All categories: Household composition; measures: Value']]
temp = temp.join(pd.read_csv(wd + 'data/raw/Geography/Census_Populations/census2011_pop_england_wales_msoa.csv').set_index('MSOA11CD'))
for year in [2015, 2017]:
income[year] = income[year].set_index('MSOA code')[['Net annual income after housing costs']].join(temp)
income[year].columns = ['hhld_income', 'no_hhlds', 'pop']
income[year]['hhld_income'] = pd.to_numeric(income[year]['hhld_income'].astype(str).str.replace(',', ''), errors='coerce')
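# convert equivalised annual household income to an approximate weekly per-capita
# figure: annual household income * number of households / population / (365/7 weeks per year)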
income[year]['income'] = income[year]['hhld_income'] * income[year]['no_hhlds'] / income[year]['pop'] / (365/7)
income[year] = income[year].dropna(how='all')
# import census data
# age
age = pd.read_csv(wd + 'data/raw/Census/age_england_wales_msoa.csv', index_col=0, header=7)\
.apply(lambda x: pd.to_numeric(x, errors='coerce')).dropna(how='all')
age.index = [str(x).split(' :')[0] for x in age.index]
# disability
disability = pd.read_csv(wd + 'data/raw/Census/DC3201EW - Long-term health problem or disability.csv', index_col=0, header=10)\
.apply(lambda x: pd.to_numeric(x, errors='coerce')).dropna(how='all').sum(axis=0, level=0).apply(lambda x: x/2)
disability.index = [str(x).split(' :')[0] for x in disability.index]
# ethnicity
ethnicity = pd.read_excel(wd + 'data/raw/Census/KS201EW - Ethnic group.xlsx', index_col=0, header=8)\
.apply(lambda x: pd.to_numeric(x, errors='coerce')).dropna(how='all')
ethnicity.index = [str(x).split(' :')[0] for x in ethnicity.index]
# workplace
workplace = pd.read_csv(wd + 'data/raw/Census/QS702EW - Distance to work.csv', index_col=0, header=8)\
.apply(lambda x: pd.to_numeric(x.astype(str).str.replace(',', ''), errors='coerce')).dropna(how='any')
workplace.index = [str(x).split(' :')[0] for x in workplace.index]
workplace.columns = ['pop', 'total_workplace_dist', 'avg_workplace_dist']
# combine all census data
age = age[['All usual residents']]\
.join(pd.DataFrame(age.loc[:, 'Age 65 to 74':'Age 90 and over'].sum(axis=1))).rename(columns={0:'pop_65+'})\
.join(pd.DataFrame(age.loc[:, 'Age 0 to 4':'Age 10 to 14'].sum(axis=1))).rename(columns={0:'pop_14-'})
age['pop_65+_pct'] = age['pop_65+'] / age['All usual residents'] * 100
age['pop_14-_pct'] = age['pop_14-'] / age['All usual residents'] * 100
disability['not_lim_pct'] = disability['Day-to-day activities not limited'] / disability['All categories: Long-term health problem or disability'] * 100
disability = disability[['Day-to-day activities not limited', 'not_lim_pct']].rename(columns = {'Day-to-day activities not limited':'not_lim'})
disability['lim_pct'] = 100 - disability['not_lim_pct']
ethnicity['bame_pct'] = ethnicity.drop('White', axis=1).sum(1) / ethnicity.sum(1) * 100
census_data = age[['pop_65+_pct', 'pop_65+', 'pop_14-_pct', 'pop_14-']].join(disability).join(ethnicity[['bame_pct']])\
.join(workplace[['total_workplace_dist', 'avg_workplace_dist']], how='left')
# add transport access
ptal_2015 = gpd.read_file(wd + 'data/processed/GWR_data/gwr_data_london_' + str(ghg_year) + '.shp')
ptal_2015 = ptal_2015.set_index('index')[['AI2015', 'PTAL2015', 'AI2015_ln']]
# combine all with emissions data
cat_dict = pd.read_excel(wd + '/data/processed/LCFS/Meta/lcfs_desc_anne&john.xlsx')
# -*- coding: utf-8 -*-
"""
This library contains all functions needed to produce the spatial files
of a LARSIM raster model (tgb.dat, utgb.dat, profile.dat).
It uses functions from the TATOO core library.
Author: <NAME>
Chair for Hydrology and River Basin Management
Technical University of Munich
Requires the following ArcGIS licenses:
- Conversion Toolbox
- Spatial Analyst
System requirements:
- Processor: no special requirements
tested with Intel(R) Xeon(R) CPU E5-1650 v4 @ 3.60 GHz
- Memory/RAM: depending on the size of the DEM to be processed
tested with 32,0 GB RAM
- Python IDE for Python 3
- ArcGIS Pro 2.5
Version: v1.0.0, 2021-05-02
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021'
__credits__ = ''
__license__ = 'CC BY-NC-ND 3.0 DE'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Production'
# load modules
import os
import sys
import copy
import arcpy
import numpy.matlib
import numpy.lib.recfunctions
import numpy as np
import pandas as pd
import tatoo_common as tc
# check out ArcGIS spatial analyst license
class LicenseError(Exception):
pass
try:
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
print ("Checked out \"Spatial\" Extension")
else:
raise LicenseError
except LicenseError:
print("Spatial Analyst license is unavailable")
except:
print(arcpy.GetMessages(2))
# allow overwriting the outputs
arcpy.env.overwriteOutput = True
# %% function to preprocess a high-resolution digital elevation model
def preprocess_dem(path_dem_hr, path_fnw, cellsz, h_burn,
path_gdb_out, name_dem_mr_f='dem_mr_f',
print_out=False):
"""
Aggregates a high-resolution digital elevation raster, covnert river network
to model resolution raster, burns flow network raster into digital elevation
raster and fills sinks of the resulting raster.
JM 2021
Arguments:
-----------
path_dem_hr: str
path of the high-resolution digital elevation raster
(e.g., 'c:\model_creation.gdb\dem_hr')
path_fnw: str
path of the flow network feature class or shape file (e.g., 'c:\fnw.shp')
cellsz: integer
edge length of the resulting model cells in [m] (e.g., 100)
h_burn: integer
depth of river network burning in digital elevation model
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_dem_mr_f: str
name of the filled model-resolution digital elevation raster(e.g., 'dem_mr_f')
print_out: boolean (optional, default: False)
true if workprogress shall be print to command line (default: false)
Returns:
-----------
Saves the following files:
- filled model-resolution digital elevation raster
- model-resolution raster representation of the flow network
"""
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
# paths for intermediates
path_dem_mr_sr = path_gdb_out + 'dem_mr_sr'
path_dem_mr = path_gdb_out + 'dem_mr'
path_fnw_mr = path_gdb_out + 'fnw_mr'
path_dem_mr_cfnw = path_gdb_out + 'dem_mr_cfnw'
# paths for outputs
path_dem_mr_f = path_gdb_out + name_dem_mr_f
# Aggregate high resolution digital elevation model to model resolution
if print_out: print('...aggregate high resolution digital elevation model...')
# create snap raster at origin of coordinate system
dem_mr_sr = arcpy.sa.CreateConstantRaster(
1, 'INTEGER', cellsz, arcpy.Extent(
0.5 * cellsz, 0.5 * cellsz,
cellsz + 0.5 * cellsz, cellsz + 0.5 * cellsz))
dem_mr_sr.save(path_dem_mr_sr)
# save default and set environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
arcpy.env.snapRaster = path_dem_mr_sr
arcpy.env.extent = path_dem_hr
# aggregate high resolution DEM to model resolution
if arcpy.Exists(path_dem_mr):
arcpy.management.Delete(path_dem_mr)
dem_mr = arcpy.sa.Aggregate(path_dem_hr, cellsz, 'MEAN', 'EXPAND', 'DATA')
dem_mr.save(path_dem_mr)
# cut rivers
if print_out: print('...cut rivers...')
# convert polylines to raster in model grid size
arcpy.conversion.PolylineToRaster(path_fnw, f_oid, path_fnw_mr,
'MAXIMUM_LENGTH', 'NONE', path_dem_mr)
# decrease model resolution elevation raster values at flow network raster cells
dem_mr_cfnw = arcpy.sa.Con(arcpy.sa.IsNull(path_fnw_mr), path_dem_mr,
dem_mr - h_burn)
dem_mr_cfnw.save(path_dem_mr_cfnw)
# reset environment parameters
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
# fill cut model resolution digital elevation raster sinks
if print_out: print('...fill cut model resolution digital elevation raster sinks...')
# fill sinks
dem_mr_cfnw_f = arcpy.sa.Fill(path_dem_mr_cfnw, '')
dem_mr_cfnw_f.save(path_dem_mr_f)
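# Example call (hypothetical values; paths as in the docstring, with path_gdb_out
# ending in a separator because raster names are concatenated onto it):
# preprocess_dem(r'c:\model_creation.gdb\dem_hr', r'c:\fnw.shp', 100, 3,
#                r'c:\model_creation.gdb' + '\\', name_dem_mr_f='dem_mr_f',
#                print_out=True)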
# %% function to calculate the model watershed
def calc_watershed(path_dem_mr_cfnw_f, path_pp,
path_gdb_out, name_fd_mr='fd_mr', name_fd_mr_corr='fd_mr_corr',
name_fa_mr='fa_mr', name_ws_s='ws_s',
initial=True, name_fd_p_corr='fd_p_corr', path_fd_p_corr='',
print_out=False):
"""
Creates the model watershed from a filled digital elevation raster using
pour points. The correction point feature class is necessary for the initial
calculation of the model watershed calculation.
JM 2021
Arguments:
-----------
path_dem_mr_cfnw_f: str
path of the filled model-resolution digital elevation raster
(e.g., 'c:\model_creation.gdb\dem_mr_cfnw_f')
path_pp: str
path of the pour point feature class
(e.g., 'c:\model_creation.gdb\pp')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_fd_mr: str
name of the output model resolution flow direction raster (e.g., 'fd_mr')
name_fd_mr: str
name of the corrected output model resolution flow direction raster
(e.g., 'fd_mr_corr')
name_fa_mr: str
name of the output model resolution flow accumulation raster (e.g., 'fa_mr')
name_ws_s: str
name of the output watershed polygon feature class (e.g., 'ws_s')
initial: boolean (optional)
true if it is the initial run to calculate the model watershed
name_fd_p_corr: str (optional)
name of the output flow direction correction point feature class
(e.g., 'fd_p_corr')
path_fd_p_corr: str (optional)
path of the output flow direction correction point feature class
needed for case initial=False (e.g., 'fd_p_corr')
print_out: boolean (optional, default: False)
true if workprogress shall be print to command line
Returns:
-----------
Saves the following outputs:
- model resolution flow direction raster
- watershed polygon feature class
- flow direction correction point feature class (optional)
"""
# check inputs
if not initial and not path_fd_p_corr:
sys.exit('With initial=False path_fd_p_corr must not be an empty string!')
# define internal field names
f_pp = 'pp'
f_pp_ws = 'ModelWatershed'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
f_val = 'Value'
f_VAL = 'VALUE'
# feature class names from input
name_pp = os.path.split(path_pp)[1]
# define paths of intermediates in working geodatabase
path_ppc = path_gdb_out + 'ppc'
path_spp = path_gdb_out + 'spp'
path_ws_r = path_gdb_out + 'ws_r'
path_ws_sr = path_gdb_out + 'ws_sr'
path_ws_s_sj = path_gdb_out + 'ws_s_sj'
if not initial: path_fd_r_corr = path_gdb_out + 'fd_r_corr'
# paths for outputs
path_fd_mr = path_gdb_out + name_fd_mr
path_fd_mr_corr = path_gdb_out + name_fd_mr_corr
path_fa_mr = path_gdb_out + name_fa_mr
path_ws_s = path_gdb_out + name_ws_s
if initial: path_fd_p_corr = path_gdb_out + name_fd_p_corr
# calculate flow direction
if print_out: print('...calculate flow direction raster...')
if arcpy.Exists(path_fd_mr): arcpy.management.Delete(path_fd_mr)
fd_mr = arcpy.sa.FlowDirection(path_dem_mr_cfnw_f, 'NORMAL', '', 'D8')
fd_mr.save(path_fd_mr)
# if run is initial, create correction flow direction point feature class
# and copy flow direction raster.
field_fd_corr = 'D8'
if initial:
# create flow direction correction feature class and add flow direction
# binary field
sr = arcpy.Describe(path_pp).spatialReference
arcpy.CreateFeatureclass_management(path_gdb_out, name_fd_p_corr,
'POINT', '', 'DISABLED', 'DISABLED',
sr, '', '0', '0', '0', '')
arcpy.AddField_management(path_fd_p_corr, field_fd_corr, 'SHORT', '', '',
'', '', 'NULLABLE', 'NON_REQUIRED', '')
if arcpy.Exists(path_fd_mr_corr): arcpy.management.Delete(path_fd_mr_corr)
arcpy.CopyRaster_management(path_fd_mr, path_fd_mr_corr, '', '', '255',
'NONE', 'NONE', '8_BIT_UNSIGNED', 'NONE',
'NONE', 'GRID', 'NONE', 'CURRENT_SLICE',
'NO_TRANSPOSE')
# else, correct flow direction raster using correction point features
else:
# check whether the flow direction correction point feature class contains any
# features (Describe().extent.XMin is NaN for an empty feature class)
fd_p_corr_descr = arcpy.Describe(path_fd_p_corr)
fd_p_nb = fd_p_corr_descr.extent.XMin
# if there are existing point features (nb!=0), do correction
if not np.isnan(fd_p_nb):
# set environments
default_env_snr = arcpy.env.snapRaster
default_env_csz = arcpy.env.cellSize
arcpy.env.snapRaster = path_fd_mr
arcpy.env.cellSize = path_fd_mr
# convert flow direction correction points to raster
arcpy.PointToRaster_conversion(path_fd_p_corr, field_fd_corr,
path_fd_r_corr, 'MOST_FREQUENT',
'NONE', path_fd_mr)
# change environments
default_env_ext = arcpy.env.extent
default_env_mask = arcpy.env.mask
arcpy.env.extent = 'MAXOF'
arcpy.env.mask = path_fd_mr
# replace flow direction values, where correction points are defined
fd_mr_corr = arcpy.ia.Con(arcpy.ia.IsNull(path_fd_r_corr), path_fd_mr,
path_fd_r_corr)
fd_mr_corr.save(path_fd_mr_corr)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.cellSize = default_env_csz
arcpy.env.extent = default_env_ext
arcpy.env.mask = default_env_mask
# else, copy uncorrected flow direction raster
else:
print(('INFO: Flow direction correction point feature'
'class is empty. Original flow direction is used instead.'))
if arcpy.Exists(path_fd_mr_corr): arcpy.management.Delete(path_fd_mr_corr)
arcpy.CopyRaster_management(path_fd_mr, path_fd_mr_corr, '', '', '255',
'NONE', 'NONE', '8_BIT_UNSIGNED', 'NONE',
'NONE', 'GRID', 'NONE', 'CURRENT_SLICE',
'NO_TRANSPOSE')
if print_out: print('...calculate flow accumulation...')
# calculate flow accumulation raster
if arcpy.Exists(path_fa_mr): arcpy.management.Delete(path_fa_mr)
fa_mr = arcpy.sa.FlowAccumulation(path_fd_mr_corr, '', 'DOUBLE', 'D8')
fa_mr.save(path_fa_mr)
# copy pour point feature class
if arcpy.Exists(path_ppc): arcpy.management.Delete(path_ppc)
arcpy.management.CopyFeatures(path_pp, path_ppc, '', '', '', '')
# add adn calculate field using the object ID
arcpy.AddField_management(path_ppc, f_pp, 'LONG', '', '', '', '', 'NULLABLE',
'NON_REQUIRED', '')
arcpy.CalculateField_management(path_ppc, f_pp, '!{0}!'.format(f_oid), 'PYTHON3', '')
# snap pour points to flow accumulation raster
if arcpy.Exists(path_spp): arcpy.management.Delete(path_spp)
spp = arcpy.sa.SnapPourPoint(path_ppc, fa_mr, '40', f_pp)
spp.save(path_spp)
if print_out: print('...calculate watershed...')
# calculate watershed raster
if arcpy.Exists(path_ws_r): arcpy.management.Delete(path_ws_r)
ws_r = arcpy.sa.Watershed(path_fd_mr_corr, spp, f_val)
ws_r.save(path_ws_r)
# set environments
arcpy.env.outputZFlag = 'Same As Input'
arcpy.env.outputMFlag = 'Same As Input'
# convert watershed raster to polygon features
if arcpy.Exists(path_ws_sr): arcpy.management.Delete(path_ws_sr)
arcpy.RasterToPolygon_conversion(path_ws_r, path_ws_sr, 'NO_SIMPLIFY', f_VAL,
'SINGLE_OUTER_PART', '')
if print_out: print('...select model watersheds...')
pp_fieldnames = [field.name for field in arcpy.ListFields(path_pp)]
# if field exists, that identifies polygon as model watershed, delete watersheds
# with the fields' value >= 1
if f_pp_ws in pp_fieldnames:
# join created watershed polygons to pour points
arcpy.SpatialJoin_analysis(
path_ws_sr, path_pp, path_ws_s_sj, 'JOIN_ONE_TO_ONE', 'KEEP_ALL',
"{0} '{0}' true true false 2 Short 0 0,First,#,{1},{0},-1,-1".format(
f_pp_ws, name_pp), 'CONTAINS', '', '')
# select and copy model watersheds marked with a positive integer
sel_sql = f_pp_ws + ' >= 1'
path_ws_s_sj_sel = arcpy.management.SelectLayerByAttribute(
path_ws_s_sj, 'NEW_SELECTION', sel_sql)
if arcpy.Exists(path_ws_s): arcpy.management.Delete(path_ws_s)
arcpy.management.CopyFeatures(path_ws_s_sj_sel, path_ws_s, '', '', '', '')
else:
if arcpy.Exists(path_ws_s): arcpy.management.Delete(path_ws_s)
arcpy.management.CopyFeatures(path_ws_sr, path_ws_s, '', '', '', '')
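# Typical two-pass use (hypothetical paths as in the docstring): an initial run creates
# the empty flow direction correction point feature class; after digitizing corrections,
# a second run applies them:
# calc_watershed(r'c:\model_creation.gdb\dem_mr_cfnw_f', r'c:\model_creation.gdb\pp',
#                r'c:\model_creation.gdb' + '\\', initial=True, print_out=True)
# calc_watershed(r'c:\model_creation.gdb\dem_mr_cfnw_f', r'c:\model_creation.gdb\pp',
#                r'c:\model_creation.gdb' + '\\', initial=False,
#                path_fd_p_corr=r'c:\model_creation.gdb\fd_p_corr', print_out=True)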
# %% function to calculate the model cell network
def calc_model_network(path_ws_s, path_fd_mr, path_fa_mr,
path_gdb_out, path_files_out, name_fl_mr='fl_mr',
name_tgb_p='tgb_p', name_mnw='mwn',
print_out=False):
"""
Creates a point feature class representing the center of model cells
as well as a polyline feature class representing the model network between
the model cells (upstream-downstream-relation).
JM 2021
Arguments:
-----------
path_ws_s: str
path of the output watershed polygon feature class
(e.g., 'ws_s')
path_fd_mr: str
path of the output model resolution flow direction raster
(e.g., 'fd_mr')
path_fa_mr: str
path of the output model resolution flow accumulation raster
(e.g., 'fa_mr')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
path_files_out: str
storage path for intermediate data (e.g., 'c:\tmp_model_data\')
name_fl_mr: str
name of the extracted output model resolution flow length raster
(e.g., 'fl_mr_e')
name_tgb_p: str = (optional)
name of the output model cell point feature class (e.g., 'tgb_p')
name_tgbd_p: str = (optional)
name of the output downstream model cell point feature class
(e.g., 'tgbd_p')
name_mnw: str = (optional)
name of the output model network polyline feature class (e.g., 'mwn')
print_out: boolean (optional)
true if workprogress shall be print to command line
Returns:
-----------
df_data_tgb_p: pd.DataFrame
- tgb: model element ID number (int)
- tgb_down: downstream model element ID number (int)
- tgb_type: model element type (str)
- tgb_dtgb: real representative model element ID for dummy elements (int)
- tgb_a: inflowing catchment area of each model element [km²]
- x, y: x- and y-coordinates of element center [m]
df_tgb_up: pd.DataFrame
tgb_up1, tgb_up2: upstream model element ID numbers (int)
Saves the following outputs:
- extracted model resolution flow length raster
- model cell point feature class
- downstream model cell point feature class
- model network polyline feature class
"""
# define internal variables
def_val_dtgb = -1
# define internal field names
f_tgb = 'tgb'
f_tgb_down = 'tgb_down'
f_tgb_type = 'tgb_type'
f_tgb_dtgb = 'tgb_dtgb'
f_tgb_a = 'tgb_a'
f_x = 'x'
f_y = 'y'
f_nrflv = 'nrflv'
f_tgb_up1 = 'up1'
f_tgb_up2 = 'up2'
# define key-words to identify element types
str_headw = 'headwater'
str_routing = 'routing'
str_dummy = 'dummy'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_p_x = 'POINT_X'
f_p_y = 'POINT_Y'
# define paths of intermediates in working geodatabase
path_fd_mr_e = path_gdb_out + 'fd_mr_e'
path_fa_mr_e = path_gdb_out + 'fa_mr_e'
name_tgb_down_p = 'tgb_down_p'
# paths for outputs
path_fl_mr_e = path_gdb_out + name_fl_mr
path_mnw = path_gdb_out + name_mnw
# create real representative index list for dummy subcatchments
def real_repr_idx(df_tgb, str_dummy, print_out=False):
if print_out: print(('...create representative index list for '
'dummy subcatchments tgb_dtgb...'))
# Preallocate arrays
ser_tgb_dtgb = pd.Series(np.ones(df_tgb.shape[0]) * def_val_dtgb,
index=df_tgb.index, name=f_tgb_dtgb).astype(np.int)
# Iterate over all final index values
for tgb in df_tgb.index:
# if cell is a dummy, find the connected real cell
if df_tgb.at[tgb, f_tgb_type] == str_dummy:
# follow dummy cascade downwards until real cell and set index
mm = copy.deepcopy(tgb)
while df_tgb.at[mm, f_tgb_type] == str_dummy:
mm = df_tgb.at[mm, f_tgb_down]
ser_tgb_dtgb.at[tgb] = mm
return ser_tgb_dtgb
# calculations
# (de-)activate additional debugging command line output
debug = False # (False/True)
# set workspace
arcpy.env.workspace = path_gdb_out
# clip flow direction raster to watershed polygon
if print_out: print('...clip flow direction raster...')
if arcpy.Exists(path_fd_mr_e): arcpy.management.Delete(path_fd_mr_e)
fd_mr_e = arcpy.sa.ExtractByMask(path_fd_mr, path_ws_s)
fd_mr_e.save(path_fd_mr_e)
# clip flow accumulation raster to watershed polygon
if print_out: print('...clip flow accumulation raster...')
if arcpy.Exists(path_fa_mr_e): arcpy.management.Delete(path_fa_mr_e)
fa_mr_e = arcpy.sa.ExtractByMask(path_fa_mr, path_ws_s)
fa_mr_e.save(path_fa_mr_e)
# calculate downstream flow length
if print_out: print('...calculate flow length...')
if arcpy.Exists(path_fl_mr_e): arcpy.management.Delete(path_fl_mr_e)
fl_mr_e = arcpy.sa.FlowLength(fd_mr_e, 'DOWNSTREAM', '')
fl_mr_e.save(path_fl_mr_e)
if print_out: print('...import flow rasters...')
# define paths of intermediates in working folder
path_fd_c_tif = path_files_out + 'fd_c.tif'
path_fa_c_tif = path_files_out + 'fa_c.tif'
path_fl_c_tif = path_files_out + 'fl_c.tif'
# import flow direction, accumulation and length as numpy rasters
fd, ncols, nrows, cellsz, xll, yll, ctrl_tif_export = tc.fdal_raster_to_numpy(
path_fd_mr_e, 'fd', path_fd_c_tif, True)
fa, _, _, _, _, _, _ = tc.fdal_raster_to_numpy(
path_fa_mr_e, 'fa', path_fa_c_tif, False)
fl, _, _, _, _, _, _ = tc.fdal_raster_to_numpy(
path_fl_mr_e, 'fl', path_fl_c_tif, True)
# add a NaN boundary to all gis input data sets
empty_row = np.zeros((1, ncols)) * np.nan
empty_col = np.zeros((nrows + 2, 1)) * np.nan
fa = np.concatenate((empty_row, fa, empty_row), axis=0)
fa = np.concatenate((empty_col, fa, empty_col), axis=1)
fd = np.concatenate((empty_row, fd, empty_row), axis=0)
fd = np.concatenate((empty_col, fd, empty_col), axis=1)
fl = np.concatenate((empty_row, fl, empty_row), axis=0)
fl = np.concatenate((empty_col, fl, empty_col), axis=1)
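    # note: an equivalent alternative (not used here) would be
    # np.pad(fa, 1, mode='constant', constant_values=np.nan)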
# adjust gis parameters for new sizes
ncols = ncols + 2
nrows = nrows + 2
xll = xll - cellsz
yll = yll - cellsz
# set default data type for calculations for efficient RAM usage
if ncols * nrows <= 32767: np_type = np.int32
else: np_type = np.int64
# get indices and number of not-nan-data
gis_notnans = np.nonzero(~np.isnan(fd))
gis_notnans_x = gis_notnans[0]
gis_notnans_y = gis_notnans[1]
gis_notnans_count = gis_notnans_x.shape[0]
# create lookup table connecting flow direction int-values to array indices
fd_lu = np.array([[ 1, 0, 1], [ 2, 1, 1], [ 4, 1, 0],
[ 8, 1,-1], [ 16, 0,-1], [ 32,-1,-1],
[ 64,-1, 0], [128,-1, 1]])
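    # columns of fd_lu: [ESRI D8 code, row offset, column offset], e.g. code 1
    # (east) -> same row, next column; code 64 (north) -> previous row, same column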
# pre-allocate flow direction arrays
fd_xd = np.empty((gis_notnans_count, 1), dtype=np_type)
fd_yd = np.empty((gis_notnans_count, 1), dtype=np_type)
# iterate flow direction int-values
for ii in range(fd_lu.shape[0]):
# get indices of not-nan flow direction values with fitting int-value
fd_notnans_ii = fd[~np.isnan(fd)] == fd_lu[ii, 0]
# set array x and y index at found indices
fd_xd[fd_notnans_ii] = fd_lu[ii, 1]
fd_yd[fd_notnans_ii] = fd_lu[ii, 2]
    # calculate the array indices of the downstream cell for every not-nan cell
Jtm_down_xd = gis_notnans_x + np.int64(fd_xd[:, 0])
Jtm_down_yd = gis_notnans_y + np.int64(fd_yd[:, 0])
if print_out: print('...initialize arrays for iteration...')
# create temporal index array with continuous number Jtm
Jtm = np.ones((nrows, ncols), dtype=np_type) * -1
Jtm[gis_notnans_x, gis_notnans_y] = range(1, gis_notnans_count+1)
# calculate temporal downstream cell array Jtm_down using flow direction indices.
Jtm_down = np.ones((nrows, ncols),dtype=np_type) * -1
Jtm_down[gis_notnans] = Jtm[Jtm_down_xd, Jtm_down_yd]
# find the catchment outlet where no downstream index is set
OFr = np.nonzero(np.logical_and(Jtm != -1, Jtm_down == -1))
# mark the outlet cell in Jtm_down with a zero
Jtm_down[OFr] = 0
# preallocate list for temporal upstream index calculation Jt_up
Jt_up = np.ones((gis_notnans_count, 7), dtype=np_type) * -1
# iterate temporal upstream list
for jt_ii, jt in enumerate(range(1, Jt_up.shape[0] + 1)):
# find all rows in Jtm_down which do have jt as downstream cell
verw = np.nonzero(Jtm_down == jt)
        # write subset into temporal upstream list Jt_up
Jt_up[jt_ii, 0:verw[0].shape[0]] = Jtm[verw]
# convert list to int
Jt_up = np.int32(Jt_up)
# calculate sum of necessary dummy cells (which have >2 upstream cells)
D_count = np.nansum(Jt_up[:, 2:7] != -1)
# calculate number of temporal index numbers jt
Jt_count = Jt_up.shape[0]
# calculate number of final indices j as sum of dummy and real cells
J_count = Jt_count + D_count
# preallocate temporal downstream list Jt_down
Jt_down = np.ones((Jt_count, 1), dtype=np_type) * -1
# iterate over temporal index jt and fill list
for jt_ii, jt in enumerate(range(1, Jt_count+1)):
# look for downstream cell from matrix
Jt_down[jt_ii] = Jtm_down[Jtm == jt]
# preallocate lists for final indices J, J_type and J_jt, final upstream
# and downstream lists J_up and J_down, and protocol list Done
J_type = J_count * [None]
J = np.array(range(1, J_count+1))
J_up = np.ones((J_count, 2), dtype=np_type) * -1
J_down = np.ones((J_count, ), dtype=np_type) * -1
J_jt = np.ones((J_count, 1), dtype=np_type) * -1
Done = np.ones((np.nanmax(Jtm)), dtype=np_type) * -1
# calculate protocol list D_contr
D_contr = np.nansum(Jt_up[:, 2:] != -1, 1)
# calculate final flow network index lists J, J_down, J_up, J_type, X and Y
# iterating from largest flow length downstream to outlet (tree-climbing algorithm)
if print_out: print('''...calculate final flow network index lists...''')
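    # the loop below distinguishes four cases: (1) headwater cell (no upstream
    # cells) -> new index, move downstream; (2) routing cell (all upstream cells
    # done) -> new index, move downstream; (3) dummy cell (>= 2 upstream cells
    # done, but not all) -> new dummy index; (4) otherwise no new index; in cases
    # 3 and 4 move upstream to the not-done cell with the largest flow accumulation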
# find cell with largest flow length and its temporal index
jt = Jtm[fl == np.nanmax(fl)][0]
jti = jt - 1
# preset upstream subset (ss)
ss = Jt_up[jti, :]
ss = ss[ss != -1]
ssi = ss - 1
# calculate not done subset of upstream cell subset
ssnotdone = ss[Done[ssi] == -1]
# pre-set final index variable (0)
jj = 0
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' Initial cell at pos: {0:d}/{1:d} ({2:d})'.format(x, y, jt))
# while either outlet is not reached or not all upstream members are processed
while jt != Jtm[OFr] or ssnotdone.shape[0] != 0:
        # case 1: HEADWATER CELL as upstream subset ss is empty
# -> create new index for headwater and move downwards
if ss.shape[0] == 0:
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_headw
J_jt[jji, 0] = jt
# debug protocol
if debug and print_out:
print('j: {0:d}, pos: {1:d}/{2:d} = {3:d} -> {4:d}, {5:s} cell'.format(
jj, x, y, jt, Jt_down[jti, 0], str_headw))
# set upstream cell to 0, mark cell as done and go downwards
J_up[jji, 0] = 0
Done[jti] = 1
jt = Jt_down[jti, 0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> down to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
else:
# case 2: ROUTING CELL as all upstream cells are done
# -> create new index for routing cell and move downwards
if all(Done[ssi] == 1):
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_routing
J_jt[jji, 0] = jt
# define upstream cell subset and give position indices
ssj = np.flatnonzero(np.any(J_jt == ss, 1))
# if one or two upstream cells exist:
# connect two real cells in Jt_up and Jt_down
if ssj.shape[0] <= 2:
ssjl = ssj.shape[0]
ssjtu = Jt_up[jti, :ssjl]
ssjtu = ssjtu[ssjtu != -1]
J_up[jji, :ssjl] = J[np.flatnonzero(np.any(J_jt == ssjtu, 1))]
J_down[ssj] = jj
# else if > 2 upstream cells exist:
                # connect 1 real and 1 dummy cell in Jt_up and Jt_down
else:
real = J[np.amax(ssj)]
dummy = np.amax(J_down[ssj])
J_up[jji, :] = [dummy, real]
J_down[[dummy-1, real-1]] = jj
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jt-1],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_routing))
# mark cell as done and go downwards
Done[jti] = 1
jt = Jt_down[jti, 0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> down to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
else:
# case 3: DUMMY CELL as not all required dummy cells are
# done but >= 2 upstream cells are done
# -> create new index for dummy cell and move upwards to
# the cell with the largest flow accumulation
if np.sum(Done[ssi] != -1) >= 2:
# increment final index, fill type and link lists
jj += 1
jji = jj - 1
J_type[jji] = str_dummy
J_jt[jji,0] = 0
# define upstream cell subset and give position indices
ssj = np.flatnonzero(J_down[0:jji] == -1)
# preallocate testing matrix (all are false)
ssjt = np.zeros((ssj.shape[0], ), dtype=bool)
# iterate upstream cell subset
for ii, ssji in enumerate(ssj):
jtupi = Jt_up[jti, :]
jtupi = jtupi[jtupi != -1]
# ssj exists in Jt_up -> test is TRUE
if np.any(np.isin(jtupi, J_jt[ssji, 0])):
ssjt[ii] = True
# ssj does not exist in Jt_up but is dummy
# -> test is TRUE
elif J_type[ssji] == str_dummy:
ssjt[ii] = True
# reduce subset with testing matrix
ssj = ssj[ssjt]
# 'wrong neighbours'
# (loose, not finished dummy strings) are removed
if ssj.shape[0] > 2:
ssj = ssj[-2:]
# connect upstream cells in Jt_up and Jt_down
J_up[jji, :] = J[ssj]
J_down[ssj] = jj
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jti,0],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_dummy))
# decrement dummy protocol variable
D_contr[jti] = D_contr[jti] - 1
# case 4 (else): UPWARDS MOVEMENT as not all required dummy
# cells are done and < 2 upstream cells are done
# -> do not create new index
                # calculate the not-done subset of upstream cells and find the
                # cell with the largest flow accumulation
                # preallocate subset for flow accumulation calculation
ssflowacc = np.zeros((ssnotdone.shape[0]), dtype=np_type)
# iterate not done subset of upstream cells and find flow
# accumulation
for ii, iiv in enumerate(ssflowacc):
ssflowacc[ii] = fa[Jtm == ssnotdone[ii]]
# calculate temporal index of max. flow accumulation
ssmaxind = ssnotdone[ssflowacc == np.amax(ssflowacc)]
# go upstream to max flow acc or first cell if more than one
# solutions exist
jt = ssmaxind[0]
jti = jt - 1
# debug protocol
if debug and print_out:
im_pos = np.nonzero(Jtm == jt)
x = im_pos[0]
y = im_pos[1]
print(' -> up to {0:d}/{1:d} = {2:d}'.format(x, y, jt))
# find upstream cells and create subset (ss)
ss = Jt_up[jti, :]
ss = ss[ss != -1]
ssi = ss - 1
# calculate not done subset of upstream cell subset
ssnotdone = ss[Done[ssi] == -1]
# Calculate values for catchment outlet
if print_out: print('...calculate outlet...')
# fill lists
jj += 1
jji = jj - 1
J_jt[jji, 0] = jt
J_type[jji] = str_routing
# debug protocol
if debug and print_out:
pr_up = Jt_up[jti, :]
pr_up = pr_up[pr_up != -1]
print('''j: {0:d}, Pos: {1:d}/{2:d} = {3:d} -> {4:d},
Jt_up: {5:s}, {6:s} cell'''.format(
jj, x, y, jt, Jt_down[jt-1],
str(pr_up[~np.isnan(pr_up)])[1:-1], str_routing))
# define upstream cell subset and give position indices
ssj = np.flatnonzero(np.any(J_jt == ss, 1))
# one or two upstream cells: connect two real cells in Jt_up and Jt_down
if ssj.shape[0] <= 2:
ssjl = ssj.shape[0]
ssjtu = Jt_up[jti, :ssjl]
ssjtu = ssjtu[ssjtu != -1]
J_up[jji, :ssjl] = J[np.flatnonzero(np.any(J_jt == ssjtu, 1))]
J_down[ssj] = jj
    # > 2 upstream cells: connect 1 real and 1 dummy cell in Jt_up and Jt_down
else:
real = J[np.amax(ssj)]
dummy = np.amax(J_down[ssj])
J_up[jji, :] = [dummy, real]
J_down[[dummy-1, real-1]] = jj
# Define downstream cell as 0
J_down[jji] = Jt_down[jti]
# create final index array Jm and final dummy index list J_dj
if print_out: print('...create final index array and final dummy index list...')
# preallocate arrays
Jm = np.ones(Jtm.shape, dtype=np_type) * -1
J_dj = np.ones(J_up.shape[0], dtype=np_type) * def_val_dtgb
# iterate all cells
Jtm_it = np.nditer(Jtm, flags=['multi_index'])
while not Jtm_it.finished:
# if cell is a valid ID, find cell in list
if Jtm_it[0] != -1:
Jm[Jtm_it.multi_index] = J[np.flatnonzero(J_jt == Jtm_it[0])]
Jtm_it.iternext()
# create real representative index list for dummy cells iterating all
# final indices
for jj in range(1, J_up.shape[0]+1):
jji = jj - 1
# if cell is a dummy, find the connected real cell
if J_type[jji] == str_dummy:
# follow dummy cascade downwards until real cell and set index
mmi = jji
while J_type[mmi] == str_dummy:
mm = J_down[mmi]
mmi = mm - 1
J_dj[jji] = mm
# calculate cell name and coordinates
if print_out: print('...calculate coordinates...')
# preallocate variable
X = []
Y = []
# iterate final index
for jj in range(1, J_down.shape[0]+1):
jji = jj - 1
# if jj is a dummy, insert X and Y coordinates using dummy list
if J_type[jji] == str_dummy:
# calculate coordinate indices
xy = np.nonzero(Jm == J_dj[jji])
# if it is a head water or routing cell, insert X and Y coordinates
# using index array
else:
# calculate coordinate indices
xy = np.nonzero(Jm == jj)
        # append X and Y coordinates of the cell center
X.append(xll + (xy[1][0] + 1 - 0.5) * cellsz)
Y.append(yll + (nrows - xy[0][0] - 1 + 0.5) * cellsz)
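    # note: xll and yll refer to the lower left corner of the NaN-padded raster,
    # so the formulas above return the respective raster cell center coordinates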
# calculate upstream inflow catchment area of each routing cell
# pre-allocate variable
J_A = np.zeros(J.shape)
# iterate all cells
for jj_ii, jj in enumerate(J):
# if it is a routing or the outflow cell, calculate area
if J_type[jj_ii] == str_routing:
J_A[jj_ii] = fa[Jm == jj] * ((cellsz / 1000)**2)
# export model cell to point feature classes
if print_out: print('...create model cell point feature classes...')
# create pandas data frames
structarr_tgb_in = list(zip(J_down, J_type, J_A, X, Y))
df_mn = pd.DataFrame(structarr_tgb_in, index=J,
columns=[f_tgb_down, f_tgb_type, f_tgb_a, f_x, f_y])
df_tgb_up = pd.DataFrame(J_up, index=J, columns=[f_tgb_up1, f_tgb_up2])
# create real representative index list for dummy subcatchments
ser_tgb_dtgb = real_repr_idx(df_mn, str_dummy, print_out=print_out)
# create names of model subcatchments
ser_nrflv = pd.Series(df_mn.shape[0] * '', index=df_mn.index, name=f_nrflv)
    for tgb, el_type in df_mn.loc[:, f_tgb_type].iteritems():
        ser_nrflv.at[tgb] = '{0:s}{1:05d}'.format(el_type[0].upper(), tgb)
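    # e.g., routing element 23 becomes 'R00023', headwater element 7 'H00007',
    # and dummy element 102 'D00102'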
# summarize DataFrames
df_tgb = pd.concat([df_mn, ser_tgb_dtgb, ser_nrflv], axis=1)
# summarize information for export
ser_tgb = df_tgb.index.to_series(name=f_tgb)
df_data_tgb_p = pd.concat(
[ser_tgb, df_tgb.loc[:, [f_tgb_down, f_tgb_type,
f_tgb_dtgb, f_tgb_a, f_x, f_y]]], axis=1)
# create spatial reference object
sr_obj = arcpy.Describe(path_fd_mr_e).spatialReference
# export to point feature classes
tc.tgb_to_points(df_data_tgb_p, sr_obj, path_gdb_out, name_tgb_p,
geometry_fields=(f_x, f_y))
tc.tgb_to_points(df_data_tgb_p, sr_obj, path_gdb_out, name_tgb_down_p,
geometry_fields=(f_x, f_y))
# create model network polyline feature class
if print_out: print('...create model network polyline feature class...')
# import cell information
arcpy.AddIndex_management(name_tgb_p, f_tgb_down, f_tgb_down,
'NON_UNIQUE', 'NON_ASCENDING')
# delete non-relevant fields of downstream feature class
arcpy.DeleteField_management(name_tgb_down_p, '{0}; {1}; {2}; {3}'.format(
f_tgb_dtgb, f_tgb_down, f_tgb_type, f_tgb_a))
# add coordinates to both feature classes
arcpy.AddXY_management(name_tgb_p)
arcpy.AddXY_management(name_tgb_down_p)
# alter coordinate fields of downstream feature class
f_p_xd = 'POINT_Xd'
f_p_yd = 'POINT_Yd'
arcpy.AlterField_management(name_tgb_down_p, f_p_x, f_p_xd, f_p_xd,
'', '4', 'NULLABLE', 'DO_NOT_CLEAR')
arcpy.AlterField_management(name_tgb_down_p, f_p_y, f_p_yd, f_p_yd,
'', '4', 'NULLABLE', 'DO_NOT_CLEAR')
# join information from downstream cells
tgb_l_join = arcpy.management.AddJoin(name_tgb_p, f_tgb_down, name_tgb_down_p,
f_tgb, 'KEEP_COMMON')
# calculate line features
if arcpy.Exists(path_mnw): arcpy.management.Delete(path_mnw)
arcpy.XYToLine_management(tgb_l_join, path_mnw, f_p_x, f_p_y,
f_p_xd, f_p_yd, 'GEODESIC', f_tgb, sr_obj)
# delete downstream neighbour model cell point feature class
arcpy.Delete_management(name_tgb_down_p)
return df_data_tgb_p, df_tgb_up
# %% function to preprocess raster files, which are used for routing parameters
def prepr_routing_rasters(path_dem_hr, path_fnw, path_ws_s, path_fa_mr,
path_gdb_out, name_fa_hr='fa_hr',
name_dem_hr_ws='dem_hr_ws', name_fl_fnw_mr='fl_fnw_mr',
name_dem_max_mr='dem_max_mr', name_dem_min_mr='dem_min_mr',
initial=True, print_out=False):
"""
Preprocesses raster files, which are used to calculate routing parameters.
JM 2021
Arguments:
-----------
path_dem_hr: str (e.g., 'c:\model_creation\dem_hr')
        path of the high resolution digital elevation raster
path_fnw: str (e.g., 'c:\model_creation\fnw')
path of the flow network polyline feature class or shape file
path_ws_s: str (e.g., 'c:\model_creation\ws_s')
path of the model watershed polygon feature class
path_fa_mr: str (e.g., 'c:\model_creation\fa_mr')
path of the model resolution flow accumulation raster
path_gdb_out: str (e.g., 'c:\model_creation.gdb')
path of the output file geodatabase
name_fa_hr: str (optional, default: 'fa_hr')
name of the output extracted high resolution flow accumulation raster
name_dem_hr_ws: str (optional, default: 'dem_hr_ws')
name of the output extracted high resolution digital elevation raster
name_fl_fnw_mr: str (optional, default: 'fl_fnw_mr')
name of the output model resolution flow length at flow network location
name_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the output model resolution maximum value of the high resolution DEM
name_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the output model resolution minimum value of the high resolution DEM
initial: boolean (optional, default: True)
        true if it is the first run and all steps have to be calculated from scratch
print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
Saves the following outputs:
- extracted high resolution digital elevation raster (model domain)
- model resolution flow length at flow network location (else: NaN)
- model resolution maximum value of the high resolution elevation raster
- model resolution minimum value of the high resolution elevation raster
"""
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_oid = 'OBJECTID'
f_val = 'Value'
f_cellsz_x = 'CELLSIZEX'
method_mean = 'MEAN'
method_max = 'MAXIMUM'
method_min = 'MINIMUM'
# paths for intermediates
path_fnw_r = path_gdb_out + 'fnw_r'
path_fnw_mr = path_gdb_out + 'fnw_mr'
path_dem_hr_f = path_gdb_out + 'dem_hr_f'
path_fd_hr = path_gdb_out + 'fd_hr'
path_fl_hr = path_gdb_out + 'fl_hr'
path_fl_snfnw = path_gdb_out + 'fl_snfnw'
path_fl_snfa_mr = path_gdb_out + 'fl_snfa_mr'
if initial: path_fl_aggr_mr = path_gdb_out + 'fl_aggr_mr'
# paths for outputs
path_dem_hr_ws = path_gdb_out + name_dem_hr_ws
path_fa_hr = path_gdb_out + name_fa_hr
path_fl_fnw_mr = path_gdb_out + name_fl_fnw_mr
if initial:
path_dem_max_mr = path_gdb_out + name_dem_max_mr
path_dem_min_mr = path_gdb_out + name_dem_min_mr
# set workspace
arcpy.env.workspace = path_gdb_out
# if it is the first calculation run, calculate high-resolution flow length
if initial:
if print_out: print('...calculate high-resolution flow length...')
# save default environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
# set environments
arcpy.env.extent = 'MAXOF'
arcpy.env.snapRaster = path_dem_hr
# clip high resolution digital elevation raster to model domain
if print_out: print(' step 1/7: clip high resolution DEM to model domain...')
if arcpy.Exists(path_dem_hr_ws): arcpy.management.Delete(path_dem_hr_ws)
dem_hr_ws = arcpy.sa.ExtractByMask(path_dem_hr, path_ws_s)
dem_hr_ws.save(path_dem_hr_ws)
# fill corrected high resolution digital elevation raster
if print_out: print(' step 2/7: fill clipped high resolution DEM...')
if arcpy.Exists(path_dem_hr_f): arcpy.management.Delete(path_dem_hr_f)
dem_hr_f = arcpy.sa.Fill(path_dem_hr_ws, None)
dem_hr_f.save(path_dem_hr_f)
# calculate flow direction for filled digital elevation raster
if print_out: print(' step 3/7: calculate high resolution flow direction...')
if arcpy.Exists(path_fd_hr): arcpy.management.Delete(path_fd_hr)
fd_hr = arcpy.sa.FlowDirection(path_dem_hr_f, 'NORMAL', None, 'D8')
fd_hr.save(path_fd_hr)
# calculate flow accumulation
if print_out: print(' step 4/7: calculate high resolution flow accumulation...')
if arcpy.Exists(path_fa_hr): arcpy.management.Delete(path_fa_hr)
fa_hr = arcpy.sa.FlowAccumulation(path_fd_hr)
fa_hr.save(path_fa_hr)
# calculate flow length for flow direction
if print_out: print(' step 5/7: calculate high resolution flow length...')
if arcpy.Exists(path_fl_hr): arcpy.management.Delete(path_fl_hr)
fl_hr = arcpy.sa.FlowLength(path_fd_hr, 'DOWNSTREAM', None)
fl_hr.save(path_fl_hr)
# convert flow network polyline feature class to high resolution raster
if print_out: print((' step 6/7: convert flow network polyline feature '
'class to high resolution raster...'))
if arcpy.Exists(path_fnw_r): arcpy.management.Delete(path_fnw_r)
arcpy.conversion.PolylineToRaster(path_fnw, 'OBJECTID', path_fnw_r,
'MAXIMUM_LENGTH', 'NONE', path_dem_hr_ws)
# set flow length to nan if flow network raster is nan
if print_out: print((' step 7/7: set flow length to nan if flow network '
'raster is nan...'))
if arcpy.Exists(path_fl_snfnw): arcpy.management.Delete(path_fl_snfnw)
setn_expr = '{0} IS NULL'.format(f_val)
fl_snfnw = arcpy.ia.SetNull(path_fnw_r, path_fl_hr, setn_expr)
fl_snfnw.save(path_fl_snfnw)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
# Aggregate flow length to model resolution
if print_out: print('...aggregate flow length to model resolution...')
# save default environments
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
default_env_mask = arcpy.env.mask
# set environments
arcpy.env.snapRaster = path_fa_mr
arcpy.env.extent = path_fa_mr
arcpy.env.mask = path_fa_mr
# get high resolution and model resolution cell size
cell_sz_x_obj = arcpy.GetRasterProperties_management(path_dem_hr_ws, f_cellsz_x)
cell_sz_x = np.int32(cell_sz_x_obj.getOutput(0))
cellsz_obj = arcpy.GetRasterProperties_management(path_fa_mr, f_cellsz_x)
cellsz = np.int32(cellsz_obj.getOutput(0))
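    # the aggregation factor below is the ratio of model to high resolution cell
    # size, e.g. a 100 m model raster over an (assumed) 5 m DEM gives a factor of 20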
# aggregate flow length to final cell size
if initial:
fl_aggr_mr = arcpy.sa.Aggregate(
path_fl_snfnw, str(np.int32(cellsz/cell_sz_x)),
method_mean, 'EXPAND', 'DATA')
fl_aggr_mr.save(path_fl_aggr_mr)
# set aggregated flow length at flow accumulation areas < 0.1 km² to nan
expr_sql = '{0:s} < {1:.0f}'.format(f_val, 1000/cellsz)
fl_snfa_mr = arcpy.ia.SetNull(path_fa_mr, path_fl_aggr_mr, expr_sql)
fl_snfa_mr.save(path_fl_snfa_mr)
# convert polylines to raster in model grid size
arcpy.conversion.PolylineToRaster(path_fnw, f_oid, path_fnw_mr,
'MAXIMUM_LENGTH', 'NONE', path_fa_mr)
# set aggregated flow length to nan if aggregated flow network is nan as well
if arcpy.Exists(path_fl_fnw_mr): arcpy.management.Delete(path_fl_fnw_mr)
fl_fnw_mr = arcpy.ia.SetNull(arcpy.ia.IsNull(path_fnw_mr), path_fl_snfa_mr)
fl_fnw_mr.save(path_fl_fnw_mr)
# Aggregate high-resolution DEM to model resolution extracting min and max values
if initial:
if print_out: print(('...calculate min and max high resolution DEM values '
'in model resolution...'))
if arcpy.Exists(path_dem_max_mr): arcpy.management.Delete(path_dem_max_mr)
if arcpy.Exists(path_dem_min_mr): arcpy.management.Delete(path_dem_min_mr)
dem_max_mr = arcpy.sa.Aggregate(path_dem_hr_ws, str(cellsz),
method_max, 'EXPAND', 'DATA')
dem_min_mr = arcpy.sa.Aggregate(path_dem_hr_ws, str(cellsz),
method_min, 'EXPAND', 'DATA')
dem_max_mr.save(path_dem_max_mr)
dem_min_mr.save(path_dem_min_mr)
# reset environments
arcpy.env.snapRaster = default_env_snr
arcpy.env.extent = default_env_ext
arcpy.env.mask = default_env_mask
# %% function to create point feature class indicating elements, where no
# high-resolution flow length shall be calculated
def create_fl_ind_point(sr_obj,
path_gdb_out, name_no_fnw_fl='no_fnw_fl',
print_out=False):
"""
Creates a point feature class in the defined file geodatabase to be filled
by the user with points. These points indicate cells, for which no high
resolution flow length shall be calculated, but the model resolution is used
instead. The point feature class has neither Z- nor M-values.
JM 2021
Arguments:
-----------
sr_obj: arcpy.SpatialReferenceObject
arcpy.Object containing the spatial reference of the final feature class
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_no_fnw_fl: str (optional)
name of the output indication point feature class (e.g., 'no_fnw_fl')
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
    Saves the output indication point feature class
"""
if print_out: print('...create indication point feature class...')
# set path for output
path_no_fnw_fl = path_gdb_out + name_no_fnw_fl
# prepare indication point feature class
if arcpy.Exists(path_no_fnw_fl): arcpy.management.Delete(path_no_fnw_fl)
arcpy.CreateFeatureclass_management(path_gdb_out, name_no_fnw_fl, 'POINT', '',
'DISABLED', 'DISABLED', sr_obj, '', '0',
'0', '0', '')
# %% Create polygon feature class representing the model elements
def create_element_polyg(path_tgb_p, path_sn_raster, path_gdb_out,
name_tgb_s='tgb_s', print_out=False):
"""
Creates a polygon feature class in the defined file geodatabase, which includes
all values of the input point feature class and represents the model element
    raster structure. The feature class only includes model elements that are not
    dummy elements and covers the whole model domain.
JM 2021
Arguments:
-----------
path_tgb_p: str
        path of the model element point feature class
(e.g., 'c:\model_creation.gdb\tgb_p')
path_sn_raster: str
        path of the snap raster, which represents the model raster structure
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_tgb_s: str (optional, default: 'tgb_s')
name of the output model element polygon feature class
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
Saves a polygon feature class representing the model elements
"""
# define internal variables
def_val_dtgb = -1
# define internal field names
f_tgb = 'tgb'
f_tgb_dtgb = 'tgb_dtgb'
# arcpy field names (ONLY CHANGE IF ARCPY FUNCTION CHANGES FORCE YOU!)
f_val = 'VALUE'
f_cellsz_x = 'CELLSIZEX'
f_gridcode = 'gridcode'
# define paths of intermediates in working geodatabase
name_tgb_p_nodum = 'tgb_p_nodum'
name_tgb_p_sel = 'tgb_p_sel'
name_tgb_r = 'tgb_r'
# set workspace
arcpy.env.workspace = path_gdb_out
# Calculate model element polygons in raster structure
if print_out: print('...Calculate model element polygons in raster structure...')
# save original environment settings
default_env_snr = arcpy.env.snapRaster
    # select elements which are not dummies and copy features to a new layer
sel_expr = '{0:s} = {1:d}'.format(f_tgb_dtgb, def_val_dtgb)
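    # with the defaults above, sel_expr evaluates to 'tgb_dtgb = -1', which
    # selects all elements that are not dummy elements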
name_tgb_p_sel = arcpy.management.SelectLayerByAttribute(
path_tgb_p, 'NEW_SELECTION', sel_expr, '')
arcpy.CopyFeatures_management(name_tgb_p_sel, name_tgb_p_nodum, '', '', '', '')
arcpy.management.SelectLayerByAttribute(path_tgb_p, 'CLEAR_SELECTION', '', None)
# set environment
arcpy.env.snapRaster = path_sn_raster
# get model cell size
cellsz_obj = arcpy.GetRasterProperties_management(path_sn_raster, f_cellsz_x)
cellsz = np.int32(cellsz_obj.getOutput(0))
    # create element features by converting the point layer to a raster
arcpy.PointToRaster_conversion(name_tgb_p_nodum, f_tgb, name_tgb_r,
'MOST_FREQUENT', 'NONE', str(cellsz))
arcpy.RasterToPolygon_conversion(name_tgb_r, name_tgb_s, 'NO_SIMPLIFY',
f_val, 'SINGLE_OUTER_PART', '')
arcpy.AlterField_management(name_tgb_s, f_gridcode, f_tgb, f_tgb)
# restore environment
arcpy.env.snapRaster = default_env_snr
# %% summarize GIS data for runoff concentration and routing parameter calculation
def summar_gisdata_for_roandrout(
path_tgb_p, path_dem_max_mr, path_dem_min_mr,
path_fl_mr, path_fl_fnw_mr, path_no_fnw_fl,
path_gdb_out, name_tgb_par_p='tgb_par_p',
field_dem_max_mr='dem_max_mr', field_dem_min_mr='dem_min_mr',
field_fl_fnw_mean_mr='fl_fnw_mean_mr', field_fl_mr='fl_mr',
print_out=False):
"""
Creates a point feature class in the defined file geodatabase, which includes
values of the maximum and minimum elevation as well as the flow network and
the model resolution flow length within each model element.
JM 2021
Arguments:
-----------
    path_tgb_p: str
        path of the model element point feature class
        (e.g., 'c:\model_creation.gdb\tgb_p')
    path_dem_max_mr: str
        path of the model resolution raster of maximum high resolution elevation
        (e.g., 'c:\model_creation.gdb\dem_max_mr')
    path_dem_min_mr: str
        path of the model resolution raster of minimum high resolution elevation
        (e.g., 'c:\model_creation.gdb\dem_min_mr')
    path_fl_mr: str
        path of the model resolution flow length raster
        (e.g., 'c:\model_creation.gdb\fl_mr')
    path_fl_fnw_mr: str
        path of the model resolution flow length raster at flow network locations
        (e.g., 'c:\model_creation.gdb\fl_fnw_mr')
    path_no_fnw_fl: str
        path of the indication point feature class marking elements, for which
        no flow network flow length shall be used
        (e.g., 'c:\model_creation.gdb\no_fnw_fl')
path_gdb_out: str
path of the output file geodatabase (e.g., 'c:\model_creation.gdb')
name_tgb_par_p: str (optional, default: 'tgb_par_p')
name of the output model element point feature class with extracted
parameters
field_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the field in name_tgb_par_p containing max elevation value
field_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the field in name_tgb_par_p containing min elevation value
field_fl_fnw_mean_mr: str (optional, default: 'fl_fnw_mean_mr')
name of the field in name_tgb_par_p containing flow network flow length
field_fl_mr: str (optional, default: 'fl_mr')
name of the field in name_tgb_par_p containing model resolution flow length
print_out: boolean
        true if work progress shall be printed to the command line
Returns:
-----------
Saves model element point feature class with extracted parameters:
- minimum elevation
- maximum elevation
- model resolution flow length
- flow network flow length
"""
# define internal variables
f_tgb = 'tgb'
# set workspace
arcpy.env.workspace = path_gdb_out
# define paths of intermediates in working geodatabase
name_fl_fnw_mr_corr = 'fl_fnw_mr_corr'
name_no_fnw_fl_r = 'no_fnwfl_r'
# save original environment settings
default_env_snr = arcpy.env.snapRaster
default_env_ext = arcpy.env.extent
# If field fl_mr (model resolution flow length) does not exist,
# add field while extracting flow length values
if not arcpy.ListFields(path_tgb_p, field_fl_mr):
if print_out: print('...extract flow length values...')
arcpy.gp.ExtractMultiValuesToPoints_sa(
path_tgb_p, path_fl_mr + ' ' + field_fl_mr, 'NONE')
# If there are any flow length correction points
    # remove values of fl_fnw_mr at TGBs marked with a feature point in no_fnw_fl
if arcpy.management.GetCount(path_no_fnw_fl)[0] != '0':
if print_out: print('...correct marked flow length values...')
# set environments
arcpy.env.extent = 'MAXOF'
arcpy.env.snapRaster = path_fl_mr
# convert correction points to raster and remove flow network flow length values
arcpy.PointToRaster_conversion(path_no_fnw_fl, 'OBJECTID', name_no_fnw_fl_r,
'MOST_FREQUENT', 'NONE', path_fl_mr)
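        # arcpy.ia.Con with an IsNull condition keeps the flow network flow length
        # only where no correction point was rasterized; marked cells become NoData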
fl_fnw_mr_corr = arcpy.ia.Con(arcpy.ia.IsNull(name_no_fnw_fl_r), path_fl_fnw_mr)
fl_fnw_mr_corr.save(name_fl_fnw_mr_corr)
# restore environments
arcpy.env.extent = default_env_ext
arcpy.env.snapRaster = default_env_snr
# Extract min and max elevation and flow length values to model point features
if print_out: print('...extract raster values to model point features...')
# copy model element point features to a new feature class
arcpy.management.CopyFeatures(path_tgb_p, name_tgb_par_p)
    # if there are any flow length correction points, add information from the corrected raster
if arcpy.management.GetCount(path_no_fnw_fl)[0] != '0':
arcpy.sa.ExtractMultiValuesToPoints(name_tgb_par_p, [
[path_dem_max_mr, field_dem_max_mr],
[path_dem_min_mr, field_dem_min_mr],
[name_fl_fnw_mr_corr, field_fl_fnw_mean_mr]], 'NONE')
# else use original files
else:
arcpy.sa.ExtractMultiValuesToPoints(name_tgb_par_p, [
[path_dem_max_mr, field_dem_max_mr],
[path_dem_min_mr, field_dem_min_mr],
[path_fl_fnw_mr, field_fl_fnw_mean_mr]], 'NONE')
    # delete identical records (workaround for a bug in ExtractMultiValuesToPoints)
arcpy.management.DeleteIdentical(name_tgb_par_p, f_tgb, None, 0)
# %% calculate parameters for tgb.dat
def calc_roandrout_params(cellsz, q_spec_ch, name_tgb_par_p,
field_dem_max_mr='dem_max_mr', field_dem_min_mr='dem_min_mr',
field_fl_mr='fl_mr', field_fl_fnw_mean_mr='fl_fnw_mean_mr',
def_fl_upper_lim=np.inf, def_fl_strct_mism=2, def_sl_min=0.0001,
def_sl_excl_quant=None, def_zmin_rout_fac=0.5, def_zmax_fac=1,
ser_q_in_corr=None, ch_est_method='combined', def_bx=0, def_bbx_fac=1,
def_bnm=1.5, def_bnx=100, def_bnvrx=4, def_skm=30, def_skx=20,
print_out=False):
"""
    Calculates the runoff concentration and routing parameters of the tgb.dat
    file for every model element, based on the minimum and maximum elevation as
    well as the flow network and model resolution flow length values extracted
    before.
JM 2021
Arguments:
-----------
cellsz: integer
edge length of the model elements in [m] (e.g., 100)
q_spec_ch: float
channel forming specific flood discharge value [m3s-1km-2] (e.g., 0.21)
name_tgb_par_p: str
path of the input model element point feature class with following
parameters for each element except dummy elements:
- maximum elevation value
- minimum elevation value
- channel model resolution flow length
- channel flow network flow length
(e.g., 'c:\model_creation.gdb\tgb_p_fl')
field_dem_max_mr: str (optional, default: 'dem_max_mr')
name of the field in name_tgb_par_p containing max elevation value
field_dem_min_mr: str (optional, default: 'dem_min_mr')
name of the field in name_tgb_par_p containing min elevation value
field_fl_mr: str (optional, default: 'fl_mr')
name of the field in name_tgb_par_p containing model resolution flow length
field_fl_fnw_mean_mr: str (optional, default: 'fl_fnw_mean_mr')
name of the field in name_tgb_par_p containing flow network flow length
def_sl_min: float (optional, default: 0.0001)
minimum channel slope value to be maintained due to LARSIM-internal
restrictions
def_fl_strct_mism: int (optional, default: 2)
default flow length for structural mismatch and negative transition
deviations [m]. attention: 1 [m] is interpreted by LARSIM as 'no routing'!
def_fl_upper_lim: int (optional, default: inf)
upper threshold for realistic flow length [m]
def_sl_excl_quant: float (optional, default: None)
quantile of slope values to be set constant to quantile value
(e.g., 0.999 sets the upper 0.1% of the slope values to the
0.1% quantile value)
def_zmin_rout_fac: float (optional, default: 0.5)
Factor to vary the lower elevation of runoff concentration between
the minimum (0) and maximum (1) channel elevation of the element.
By default, the factor is set to the average elevation (0.5) [-]
    def_zmax_fac: float (optional, default: 1)
        Factor to vary the upper elevation of runoff concentration between
        the minimum (0) and maximum (1) elevation of the element. By default,
        the factor is set to the maximum elevation (1) [-]
    ser_q_in_corr: pandas.Series (optional, default: None)
        Series of channel-forming inflow (e.g., HQ2) at the corresponding
        model element ID in the series' index.
(e.g., pd.Series(np.array([2.8, 5.3]), index=[23, 359], name='q_in'))
ch_est_method: string (optional, default: 'combined')
String defining channel estimation function. Possible values:
- 'Allen': Allen et al. (1994)
- 'Krauter': Krauter (2006)
- 'combined': Allen et al.(1994) for small and Krauter (2006) for
large areas
def_bx: float (optional, default: 0)
Float defining the flat foreland width left and right [m]
def_bbx_fac: float (optional, default: 1)
Float factor defining the slopy foreland width left and right,
which is calculated multiplying the channel width with this factor [-]
def_bnm: float (optional, default: 1.5 = 67%)
Float defining the channel embankment slope left and right [mL/mZ]
def_bnx: float (optional, default: 100 = nearly flat foreland)
Float defining the slopy foreland slope left and right [mL/mZ]
def_bnvrx: float (optional, default: 4 = 25%)
Float defining the outer foreland slope left and right [mL/mZ]
def_skm: float (optional, default: 30 = natural channel, vegetated river bank)
Float defining the Strickler roughness values in the channel [m1/3s-1]
def_skx: float (optional, default: 20 = uneven vegetated foreland)
Float defining the Strickler roughness values of the left and right
foreland [m1/3s-1]
print_out: boolean (optional, default: False)
        true if work progress shall be printed to the command line
Returns:
-----------
df_data_tgbdat: pandas.DataFrame
DataFrame of all parameters, which are needed in the resulting file.
The DataFrame includes the model element ID as index and the following
columns:
- 'TGB': element ID number (int)
- 'NRVLF': element name (str)
- 'FT': element area (float)
- 'HUT': lower elevation of runoff concentration [m]
- 'HOT': upper elevation of runoff concentration [m]
- 'TAL': maximum flow length for runoff concentration [km]
- 'X': x-coordinate of element center [m]
- 'Y': y-coordinate of element center [m]
- 'KMU': lower stationing of routing [m]
- 'KMO': upper stationing of routing [m]
- 'GEF': channel slope for routing [m]
- 'HM': channel depth [m]
- 'BM': channel width [m]
- 'BL': flat foreland width left [m]
- 'BR': flat foreland width right [m]
- 'BBL': slopy foreland width left [m]
- 'BBR': slopy foreland width right [m]
- 'BNM': channel embankment slope left and right [mL/mZ]
- 'BNL': slopy foreland slope left [mL/mZ]
- 'BNR': slopy foreland slope right [mL/mZ]
- 'BNVRL': outer foreland slope left [mL/mZ]
- 'BNVRR': outer foreland slope right [mL/mZ]
        - 'SKM': Strickler roughness values in the channel [m1/3s-1]
        - 'SKL': Strickler roughness values at the left foreland [m1/3s-1]
        - 'SKR': Strickler roughness values at the right foreland [m1/3s-1]
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_ft: pandas.Series
Series of corresponding model element areas. Dummy elements have an
area of 0. [km²]
ser_area_outfl: pandas.Series
Series of corresponding model element inflow catchment areas.
Dummy elements have an area of 0. [km²]
ser_ch_form_q: pandas.Series
Series of elements' channel-forming discharge at the corresponding
        model element ID in the series' index.
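
    Example (illustrative call only; the argument values are the examples above):
        df_data_tgbdat, ser_tgb_down_nd, ser_ft, ser_area_outfl, ser_ch_form_q = \
            calc_roandrout_params(100, 0.21, 'c:\model_creation.gdb\tgb_p_fl',
                                  ch_est_method='combined', print_out=True)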
"""
# %% Redistribute flow length values at confluence points
def redistr_flowl_at_conflp(ser_fl, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy):
"""
        This function redistributes flow length values at confluence points.
        Remember: the D8 flow length calculation returns flow length values that
        are composed of steps of 1 and sqrt(2) times the model resolution. The
        LARSIM convention assumes the confluence point of cells upstream of the
        routing element. Therefore, the resulting discrepancies at confluence
        points have to be balanced in upstream routing elements.
JM 2021
Arguments:
-----------
ser_fl: pandas.Series
            Series of model element raster flow length corresponding to the series'
ascending index. The flow length is calculated using the D8-flow direction
based on the model resolution digital elevation raster and using the
'DOWNSTREAM' option (outlet = 0). It may be clipped from a larger raster,
whereby the outlet is not zero anymore.
(e.g., pd.Series([300, 341, 200, 100], index=[1, 2, 3, 4], name='ser_fl'))
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
Returns:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The DataFrame
includes the following columns:
- downstream share of flow length within cell ('down')
- downstream share confluence correction value ('corr_conf_down')
- corrected downstream share of flow length within cell ('corr_down')
- upstream share of flow length within cell ('up')
- corrected upstream share of flow length within cell ('corr_up')
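
        Example (illustrative): an element with a raster flow length of 200 [m]
        draining into a downstream element with 100 [m] gets a downstream share
        ('down') of (200 - 100) / 2 = 50 [m].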
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_corr_conf_down = 'corr_conf_down'
# pre-allocate variable
df_fl = pd.DataFrame(np.zeros((ser_fl.shape[0], 5)) * np.nan,
index=ser_fl.index,
columns=[f_down, f_corr_conf_down, f_corr_down,
f_up, f_corr_up])
# copy model resolution flow length (GIS raster calculation)
# (dummy cells are nan, outflow not)
        ser_fl.loc[ser_tgb_type_dummy] = np.nan
        df_fl.loc[ser_tgb_type_dummy, :] = np.nan
# iterate elements to calculate flow length to downstream cell
# (only head water and routing cells)
for tgb, fl_sum_tgb in ser_fl.iteritems():
# if it is a head water or routing cell and not the outflow:
if not ser_tgb_type_dummy.at[tgb] and tgb != np.max(df_fl.index):
# get flow length of downstream cell
fl_sum_down = ser_fl.loc[ser_tgb_down_nd.at[tgb]]
# calculate flow length difference between recent and downstream cell
df_fl.at[tgb, f_down] = (fl_sum_tgb - fl_sum_down) / 2
# iterate elements to calculate flow length to upstream cells and correct it
for tgb, fl_sum_tgb in ser_fl.iteritems():
# if it is a head water cell set upstream flow length to zero
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_up] = 0
# if it is a routing cell allocate mean residuals to upstream cells
elif not ser_tgb_type_dummy.at[tgb]:
# get values of upstream cells
fl_sum_up = ser_fl.loc[ser_tgb_up_nd.at[tgb]]
# calculate mean of differences between recent and upstream cells
fl_dif_up = np.nanmean(fl_sum_up - fl_sum_tgb) / 2
df_fl.at[tgb, f_up] = fl_dif_up
# calculate mean downstream residuals and allocate it to upstream cells
fl_dif_up_rest = (fl_sum_up - fl_sum_tgb) / 2 - fl_dif_up
df_fl.loc[fl_dif_up_rest.index, f_corr_conf_down] = fl_dif_up_rest
# calculate sums of flow length shares
df_fl.loc[:, f_corr_down] = np.sum(
df_fl.loc[:, [f_down, f_corr_conf_down]], axis=1)
df_fl.loc[:, f_corr_up] = df_fl.loc[:, f_up].values
return df_fl
# %% Redistribute flow network flow length values at confluence points
def redistr_flowl_polyl_at_conflp(ser_fl_fnw, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy, cellsz):
"""
This function redistributes the model resolution flow length values calculated
based on existing flow path polyline features.
Remember: The LARSIM convention assumes the confluence point of cells
upstream of the routing element. Therefore, the resulting discrepancies at
confluence points have to be balanced in upstream routing elements.
        Furthermore, the flow network balances might get negative due to unavoidable
        influences of neighbouring flow network elements. This is handled by setting
        such discrepancies to a symbolic value of 1 to prevent LARSIM from assuming
        a dummy cell. As it stays unclear, to which cell the influencing flow network
        element belongs, the (rather small) discrepancy has to stay unbalanced upstream.
Additionally, to prevent instabilities in the water routing calculation, a
correction and redistribution of very small flow lengths is introduced. If
the flow length is smaller than 10% of the model's cell size, the difference
to the actual flow length at the recent cell is redistributed from upstream
cells to the recent one.
JM 2021
Arguments:
-----------
ser_fl_fnw: pandas.Series
            Series of model element polyline flow length corresponding to the series'
ascending index. The flow length is calculated using the accumulative
lengths of polyline elements intersected with model raster polygons.
The outlet is the minimum value, but has not to be zero.
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_fl_fnw'))
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
cellsz: int
Integer, which defines the model element edge length in [m]
Returns:
-----------
df_fl_fnw: pandas.DataFrame
DataFrame of corresponding model resolution flow length values.
The DataFrame includes the following columns:
- original downstream share of flow length within cell ('down')
- downstream correction value of confluences ('corr_conf_down')
- downstream correction value of redistribution ('corr_red_down')
- corrected downstream share of flow length within cell ('corr_down')
- upstream share of flow length within cell ('up')
- upstream correction value of redistribution ('corr_red_up')
- corrected upstream share of flow length within cell ('corr_up')
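
        Example (illustrative): with cellsz = 100 [m], an element whose flow network
        flow length parts sum up to 4 [m] is raised by 6 [m] to 10 [m] (10 % of the
        cell size, 'corr_red_up'), and the 6 [m] are subtracted from its upstream
        neighbours ('corr_red_down').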
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_corr_conf_down = 'corr_conf_down'
f_corr_red_up = 'corr_red_up'
f_corr_red_down = 'corr_red_down'
# pre-allocate variable
df_fl_fnw = pd.DataFrame(np.zeros((ser_fl_fnw.shape[0], 7))*np.nan,
index=ser_fl_fnw.index,
columns=[f_down, f_corr_conf_down,
f_corr_red_down, f_corr_down,
f_up, f_corr_red_up, f_corr_up])
# first column = high resolution flow length (GIS raster calculation)
# (dummy cells are nan, outflow not)
        ser_fl_fnw.loc[ser_tgb_type_dummy] = np.nan
        df_fl_fnw.loc[ser_tgb_type_dummy, :] = np.nan
# calculate flow distances
for tgb, fl_sum in ser_fl_fnw.iteritems():
# if high resolution flow length is not nan...
if not np.isnan(fl_sum):
# if it is a head water cell only calculate downstream part
if ser_tgb_type_headw.at[tgb]:
# find downstream cell and get flow length
fl_down = ser_fl_fnw.loc[ser_tgb_down_nd.at[tgb]]
# calculate flow length difference between recent and
# downstream cell
df_fl_fnw.at[tgb, f_down] = (fl_sum - fl_down) / 2
# set difference between recent and upstream cell to zero
df_fl_fnw.at[tgb, f_up] = 0
# if it is a routing cell...
elif not ser_tgb_type_dummy.at[tgb]:
# if it is not outflow
if tgb != np.max(ser_fl_fnw.index):
# find downstream cell and get flow length
fl_down = ser_fl_fnw.loc[ser_tgb_down_nd.loc[tgb]]
# downstream value is difference between recent and
# downstream cell or 1 [m] if it would be smaller
df_fl_fnw.at[tgb, f_down] \
= np.max([(fl_sum - fl_down) / 2, 1])
else:
# downstream difference is 0
df_fl_fnw.at[tgb, f_down] = 0
# find upstream cells and get flow lengths
jjnd_up = ser_tgb_up_nd.at[tgb]
# calculate flow length difference between recent
# and upstream cells
fl_dif_up = (ser_fl_fnw.loc[jjnd_up] - fl_sum) / 2
                    # correct negative upstream difference values (set to symbolic value 1)
                    fl_dif_up_ii = np.logical_and(~np.isnan(fl_dif_up), fl_dif_up < 0)
                    fl_dif_up[fl_dif_up_ii] = 1
# calculate mean of difference between recent
# and upstream cells
if np.any(~np.isnan(fl_dif_up)):
fl_difmean_up = np.nanmean(fl_dif_up)
else:
fl_difmean_up = np.nan
df_fl_fnw.at[tgb, f_up] = fl_difmean_up
# calculate residual from mean calculation
                    df_fl_fnw.loc[jjnd_up, f_corr_conf_down] \
                        = fl_dif_up - fl_difmean_up
# iterate cells in reversed calculation order from outflow to most
# upstream point and redistribute very small network values
for tgb in reversed(ser_fl_fnw.index):
# if high resolution flow length is not nan and it is a routing cell...
fl_sum = ser_fl_fnw[tgb]
            if not np.isnan(fl_sum) \
                    and not ser_tgb_type_headw.at[tgb] \
                    and not ser_tgb_type_dummy.at[tgb]:
# add downstream, upstream and remaining flow length part of
# recent element
fl_fnw = np.nansum(
df_fl_fnw.loc[tgb, [f_down, f_corr_conf_down, f_up]])
# if the flow length is smaller than 10% of the cell size...
if fl_fnw < cellsz / 10:
# allocate the difference to 10% of cell size to the
# recent element
fl_fnw_dif_corr = cellsz / 10 - fl_fnw
df_fl_fnw.at[tgb, f_corr_red_up] = fl_fnw_dif_corr
# redistribute correction length to upstream cells
                    df_fl_fnw.loc[ser_tgb_up_nd.at[tgb], f_corr_red_down] \
                        = - fl_fnw_dif_corr
# calculate sums of flow length shares
        df_fl_fnw.loc[:, f_corr_down] = np.sum(
            df_fl_fnw.loc[:, [f_down, f_corr_conf_down, f_corr_red_down]], axis=1)
        df_fl_fnw.loc[:, f_corr_up] = np.sum(
            df_fl_fnw.loc[:, [f_up, f_corr_red_up]], axis=1)
return df_fl_fnw
# %% Merge flow length from model resolution raster and flow network polylines
def merge_fnw_and_mr_fl(df_fl_mr, df_fl_fnw, ser_j_down, ser_tgb_down_nd,
ser_tgb_type_headw, ser_tgb_type_dummy,
def_fl_upper_lim=np.inf, def_fl_strct_mism=2):
"""
This function merges both model resolution flow length sources (1)
calculated based on existing flow path polyline features and (2) using
the D8-flow direction based on the model resolution digital elevation
raster and using the 'DOWNSTREAM' option (outlet = 0).
The flow length calculated from flow network polylines and model
resolution are potentially referenced to a different outflow point as
the extent of the DEM is different. The extent of the flow network
usually is larger, as the calculation of the model domain is based on
        the underlying high-resolution DEM. Therefore, flow length references
have to be reset at the outflow point of the model. Consequently, the
flow network and model resolution flow length values are merged.
JM 2021
Arguments:
-----------
df_fl_mr: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following columns:
- downstream share of flow length within cell ('down')
- corrected downstream share of flow length within cell ('corr_down')
- corrected upstream share of flow length within cell ('corr_up')
df_fl_fnw: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following columns:
- corrected downstream share of flow length within cell ('corr_down')
- corrected upstream share of flow length within cell ('corr_up')
ser_j_down: pandas.Series
Series of corresponding downstream model element indices.
Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_down_nd: pandas.Series
Series of corresponding downstream model element indices ignoring dummy
elements. Model outlet remains -1 and dummy elements are represented as 0.
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
            series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
def_fl_upper_lim: int (optional, default: inf)
upper threshold for realistic flow length [m]
def_fl_strct_mism: int (optional, default: 2)
default flow length for structural mismatch and negative transition
deviations [m]. attention: 1 [m] is interpreted by LARSIM as 'no routing'!
Returns:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values.
The DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
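
        Example (illustrative): a routing element with a lower cumulative flow length
        of 1500 [m] and a cell flow length of 320 [m] gets an upper cumulative flow
        length of 1500 + 320 = 1820 [m].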
"""
# define internal string variables
f_up = 'up'
f_down = 'down'
f_corr_up = 'corr_up'
f_corr_down = 'corr_down'
f_lower = 'lower'
f_upper = 'upper'
f_length = 'length'
        # pre-allocate DataFrame for identification keys
df_fl_keys = pd.DataFrame(np.zeros((df_fl_mr.shape[0], 2))*np.nan,
index=df_fl_mr.index, columns=[f_down, f_up])
# pre-allocate DataFrame for flow length values
df_fl = pd.DataFrame(np.zeros((df_fl_mr.shape[0], 3))*np.nan,
index=df_fl_mr.index,
columns=[f_lower, f_length, f_upper])
# calculate outflow cell index
tgb_out = np.max(df_fl_mr.index)
# pre-set outflow flow length value to 1 [m]
df_fl.at[tgb_out, f_lower] = 1
# iterate all cells in reversed order
for tgb in reversed(df_fl_mr.index):
# if cell is a routing or headwater cell
if not ser_tgb_type_dummy.at[tgb]:
# find real downstream cell
jjnd_down = ser_tgb_down_nd.at[tgb]
# SET LOWER CUMULATIVE FLOW LENGTH OF RECENT AS UPPER OF DOWNSTREAM CELL
if tgb != tgb_out:
df_fl.at[tgb, f_lower] = df_fl.at[jjnd_down, f_upper]
else:
df_fl.at[tgb, f_lower] = 0
# DECIDE ABOUT BEHAVIOR USING DOWNSTREAM PART OF RECENT AND
# UPSTREAM PART OF DOWNSTREAM CELL
# get downstream flow network flow length of RECENT cell
if tgb != tgb_out: fl_fnw_down = df_fl_fnw.loc[tgb, f_corr_down]
else: fl_fnw_down = 0
# if (1) downstream flow network flow length of RECENT cell is > 0
# set downstream flow length to flow network flow length (key 1)
# (no further distinction, as fl_fnw_down > 0 && fl_fnw_down_up <= 0
# cannot exist due to the definition of df_fl_fnw)
if fl_fnw_down > 0:
fl_down = df_fl_fnw.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = 1
# if (2) downstream flow network flow length of RECENT cell is < 0
# than a potential structural mismatch between model resolution flow
# length and flow network flow length resulting from cell aggregation
# has to be corrected. The downstream flow length is set to flow network
# flow length (key -1)
elif fl_fnw_down < 0:
fl_down = df_fl_fnw.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = -1
# if (3) downstream flow network flow length of RECENT cell does not
# exist (= 0), than model resolution flow length is used and further
# distinction of cases is based on upstream flow network flow length
# of DOWNSTREAM cell
elif fl_fnw_down == 0:
# get upstream flow network flow length of DOWNSTREAM cell
# (except for outflow)
if tgb != tgb_out:
fl_fnw_down_up = df_fl_fnw.loc[jjnd_down, f_corr_up]
else:
fl_fnw_down_up = 0
# if (3.1) upstream flow network flow length of DOWNSTREAM cell
# does not exist (<= 0), than both cells have model resolution
# flow length and downstream flow length part is set to model
# resolution flow length (key 100)
if fl_fnw_down_up <= 0:
fl_down = df_fl_mr.loc[tgb, f_corr_down]
df_fl_keys.at[tgb, f_down] = 100
# if (3.2) upstream flow network flow length of DOWNSTREAM
# cell exists (> 0) than there is a transition from downstream
# flow network to recent cell model resolution flow length
# and the difference of model resolution and flow network
# flow length is calculated (key -100).
else:
fl_down = df_fl_mr.loc[tgb, f_down] * 2 - fl_fnw_down_up
df_fl_keys.at[tgb, f_down] = -100
# CALCULATE UPSTREAM AND SUM OF FLOW LENGTH OF RECENT CELL
# headwater cells: cell flow length = downstream part
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_length] = fl_down
# routing cells: cell flow length = downstream + upstream flow length
else:
# get upstream flow network flow length of RECENT cell
fl_fnw_up = df_fl_fnw.loc[tgb, f_corr_up]
# if upstream flow network flow length of RECENT cell is > 0
# set upstream flow length to flow network flow length (key 1)
if fl_fnw_up > 0:
fl_up = fl_fnw_up
df_fl_keys.at[tgb, f_up] = 1
# if upstream flow network flow length is = 0 (< 0 cannot exist)
# set upstream flow length to model resolution flow length (key 100)
else:
fl_up = df_fl_mr.loc[tgb, f_corr_up]
df_fl_keys.at[tgb, f_up] = 100
# sum down- and upstream flow length parts (except for outflow)
if tgb != tgb_out: df_fl.at[tgb, f_length] = fl_down + fl_up
else: df_fl.at[tgb, f_length] = fl_up
# DO CORRECTIONS
# if structural mismatches and transition values cannot be compensated
# by upstream flow length part (flow length < 0), set flow length to
# the threshold def_fl_strct_mism, a symbolic very small value
if np.isin(df_fl_keys.at[tgb, f_down], [-1, -100]) \
and df_fl.at[tgb, f_length] <= def_fl_strct_mism:
df_fl.at[tgb, f_length] = def_fl_strct_mism
# if flow length is unrealistic high (flow length > def_fl_upper_lim),
# set flow length to the threshold def_fl_upper_lim
if df_fl.at[tgb, f_length] > def_fl_upper_lim:
df_fl.at[tgb, f_length] = def_fl_upper_lim
# CALCULATE UPSTREAM CUMULATIVE FLOW LENGTH OF RECENT CELL
# headwater cells: use lower cumulative flow length as upper
# (not used in LARSIM, as there is no routing in head water cells)
if ser_tgb_type_headw.at[tgb]:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_lower]
# routing cell, which is not outlet: calculate sum of downstream
# cumulative flow length and flow length of recent cell
elif tgb != tgb_out:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_length] \
+ df_fl.at[tgb, f_lower]
# routing cell, which is outlet: use flow length of recent cell
else:
df_fl.at[tgb, f_upper] = df_fl.at[tgb, f_length]
# if cell is a dummy cell
else:
# take value from downstream cell for upper and lower value
df_fl.at[tgb, [f_lower, f_upper]] = df_fl.loc[ser_j_down.at[tgb], f_upper]
return df_fl
# %% Calculate cumulative flow length values respecting LARSIM conventions
def calc_cum_ch_fl(df_fl, ser_tgb_up, ser_tgb_type_headw, ser_tgb_type_dummy):
"""
This function calculates the cumulative flow length values respecting LARSIM
conventions.
    In elements with a difference of 1 [m] between the upper and lower cumulative
    flow lengths (KMO and KMU), routing is ignored. Therefore, dummy and headwater
    elements shall be set to a difference of 1 between KMO and KMU
    (KMO - KMU = 1). The function returns a pandas.DataFrame with KMO and KMU.
JM 2021
Arguments:
-----------
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
ser_tgb_up: pandas.Series
Series of corresponding upstream model element indices.
Dummy elements are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
        series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
        series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
Returns:
-----------
df_cum_ch_fl: pandas.DataFrame
DataFrame of corresponding runoff concentration parameters. The DataFrame
includes the model element ID as index and the following columns:
- corresponding lower cumulative flow length values (KMU) [m]
- corresponding upper cumulative flow length values (KMO) [m]
"""
# define internal string variables
f_kmu = 'kmu'
f_kmo = 'kmo'
f_lower = 'lower'
f_upper = 'upper'
# Calculate Dummy adds for KMO and KMU
# pre-allocate arrays for adds
dummy_adds = pd.DataFrame(np.zeros((ser_tgb_up.shape[0], 2)),
index=ser_tgb_up.index, columns=[f_lower, f_upper])
# add of outlet is 1
tgb_out = np.max(ser_tgb_up.index)
dummy_adds.at[tgb_out, f_lower] = 1
# iterate all cells
for tgb in reversed(ser_tgb_up.index):
# get upstream cell IDs
tgb_up = ser_tgb_up.at[tgb]
# lower add of upstream cell is upper add of recent cell
dummy_adds.at[tgb_up, f_lower] = dummy_adds.at[tgb, f_upper]
# get indices of upstream dummy cells
tgb_up_dummys = ser_tgb_type_dummy.loc[tgb_up].index
# if upstream cell is not a dummy cell, upper add = lower add
dummy_adds.at[tgb_up, f_upper] = dummy_adds.loc[tgb_up, f_lower].values
# if upstream cell is a dummy cell, upper add = upper add + 1
dummy_adds.at[tgb_up_dummys, f_upper] \
= dummy_adds.loc[tgb_up_dummys, f_upper].values + 1
# Calculate head water adds
headw_adds = pd.Series(np.zeros((ser_tgb_up.shape[0])), index=ser_tgb_up.index,
name=f_upper)
headw_adds.at[ser_tgb_type_headw] = 1
# Add Dummy and Head Water Adds
ser_kmu = np.round(df_fl.loc[:, f_lower], 0) + dummy_adds.loc[:, f_lower]
ser_kmo = np.round(df_fl.loc[:, f_upper], 0) + dummy_adds.loc[:, f_upper] \
+ headw_adds
# summarize parameters
df_cum_ch_fl = pd.concat([ser_kmu, ser_kmo], axis=1)
df_cum_ch_fl.columns = [f_kmu, f_kmo]
return df_cum_ch_fl
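    # Illustrative sketch of the convention implemented above (invented numbers):
    # a routing element with lower/upper cumulative flow lengths of 1200.4 m and
    # 1750.9 m and zero adds gets KMU = 1200 and KMO = 1751. A headwater element,
    # whose upper value was set equal to its lower value beforehand, receives the
    # headwater add of 1, e.g. KMU = 1200 and KMO = 1200 + 1 = 1201, so that
    # KMO - KMU = 1 and LARSIM skips routing in that element. Dummy elements end
    # up with the same difference of 1 via the dummy adds.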
# %% calculate channel elevation differences
def calc_ch_zdif(ser_zlower, df_fl,
ser_tgb_up_nd, ser_tgb_type_headw, ser_tgb_type_dummy,
def_sl_min=0.0001):
"""
    This function calculates the channel elevation differences and corrects them
    applying the LARSIM conventions. This means that (1) a minimum channel slope
    is maintained. The slope value might be very small, but it is not allowed to be
    zero. As there are LARSIM-internal rounding mechanisms, slope values smaller
    than 0.0001 mL/mZ have to be avoided. Additionally, (2) multiple upstream
    neighbour elements have to be balanced, as only one elevation value can be
    applied to a single element. Potential conservation is achieved by moving the
    elevation difference to the upstream element neighbours.
JM 2021
Arguments:
-----------
ser_zlower: pandas.Series
        Series of model elements' minimum elevation corresponding to the series'
ascending index.
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_zlower'))
df_fl: pandas.DataFrame
DataFrame of corresponding model resolution flow length values. The
DataFrame includes the model element ID as index and the following
columns:
- accumulative flow length at lower boundary of cell ('lower')
- flow length value of cell ('length')
- accumulative flow length at upper boundary of cell ('upper')
ser_tgb_up_nd: pandas.Series
Series of corresponding upstream model element indices ignoring dummy
elements. These are represented as empty array (e.g., []).
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding to the
        series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headwater',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
        series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
def_sl_min: float (optional, default: 0.0001)
minimum channel slope value to be maintained due to LARSIM-internal
restrictions
Returns:
-----------
df_ch_zdif: pandas.DataFrame
DataFrame of corrected element elevation values. The DataFrame
includes the model element ID as index and the following columns:
- slope correction value [m] ('corr_sl')
- balancing correction value [m] ('corr_bal')
- corrected minimum channel elevation [m] ('lower_corr')
- corrected channel elevation difference [m] ('ch_zdif')
- corrected maximum channel elevation [m] ('upper_corr')
"""
# define internal string variables
f_length = 'length'
f_ch_zdif = 'ch_zdif'
f_corr_sl = 'corr_sl'
f_corr_bal = 'corr_bal'
f_lower_corr = 'lower_corr'
f_upper_corr = 'upper_corr'
# pre-allocate arrays
df_ch_zdif = pd.DataFrame(
np.zeros((ser_tgb_up_nd.shape[0], 5)) * np.nan, index=ser_tgb_up_nd.index,
columns=[f_corr_sl, f_corr_bal, f_lower_corr, f_ch_zdif, f_upper_corr])
# fill input columns (min and max elevation within cell)
df_ch_zdif.lower_corr = ser_zlower
# set dummy cell values to nan
df_ch_zdif.at[ser_tgb_type_dummy, :] = np.nan
# iterate all cells
for tgb in reversed(ser_tgb_up_nd.index):
# routing cells
if not ser_tgb_type_dummy[tgb] and not ser_tgb_type_headw[tgb]:
# get min elevation within cell
zlower = df_ch_zdif.at[tgb, f_lower_corr]
# find upstream cell ID number
tgb_up_nd = ser_tgb_up_nd.at[tgb]
# get elevation value for upstream cell
zupper = df_ch_zdif.loc[tgb_up_nd, f_lower_corr]
# calculate range threshold to prevent slope < def_sl_min
zdif_sl_thr = def_sl_min * df_fl.at[tgb, f_length]
# find cell pairs lower threshold slope
sl_corr_bool = (zupper - zlower) <= zdif_sl_thr
# if there is any, correct height differences lower than threshold
if np.any(sl_corr_bool):
# get and set min elevation correction values
hd_corr = zdif_sl_thr - (zupper.loc[sl_corr_bool] - zlower)
df_ch_zdif.at[tgb_up_nd[sl_corr_bool], f_corr_sl] = hd_corr
# get and set max elevation correction values
zupper_sl_corr = zupper.loc[sl_corr_bool] + hd_corr
df_ch_zdif.at[tgb_up_nd[sl_corr_bool], f_lower_corr] = zupper_sl_corr
zupper.at[sl_corr_bool] = zupper_sl_corr.iloc[0]
else:
df_ch_zdif.at[tgb_up_nd, f_corr_sl] = 0
            # if more than one upstream cell exists...
if np.any(tgb_up_nd):
# ...calculate minimum value
zupper_min = np.nanmin(zupper)
df_ch_zdif.at[tgb, f_upper_corr] = zupper_min
df_ch_zdif.at[tgb_up_nd, f_lower_corr] = zupper_min
df_ch_zdif.at[tgb_up_nd, f_corr_bal] = zupper_min - zupper
# if only one upstream cell exists take elevation value of it
else:
df_ch_zdif.at[tgb_up_nd, f_corr_bal] = 0
df_ch_zdif.at[tgb, f_upper_corr] = zupper
# calculate elevation range within cell
df_ch_zdif.loc[:, f_ch_zdif] = \
df_ch_zdif.loc[:, f_upper_corr] - df_ch_zdif.loc[:, f_lower_corr]
return df_ch_zdif
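    # Worked example of the minimum slope correction above (invented numbers):
    # with def_sl_min = 0.0001 and a cell flow length of 200 m, the threshold is
    # zdif_sl_thr = 0.0001 * 200 = 0.02 m. If an upstream cell's minimum channel
    # elevation lies only 0.005 m above the recent cell's (zupper - zlower = 0.005
    # <= 0.02), the upstream elevation is raised by
    #     hd_corr = 0.02 - 0.005 = 0.015 [m]
    # and the shift is recorded in the 'corr_sl' column.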
# %% calculate runoff concentration parameters
def calc_roconc_params(ser_ch_zmin, ser_zmax, ser_fl_ch_down, ser_fl_headw_len,
ser_tgb_type_headw, ser_tgb_type_dummy,
cellsz, def_zmin_rout_fac=0.5, def_zmax_fac=1):
"""
This function calculates the runoff concentration parameters needed for
the retention time estimation using the Kirpich formula (Kirpich, 1940).
JM 2021
Arguments:
-----------
ser_ch_zmin: pandas.Series [m]
Series of model elements' minimum channel elevation corresponding to
        the series' ascending index.
(e.g., pd.Series([302.4, 330.0, 180.5, 120.8], index=[1, 2, 3, 4],
name='ser_ch_zmin'))
ser_zmax: pandas.Series
Series of model elements' maximum elevation corresponding to the
        series' ascending index. [m]
(e.g., pd.Series([308.4, 341.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_zmax'))
ser_fl_ch_down: pandas.Series [m]
Series of model elements' downstream channel flow length parts
        corresponding to the series' ascending index.
(e.g., pd.Series([202.4, 120.0, 29.5, 13.8], index=[1, 2, 3, 4],
name='ser_fl_ch_down'))
ser_fl_headw_len: pandas.Series
Series of model elements' headwater flow length parts corresponding
        to the series' ascending index. [m]
(e.g., pd.Series([110.4, 231.0, 204.5, 133.8], index=[1, 2, 3, 4],
name='ser_fl_headw_len'))
ser_tgb_type_headw: pandas.Series
Boolean Series, which identifies the headwater cells corresponding
        to the series' ascending index with True.
(e.g., pd.Series(data=[1, 1, 0, 0], index=[1, 2, 3, 4], name='headw',
dtype='bool'))
ser_tgb_type_dummy: pandas.Series
Boolean Series, which identifies the dummy cells corresponding to the
        series' ascending index with True.
(e.g., pd.Series(data=[0, 0, 0, 0], index=[1, 2, 3, 4], name='dummy',
dtype='bool'))
cellsz: int
Integer, which defines the model element edge length in [m] (e.g., 100)
def_zmin_rout_fac: float (optional, default: 0.5)
Factor to vary the lower elevation of runoff concentration between
the minimum (0) and maximum (1) channel elevation of the element. By
default, the factor is set to the average elevation (0.5) [-]
def_zmax_fac: float (optional, default: 1)
Factor to vary the upper elevation of runoff concentration between
the minimum (0) and maximum (1) elevation of the element. By default,
the factor is set to the maximum elevation (1) [-]
Returns:
-----------
df_roconc_params: pandas.DataFrame
DataFrame of runoff concentration parameters. The DataFrame
includes the model element ID as index and the following columns:
- lower runoff concentration elevation [m] ('hut')
- upper runoff concentration elevation [m] ('hot')
- maximum runoff concentration flow length [km] ('tal')
"""
# define internal string variables
f_tal = 'tal'
f_hut = 'hut'
f_hot = 'hot'
# calculate lower runoff concentration elevation
# define HUT for head waters as low point of cell
ser_hut = ser_ch_zmin + (ser_zmax - ser_ch_zmin) * def_zmin_rout_fac
ser_hut.at[ser_tgb_type_headw] = ser_ch_zmin.loc[ser_tgb_type_headw]
# calculate upper runoff concentration elevation
ser_hot = ser_hut + (ser_zmax - ser_hut) * def_zmax_fac
# correct negative and zero HOT-HUT
zdif_corr_ii = np.round(ser_hot, 1) - np.round(ser_hut, 1) <= 0
ser_hot.at[zdif_corr_ii] = ser_hut.loc[zdif_corr_ii] + 0.1
# calculate maximum flow length
    # define TAL for cells with stream as mean of straight and diagonal line
ser_tal = pd.Series(np.zeros(ser_hot.shape) + (np.sqrt(2) + 1) * cellsz / 4,
index=ser_hot.index, name=f_tal)
# define TAL for head waters balancing flow length upstream values
ser_tal.at[ser_tgb_type_headw] = \
ser_fl_ch_down.loc[ser_tgb_type_headw] \
+ ser_fl_headw_len.loc[ser_tgb_type_headw]
# convert from [m] to [km]
ser_tal = ser_tal / 1000
# summarize series
df_roconc_params = pd.concat([ser_hut, ser_hot, ser_tal], axis=1)
df_roconc_params.columns = [f_hut, f_hot, f_tal]
df_roconc_params.at[ser_tgb_type_dummy, :] = np.nan
return df_roconc_params
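    # Worked example (invented numbers): a routing element with ser_ch_zmin = 300 m,
    # ser_zmax = 320 m and the default factors def_zmin_rout_fac = 0.5 and
    # def_zmax_fac = 1 yields
    #     HUT = 300 + (320 - 300) * 0.5 = 310 [m]
    #     HOT = 310 + (320 - 310) * 1.0 = 320 [m]
    # and, with cellsz = 100 m, TAL = (sqrt(2) + 1) * 100 / 4 / 1000 ~ 0.060 km.
    # For a headwater element, HUT is the minimum channel elevation and TAL is the
    # sum of its downstream channel part and its headwater flow length.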
# %% calculation
# define key-words to identify element types
str_headw = 'headwater'
str_routing = 'routing'
str_dummy = 'dummy'
# define internal variables
f_tgb = 'tgb'
f_tgb_down = 'tgb_down'
f_tgb_type = 'tgb_type'
f_tgb_a = 'tgb_a'
f_x = 'x'
f_y = 'y'
f_nrflv = 'nrflv'
f_ft = 'ft'
# define arcpy default field names
f_pt_x = 'POINT_X'
f_pt_y = 'POINT_Y'
# calculate model network parameters
if print_out: print('...import and pre-process data...')
# Import model cell feature class attribute table and convert to pandas.DataFrame
structarr_tgb_in = arcpy.da.FeatureClassToNumPyArray(
name_tgb_par_p,
[f_tgb, f_tgb_type, f_tgb_down, f_tgb_a, f_pt_x, f_pt_y, field_fl_mr,
field_dem_max_mr, field_dem_min_mr, field_fl_fnw_mean_mr])
df_tgb_in = pd.DataFrame(np.sort(structarr_tgb_in, order=f_tgb),
index=structarr_tgb_in[f_tgb])
df_tgb_in = df_tgb_in.rename(columns={f_pt_x: f_x, f_pt_y: f_y})
# convert string identifiers of model cells to logical arrays
tgb_type_lookup, tgb_type_tgb_id = np.unique(df_tgb_in.loc[:, f_tgb_type],
return_inverse=True)
ser_tgb_type_headw = pd.Series(
tgb_type_tgb_id == np.nonzero(tgb_type_lookup == str_headw)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_headw)
ser_tgb_type_routing = pd.Series(tgb_type_tgb_id == np.nonzero(
tgb_type_lookup == str_routing)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_routing)
ser_tgb_type_dummy = pd.Series(tgb_type_tgb_id == np.nonzero(
tgb_type_lookup == str_dummy)[0][0],
dtype=bool, index=df_tgb_in.index, name=str_dummy)
# calculate upstream model element indices
ser_tgb_up = tc.get_upstream_idx(df_tgb_in.loc[:, f_tgb_down])
# get up- and downstream model cell indices while ignoring dummy elements
ser_tgb_down_nd = tc.get_downstream_idx_ign_dumm(
df_tgb_in.loc[:, f_tgb_down], ser_tgb_type_dummy)
ser_tgb_up_nd = tc.get_upstream_idx_ign_dumm(
df_tgb_in.loc[:, f_tgb_down], ser_tgb_type_headw, ser_tgb_type_dummy)
# calculate model network parameters
if print_out: print('...calculate model network parameters...')
# redistribute model resolution flow length values at confluence points
ser_fl = copy.deepcopy(df_tgb_in.loc[:, field_fl_mr])
df_fl_mr = redistr_flowl_at_conflp(ser_fl, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy)
# redistribute flow network flow length values at confluence points
# (including redistribution of very small flow length values)
ser_fl_fnw = copy.deepcopy(df_tgb_in.loc[:, field_fl_fnw_mean_mr])
df_fl_fnw = redistr_flowl_polyl_at_conflp(
ser_fl_fnw, ser_tgb_down_nd, ser_tgb_up_nd,
ser_tgb_type_headw, ser_tgb_type_dummy, cellsz)
# merge flow length resulting from model resolution raster and flow network polylines
df_fl = merge_fnw_and_mr_fl(df_fl_mr, df_fl_fnw,
df_tgb_in.loc[:, f_tgb_down], ser_tgb_down_nd, ser_tgb_type_headw,
ser_tgb_type_dummy, def_fl_upper_lim=def_fl_upper_lim,
def_fl_strct_mism=def_fl_strct_mism)
# calculate cumulative flow length values respecting LARSIM conventions
df_cum_ch_fl = calc_cum_ch_fl(df_fl, ser_tgb_up, ser_tgb_type_headw, ser_tgb_type_dummy)
# calculate channel elevation differences
df_ch_zdif = calc_ch_zdif(df_tgb_in.loc[:, field_dem_min_mr], df_fl,
ser_tgb_up_nd, ser_tgb_type_headw, ser_tgb_type_dummy,
def_sl_min=def_sl_min)
# calculate slope for routing
ser_ch_gef = tc.calc_ch_sl(df_ch_zdif.loc[:, 'ch_zdif'], df_fl.loc[:, 'length'],
ser_tgb_type_routing,
def_sl_excl_quant=def_sl_excl_quant)
# calculate runoff concentration parameters
if print_out: print('...calculate runoff concentration parameters...')
df_roconc_params = calc_roconc_params(df_ch_zdif.lower_corr,
df_tgb_in.loc[:, field_dem_max_mr],
df_fl_mr.corr_down, df_fl.length,
ser_tgb_type_headw, ser_tgb_type_dummy,
cellsz, def_zmin_rout_fac=def_zmin_rout_fac,
def_zmax_fac=def_zmax_fac)
# calculate routing parameters
if print_out: print('...calculate routing parameters...')
# calculate channel-forming discharge
ser_ch_form_q = tc.calc_ch_form_q(df_tgb_in.loc[:, f_tgb_a], df_tgb_in.loc[:, f_tgb_down],
q_spec=q_spec_ch, ser_q_in_corr=ser_q_in_corr)
# calculate tripel trapezoid river cross section
df_ttp = tc.calc_ttp(ser_ch_form_q, ser_tgb_type_routing, ch_est_method=ch_est_method,
def_bx=def_bx, def_bbx_fac=def_bbx_fac, def_bnm=def_bnm,
def_bnx=def_bnx, def_bnvrx=def_bnvrx,
def_skm=def_skm, def_skx=def_skx)
# calculate informative parameters
if print_out: print('...calculate informative parameters...')
# calculate inflow catchment size informative value
ser_area_outfl = df_tgb_in.loc[:, f_tgb_a] + (cellsz**2) / (10**6)
ser_area_outfl.at[~ser_tgb_type_routing] = 0
# create names of elements
    ser_nrflv = pd.Series(df_tgb_in.shape[0]*'', index=df_tgb_in.index, name=f_nrflv)
import os
from unittest.mock import patch
from itertools import product
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from mavedbconvert import validators, enrich2, constants
from tests import ProgramTestCase
class TestEnrich2ParseInput(ProgramTestCase):
def setUp(self):
super().setUp()
self.wt = "GCTGAT"
self.path = os.path.join(self.data_dir, "enrich2", "test_store.h5")
self.store = pd.HDFStore(self.path, "w")
self.enrich2 = enrich2.Enrich2(
self.path, wt_sequence=self.wt, offset=0, one_based=True
)
scores, shared, counts, *_ = self.mock_variants_frames()
self.store["/main/variants/scores/"] = scores
self.store["/main/variants/scores_shared/"] = shared
self.store["/main/variants/counts/"] = counts
scores, shared, counts, *_ = self.mock_synonymous_frames()
self.store["/main/synonymous/scores/"] = scores
self.store["/main/synonymous/scores_shared/"] = shared
self.store["/main/synonymous/counts/"] = counts
self.files = [
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_counts_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_counts_c2.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_scores_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_scores_c2.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_variants_counts_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_variants_counts_c2.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_variants_scores_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_variants_scores_c2.csv",
)
),
]
self.store.close()
self.store = pd.HDFStore(self.path, mode="r")
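        # The mocked store mirrors the Enrich2 HDF5 layout these tests assume:
        # per element type ('variants', 'synonymous') there are three tables,
        #   /main/<element>/scores         -> condition x (SE, epsilon, score)
        #   /main/<element>/scores_shared  -> condition x replicate x (SE, score)
        #   /main/<element>/counts         -> condition x replicate x timepoint
        # all indexed by the Enrich2 HGVS variant strings built in the mocks.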
def mock_variants_frames(self, scores_hgvs=None, counts_hgvs=None):
counts_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["rep1", "rep2"], ["t0", "t1"]],
names=["condition", "selection", "timepoint"],
)
scores_shared_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["rep1", "rep2"], ["SE", "score"]],
names=["condition", "selection", "value"],
)
scores_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["SE", "epsilon", "score"]], names=["condition", "value"]
)
if scores_hgvs is None:
scores_hgvs = [
"c.2C>T (p.Ala1Val), c.3T>C (p.Ala1=)",
"c.5A>G (p.Asp2Gly), c.6T>A (p.Asp2Glu)",
]
if counts_hgvs is None:
counts_hgvs = [
"c.2C>T (p.Ala1Val), c.3T>C (p.Ala1=)",
"c.5A>G (p.Asp2Gly), c.6T>A (p.Asp2Glu)",
]
expected = self.parse_rows(scores_hgvs)
expected_nt = [t[0] for t in expected]
expected_pro = [t[1] for t in expected]
scores = pd.DataFrame(
np.random.randn(len(scores_hgvs), len(scores_index)),
index=scores_hgvs,
columns=scores_index,
)
shared = pd.DataFrame(
np.random.randn(len(scores_hgvs), len(scores_shared_index)),
index=scores_hgvs,
columns=scores_shared_index,
)
counts = pd.DataFrame(
np.random.randint(
low=0, high=100, size=(len(scores_hgvs), len(counts_index))
),
index=counts_hgvs,
columns=counts_index,
)
return scores, shared, counts, expected_nt, expected_pro
def mock_synonymous_frames(self, scores_hgvs=None, counts_hgvs=None):
counts_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["rep1", "rep2"], ["t0", "t1"]],
names=["condition", "selection", "timepoint"],
)
scores_shared_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["rep1", "rep2"], ["SE", "score"]],
names=["condition", "selection", "value"],
)
scores_index = pd.MultiIndex.from_product(
[["c1", "c2"], ["SE", "epsilon", "score"]], names=["condition", "value"]
)
if scores_hgvs is None:
scores_hgvs = ["p.Ala1Val, p.Ala1=", "p.Asp2Gly, p.Asp2Glu"]
if counts_hgvs is None:
counts_hgvs = ["p.Ala1Val, p.Ala1=", "p.Asp2Gly, p.Asp2Glu"]
expected = self.parse_rows(scores_hgvs)
expected_nt = [t[0] for t in expected]
expected_pro = [t[1] for t in expected]
scores = pd.DataFrame(
np.random.randn(len(scores_hgvs), len(scores_index)),
index=scores_hgvs,
columns=scores_index,
)
shared = pd.DataFrame(
np.random.randn(len(scores_hgvs), len(scores_shared_index)),
index=scores_hgvs,
columns=scores_shared_index,
)
counts = pd.DataFrame(
np.random.randint(
low=0, high=100, size=(len(scores_hgvs), len(counts_index))
),
index=counts_hgvs,
columns=counts_index,
)
return scores, shared, counts, expected_nt, expected_pro
def tearDown(self):
self.store.close()
super().tearDown()
if os.path.isdir(self.enrich2.output_directory):
os.removedirs(self.enrich2.output_directory)
def parse_rows(self, variants, element=None):
return [self.enrich2.parse_row((v, element)) for v in list(variants)]
@patch.object(pd.DataFrame, "to_csv", return_value=None)
def test_saves_to_output_directory(self, patch):
output = os.path.join(self.data_dir, "enrich2", "new")
p = enrich2.Enrich2(src=self.store, dst=output, wt_sequence=self.wt, offset=0)
p.parse_input(p.load_input_file())
for call_args in patch.call_args_list:
self.assertIn(output, call_args[0][0])
@patch.object(pd.DataFrame, "to_csv", return_value=None)
def test_saves_to_file_location_if_no_dst_supplied(self, patch):
p = enrich2.Enrich2(src=self.store, wt_sequence=self.wt, offset=0)
p.parse_input(self.enrich2.load_input_file())
expected_base_path = os.path.normpath(
os.path.join(self.data_dir, "enrich2", "test_store")
)
for call_args in patch.call_args_list:
self.assertIn(expected_base_path, call_args[0][0])
@patch("mavedbconvert.enrich2.get_replicate_score_dataframes")
def test_iterates_over_all_available_tables(self, patch):
self.enrich2.convert()
self.assertIn(constants.synonymous_table, patch.call_args_list[0][0])
self.assertIn(constants.variants_table, patch.call_args_list[1][0])
@patch(
"mavedbconvert.enrich2.drop_null",
side_effect=lambda scores_df, counts_df: (scores_df, counts_df),
)
def test_calls_drop_null(self, patch):
self.enrich2.convert()
patch.assert_called()
def test_scores_index_order_retained_in_hgvs_columns(self):
self.enrich2.convert()
*_, expected_nt, expected_pro = self.mock_variants_frames()
nt_pro_tuples = self.parse_rows(
self.store["/main/variants/scores/"]["c1"].index
)
self.assertListEqual(expected_nt, [t[0] for t in nt_pro_tuples])
self.assertListEqual(expected_pro, [t[1] for t in nt_pro_tuples])
*_, expected_nt, expected_pro = self.mock_synonymous_frames()
nt_pro_tuples = self.parse_rows(
self.store["/main/synonymous/scores/"]["c1"].index
)
self.assertListEqual(expected_nt, [t[0] for t in nt_pro_tuples])
self.assertListEqual(expected_pro, [t[1] for t in nt_pro_tuples])
def test_counts_index_order_retained_in_hgvs_columns(self):
self.enrich2.convert()
*_, expected_nt, expected_pro = self.mock_variants_frames()
nt_pro_tuples = self.parse_rows(
self.store["/main/variants/counts/"]["c1"].index
)
self.assertListEqual(expected_nt, [t[0] for t in nt_pro_tuples])
self.assertListEqual(expected_pro, [t[1] for t in nt_pro_tuples])
*_, expected_nt, expected_pro = self.mock_synonymous_frames()
nt_pro_tuples = self.parse_rows(
self.store["/main/synonymous/counts/"]["c1"].index
)
self.assertListEqual(expected_nt, [t[0] for t in nt_pro_tuples])
self.assertListEqual(expected_pro, [t[1] for t in nt_pro_tuples])
def test_outputs_expected_synonymous_counts_for_each_condition(self):
self.enrich2.convert()
*_, _, expected_pro = self.mock_synonymous_frames()
# C1
result = pd.read_csv(self.files[0], sep=",")
expected = pd.DataFrame({constants.pro_variant_col: expected_pro})
for (rep, tp) in product(["rep1", "rep2"], ["t0", "t1"]):
expected[rep + "_" + tp] = self.store["/main/synonymous/counts/"]["c1"][
rep
][tp].values.astype(int)
assert_frame_equal(result, expected)
# C2
result = pd.read_csv(self.files[1], sep=",")
expected = pd.DataFrame({constants.pro_variant_col: expected_pro})
for (rep, tp) in product(["rep1", "rep2"], ["t0", "t1"]):
expected[rep + "_" + tp] = self.store["/main/synonymous/counts/"]["c2"][
rep
][tp].values.astype(int)
assert_frame_equal(result, expected)
def test_outputs_expected_synonymous_scores_for_each_condition(self):
self.enrich2.convert()
*_, _, expected_pro = self.mock_synonymous_frames()
table_scores = "/main/synonymous/scores/"
table_shared = "/main/synonymous/scores_shared/"
# C1
result = pd.read_csv(self.files[2], sep=",")
expected = pd.DataFrame(
{
constants.pro_variant_col: expected_pro,
"SE": self.store[table_scores]["c1"]["SE"].values.astype(float),
"epsilon": self.store[table_scores]["c1"]["epsilon"].values.astype(
float
),
"score": self.store[table_scores]["c1"]["score"].values.astype(float),
},
columns=[
constants.pro_variant_col,
"SE",
"epsilon",
"score",
"SE_rep1",
"score_rep1",
"SE_rep2",
"score_rep2",
],
)
for (value, rep) in product(["SE", "score"], ["rep1", "rep2"]):
expected[value + "_" + rep] = self.store[table_shared]["c1"][rep][
value
].values.astype(float)
assert_frame_equal(result, expected)
# C2
result = pd.read_csv(self.files[3], sep=",")
expected = pd.DataFrame(
{
constants.pro_variant_col: expected_pro,
"SE": self.store[table_scores]["c2"]["SE"].values.astype(float),
"epsilon": self.store[table_scores]["c2"]["epsilon"].values.astype(
float
),
"score": self.store[table_scores]["c2"]["score"].values.astype(float),
},
columns=[
constants.pro_variant_col,
"SE",
"epsilon",
"score",
"SE_rep1",
"score_rep1",
"SE_rep2",
"score_rep2",
],
)
for (value, rep) in product(["SE", "score"], ["rep1", "rep2"]):
expected[value + "_" + rep] = self.store[table_shared]["c2"][rep][
value
].values.astype(float)
assert_frame_equal(result, expected)
def test_outputs_expected_variants_counts_for_each_condition(self):
self.enrich2.convert()
*_, expected_nt, expected_pro = self.mock_variants_frames()
# C1
result = pd.read_csv(self.files[4], sep=",")
expected = pd.DataFrame(
{
constants.nt_variant_col: expected_nt,
constants.pro_variant_col: expected_pro,
}
)
for (rep, tp) in product(["rep1", "rep2"], ["t0", "t1"]):
expected[rep + "_" + tp] = self.store["/main/variants/counts/"]["c1"][rep][
tp
].values.astype(int)
assert_frame_equal(result, expected)
# C2
result = pd.read_csv(self.files[5], sep=",")
expected = pd.DataFrame(
{
constants.nt_variant_col: expected_nt,
constants.pro_variant_col: expected_pro,
}
)
for (rep, tp) in product(["rep1", "rep2"], ["t0", "t1"]):
expected[rep + "_" + tp] = self.store["/main/variants/counts/"]["c2"][rep][
tp
].values.astype(int)
assert_frame_equal(result, expected)
def test_outputs_expected_variants_scores_for_each_condition(self):
self.enrich2.convert()
*_, expected_nt, expected_pro = self.mock_variants_frames()
table_scores = "/main/variants/scores/"
table_shared = "/main/variants/scores_shared/"
# C1
result = pd.read_csv(self.files[6], sep=",")
expected = pd.DataFrame(
{
constants.pro_variant_col: expected_pro,
constants.nt_variant_col: expected_nt,
"SE": self.store[table_scores]["c1"]["SE"].values.astype(float),
"epsilon": self.store[table_scores]["c1"]["epsilon"].values.astype(
float
),
"score": self.store[table_scores]["c1"]["score"].values.astype(float),
},
columns=[
constants.nt_variant_col,
constants.pro_variant_col,
"SE",
"epsilon",
"score",
"SE_rep1",
"score_rep1",
"SE_rep2",
"score_rep2",
],
)
for (value, rep) in product(["SE", "score"], ["rep1", "rep2"]):
expected[value + "_" + rep] = self.store[table_shared]["c1"][rep][
value
].values.astype(float)
assert_frame_equal(result, expected)
# C2
result = pd.read_csv(self.files[7], sep=",")
expected = pd.DataFrame(
{
constants.pro_variant_col: expected_pro,
constants.nt_variant_col: expected_nt,
"SE": self.store[table_scores]["c2"]["SE"].values.astype(float),
"epsilon": self.store[table_scores]["c2"]["epsilon"].values.astype(
float
),
"score": self.store[table_scores]["c2"]["score"].values.astype(float),
},
columns=[
constants.nt_variant_col,
constants.pro_variant_col,
"SE",
"epsilon",
"score",
"SE_rep1",
"score_rep1",
"SE_rep2",
"score_rep2",
],
)
for (value, rep) in product(["SE", "score"], ["rep1", "rep2"]):
expected[value + "_" + rep] = self.store[table_shared]["c2"][rep][
value
].values.astype(float)
assert_frame_equal(result, expected)
def test_counts_and_scores_output_define_same_variants_when_input_does_not(self):
self.store.close()
self.store = pd.HDFStore(self.path, "w")
scores, shared, counts, expected_nt, expected_pro = self.mock_variants_frames(
counts_hgvs=[
"c.2C>T (p.Ala1Val), c.3T>C (p.Ala1=)",
# Does not appear in scores
"c.5A>G (p.Asp2Gly), c.6T>C (p.Asp2=)",
]
)
self.store["/main/variants/scores/"] = scores
self.store["/main/variants/scores_shared/"] = shared
self.store["/main/variants/counts/"] = counts
self.store.close()
self.enrich2.convert()
df_counts = pd.read_csv(self.files[4]) # c1
df_scores = pd.read_csv(self.files[6]) # c1
validators.validate_datasets_define_same_variants(df_scores, df_counts)
df_counts = pd.read_csv(self.files[5]) # c2
df_scores = pd.read_csv(self.files[7]) # c2
validators.validate_datasets_define_same_variants(df_scores, df_counts)
def test_drops_null_rows(self):
self.store.close()
self.store = pd.HDFStore(self.path, "w")
scores, shared, counts, expected_nt, expected_pro = self.mock_variants_frames()
# Add a null row
scores = scores.reindex(scores.index.values.tolist() + ["c.1G>G (p.Ala1=)"])
shared = shared.reindex(shared.index.values.tolist() + ["c.1G>G (p.Ala1=)"])
counts = counts.reindex(counts.index.values.tolist() + ["c.1G>G (p.Ala1=)"])
self.store["/main/variants/scores/"] = scores
self.store["/main/variants/scores_shared/"] = shared
self.store["/main/variants/counts/"] = counts
self.store.close()
self.enrich2.convert()
df_counts = pd.read_csv(self.files[4]) # c1
df_scores = pd.read_csv(self.files[6]) # c1
self.assertNotIn("c.1G>G", df_counts[constants.nt_variant_col])
self.assertNotIn("c.1G>G", df_scores[constants.nt_variant_col])
self.assertNotIn("p.Ala1=", df_counts[constants.pro_variant_col])
self.assertNotIn("p.Ala1=", df_scores[constants.pro_variant_col])
        df_counts = pd.read_csv(self.files[5]) # c2
        df_scores = pd.read_csv(self.files[7]) # c2
self.assertNotIn("c.1G>G", df_counts[constants.nt_variant_col])
self.assertNotIn("c.1G>G", df_scores[constants.nt_variant_col])
self.assertNotIn("p.Ala1=", df_counts[constants.pro_variant_col])
self.assertNotIn("p.Ala1=", df_scores[constants.pro_variant_col])
class TestEnrich2ParseInputNoVariants(ProgramTestCase):
def setUp(self):
super().setUp()
self.wt = "GCTGAT"
self.path = os.path.join(self.data_dir, "enrich2", "test_store.h5")
self.store = pd.HDFStore(self.path, "w")
self.enrich2 = enrich2.Enrich2(
self.path, wt_sequence=self.wt, offset=0, one_based=True
)
scores, shared, counts, *_ = self.mock_synonymous_frames()
self.store["/main/synonymous/scores/"] = scores
self.store["/main/synonymous/scores_shared/"] = shared
self.store["/main/synonymous/counts/"] = counts
self.files = [
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_counts_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_counts_c2.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_scores_c1.csv",
)
),
os.path.normpath(
os.path.join(
self.data_dir,
"enrich2",
"test_store",
"mavedb_test_store_synonymous_scores_c2.csv",
)
),
]
self.store.close()
        self.store = pd.HDFStore(self.path, mode="r")
# Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import pandas as pd
from rarity.data_loader import CSVDataLoader, DataframeLoader
# add this in the conftest.py under tests folder
from selenium.webdriver.chrome.options import Options
def pytest_setup_options():
options = Options()
# added mainly for integration test in gitlab-ci to resolve
# (unknown error: DevToolsActivePort file doesn't exist)
# (The process started from chrome location /usr/bin/google-chrome is no longer running,
# so ChromeDriver is assuming that Chrome has crashed.)
# solution reference => https://github.com/plotly/dash/issues/1420
options.add_argument('--no-sandbox')
return options
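# The fixtures below only construct loader objects; a test consumes one by
# naming it as a parameter. Minimal usage sketch (hypothetical test, not part
# of the suite):
#
#   def test_bimodal_loader_builds(csv_loader_bimodal_reg):
#       assert csv_loader_bimodal_reg is not None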
@pytest.fixture
def csv_loader_single_modal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_single_modal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls_multi():
SAMPLE_DATA_DIR = './tests/sample_data/classification/multiclass/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Multiclass-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_reg():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[22.6], [36.6]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[22.2], [35.0]], columns=['pred'])
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_cls():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6], [0.3, 2.3, 5.2]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[0], [1], [1]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[0.38, 0.62], [0.86, 0.14], [0.78, 0.22]], columns=['0', '1'])
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_bimodal_reg():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[22.6], [36.6]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[22.2], [35.0]], columns=['pred'])
DF_Y_PRED_2 = pd.DataFrame([[22.2], [35.0]], columns=['pred'])
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Regression'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1, DF_Y_PRED_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_bimodal_cls():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[0], [1]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[0.38, 0.62], [0.86, 0.14]], columns=['0', '1'])
DF_Y_PRED_2 = pd.DataFrame([[0.56, 0.44], [0.68, 0.32]], columns=['0', '1'])
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1, DF_Y_PRED_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_bimodal_cls_multi():
    DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6], [0.3, 2.3, 5.2]], columns=['x1', 'x2', 'x3'])
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger('main_logger')
import pandas as pd
import dataset
import model
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
from tqdm.auto import tqdm
def predict_effnet(conf):
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# import the test dataframe where the id of the test images are
    test_df = pd.read_csv(conf['dir_df_test'])
#!/usr/bin/env python
# coding: utf-8
# # Project 2 - GeoTweet
#
# @Author <NAME> (daddyjab)<br>
# @Date 3/25/19<br>
# @File ETL_for_GeoTweet
#
# # Dependencies
# In[1]:
# Dependencies
import tweepy
import json
import time
import os
import pandas as pd
from datetime import datetime
from dateutil import tz
import requests
from pprint import pprint
# Database dependencies
# Imports the method used for connecting to DBs
from sqlalchemy import create_engine, asc, desc, between, distinct, func, null, nullsfirst, nullslast, or_, and_, not_
from sqlalchemy.orm import sessionmaker, relationship
# Imports the methods needed to abstract classes into tables
from sqlalchemy.ext.declarative import declarative_base
# Allow us to declare column types
from sqlalchemy import Column, Integer, String, Float, ForeignKey
# API Keys
from api_config import *
# Twitter API
# key_twitter_tweetquestor_consumer_api_key
# key_twitter_tweetquestor_consumer_api_secret_key
# key_twitter_tweetquestor_access_token
# key_twitter_tweetquestor_access_secret_token
# Flickr API
# key_flicker_infoquestor_key
# key_flicker_infoquestor_secret
# # Database Configuration for `locations` and `trends` Tables
# In[2]:
# Setup the database using SQLAlchemy
Base = declarative_base()
# In[3]:
# Database schema for Twitter 'locations' table
class Location(Base):
__tablename__ = 'locations'
# Defining the columns for the table 'locations',
# which will hold all of the locations in the U.S. for which
# top trends data is available, as well as location specific
# info like latitude/longitude
id = Column( Integer, primary_key = True)
woeid = Column( Integer )
twitter_country = Column( String(100) )
tritter_country_code = Column( String(10) )
twitter_name = Column( String(250) )
twitter_parentid = Column( Integer )
twitter_type = Column( String(50) )
country_name = Column( String(250) )
country_name_only = Column( String(250) )
country_woeid = Column( Integer )
county_name = Column( String(250) )
county_name_only = Column( String(250) )
county_woeid = Column( Integer )
latitude = Column( Float )
longitude = Column( Float )
name_full = Column( String(250) )
name_only = Column( String(250) )
name_woe = Column( String(250) )
place_type = Column( String(250) )
state_name = Column( String(250) )
state_name_only = Column( String(250) )
state_woeid = Column( Integer )
timezone = Column( String(250) )
# Relationship between Location and Trend is through 'woeid' in each
trend = relationship("Trend", backref='Trend.woeid', primaryjoin='Location.woeid==Trend.woeid', lazy='dynamic')
# Database schema for Twitter 'trends' table
class Trend(Base):
__tablename__ = 'trends'
# Defining the columns for the table 'trends',
# which will hold all of the top trends associated with
# locations in the 'locations' table
id = Column( Integer, primary_key = True)
woeid = Column( Integer, ForeignKey(Location.woeid) )
twitter_as_of = Column( String(100) )
twitter_created_at = Column( String(100) )
twitter_name = Column( String(250) )
twitter_tweet_name = Column( String(250) )
twitter_tweet_promoted_content = Column( String(250) )
twitter_tweet_query = Column( String(250) )
twitter_tweet_url = Column( String(250) )
twitter_tweet_volume = Column( Float )
# In[ ]:
# # Database Setup for `twitter_trends.db` Database
# In[4]:
# Create an engine that stores data in the local directory's SQLite file
# 'data/twitter_trends.db'.
# NOTE: Since this Jupyter notebook is running in the ./resources folder
# the path to the database is a little different
db_path_jupyter_notebook = "sqlite:///../data/twitter_trends.db"
# But the Flask app will run from the main folder
db_path_flask_app = "sqlite:///data/twitter_trends.db"
engine = create_engine(db_path_jupyter_notebook)
# Create all tables in the engine
# (The equivalent of Create Table statements in raw SQL)
Base.metadata.create_all(engine)
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
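# Example queries against the schema above, shown as comments so they are not
# executed as part of the ETL run (Location/Trend and the shared 'woeid' key
# are defined in the model classes earlier):
#
#   us_locs = session.query(Location).filter(Location.twitter_country == 'United States').all()
#   loc_trends = session.query(Trend).filter(Trend.woeid == us_locs[0].woeid).all()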
# In[ ]:
# # Tweepy Setup for Twitter API Access
# In[5]:
# Setup Tweepy API Authentication to access Twitter
auth = tweepy.OAuthHandler(key_twitter_tweetquestor_consumer_api_key, key_twitter_tweetquestor_consumer_api_secret_key)
auth.set_access_token(key_twitter_tweetquestor_access_token, key_twitter_tweetquestor_access_secret_token)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# In[ ]:
# # Function Definitions: Twitter API Rate Limit Management
# In[6]:
def api_calls_remaining( a_api, a_type = "place"):
# Return the number of Twitter API calls remaining
# for the specified API type:
# 'place': Top 10 trending topics for a WOEID
# 'closest': Locations near a specificed lat/long for which Twitter has trending topic info
# 'available': Locations for which Twitter has topic info
# Get Twitter rate limit information using the Tweepy API
rate_limits = a_api.rate_limit_status()
# Focus on the rate limits for trends calls
trends_limits = rate_limits['resources']['trends']
# Return the remaining requests available for the
# requested type of trends query (or "" if not a valid type)
try:
remaining = trends_limits[ f"/trends/{a_type}" ]['remaining']
print(f"Twitter API 'trends/{a_type}' - API Calls Remaining: {remaining}")
except:
return ""
return remaining
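# Usage sketch (illustrative, not executed): check the remaining quota before
# requesting trends for a location, e.g.
#
#   if api_calls_remaining(api, a_type='place') > 0:
#       trends = api.trends_place(some_woeid)   # some_woeid chosen by the caller
#
# api.trends_place() is the Tweepy 3.x wrapper for the '/trends/place' endpoint,
# i.e. the same rate limit bucket that is checked above.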
# In[7]:
def api_time_before_reset( a_api, a_type = "place"):
# Return the number of minutes until the Twitter API is reset
# for the specified API type:
# 'place': Top 10 trending topics for a WOEID
# 'closest': Locations near a specificed lat/long for which Twitter has trending topic info
# 'available': Locations for which Twitter has topic info
# Get Twitter rate limit information using the Tweepy API
rate_limits = a_api.rate_limit_status()
# Focus on the rate limits for trends calls
trends_limits = rate_limits['resources']['trends']
# Return the reset time for the
# requested type of trends query (or "" if not a valid type)
try:
reset_ts = trends_limits[ f"/trends/{a_type}" ]['reset']
except:
return -1
# Calculate the remaining time using datetime methods to
# get the UTC time from the POSIX timestamp
reset_utc = datetime.utcfromtimestamp(reset_ts)
# Current the current time
current_utc = datetime.utcnow()
    # Calculate the time remaining in minutes
    # (assumption: reset time will be >= current time)
time_before_reset = (reset_utc - current_utc).total_seconds() / 60.0
# Tell the datetime object that it's in UTC time zone since
# datetime objects are 'naive' by default
reset_utc = reset_utc.replace(tzinfo = tz.tzutc() )
# Convert time zone
reset_local = reset_utc.astimezone( tz.tzlocal() )
# Tell the datetime object that it's in UTC time zone since
# datetime objects are 'naive' by default
current_utc = current_utc.replace(tzinfo = tz.tzutc() )
# Convert time zone
current_local = current_utc.astimezone( tz.tzlocal() )
print(f"Twitter API 'trends/{a_type}' - Time Before Rate Limit Reset: {time_before_reset:.1f}: Reset Time: {reset_local.strftime('%Y-%m-%d %H:%M:%S')}, Local Time: {current_local.strftime('%Y-%m-%d %H:%M:%S')}")
# Return the time before reset (in minutes)
return time_before_reset
# In[ ]:
# # Function Definitions: Twitter Locations with Available Trends Info
# In[8]:
def get_loc_with_trends_available_to_df( a_api ):
# Get locations that have trends data from a api.trends_available() call,
# flatten the data, and create a dataframe
# Obtain the WOEID locations for which Twitter Trends info is available
try:
trends_avail = a_api.trends_available()
    except tweepy.TweepError as e:
        # trends availability info could not be retrieved, return False
        print("Error obtaining Twitter trends availability info: ", e)
return False
# Import trend availability info into a dataframe
    trends_avail_df = pd.DataFrame.from_dict(trends_avail, orient='columns')
'''
Created on Feb. 25, 2020
@author: cefect
helper functions w/ Qgis api
'''
#==============================================================================
# imports------------
#==============================================================================
#python
import os, configparser, logging, inspect, copy, datetime, re
import pandas as pd
import numpy as np
#qgis
from qgis.core import *
from qgis.analysis import QgsNativeAlgorithms
from qgis.gui import QgisInterface
from PyQt5.QtCore import QVariant, QMetaType
from PyQt5.QtWidgets import QProgressBar
"""throws depceciationWarning"""
import processing
#==============================================================================
# customs
#==============================================================================
mod_logger = logging.getLogger('Q') #get the root logger
from hlpr.exceptions import QError as Error
import hlpr.basic as basic
from hlpr.basic import get_valid_filename
#==============================================================================
# globals
#==============================================================================
fieldn_max_d = {'SpatiaLite':50, 'ESRI Shapefile':10, 'Memory storage':50, 'GPKG':50}
npc_pytype_d = {'?':bool,
'b':int,
'd':float,
'e':float,
'f':float,
'q':int,
'h':int,
'l':int,
'i':int,
'g':float,
'U':str,
'B':int,
'L':int,
'Q':int,
'H':int,
'I':int,
'O':str, #this is the catchall 'object'
}
type_qvar_py_d = {10:str, 2:int, 135:float, 6:float, 4:int, 1:bool, 16:datetime.datetime, 12:str} #QVariant.types to pythonic types
#parameters for lots of statistic algos
stat_pars_d = {'First': 0, 'Last': 1, 'Count': 2, 'Sum': 3, 'Mean': 4, 'Median': 5,
'St dev (pop)': 6, 'Minimum': 7, 'Maximum': 8, 'Range': 9, 'Minority': 10,
'Majority': 11, 'Variety': 12, 'Q1': 13, 'Q3': 14, 'IQR': 15}
#==============================================================================
# classes -------------
#==============================================================================
class Qcoms(basic.ComWrkr): #baseclass for working w/ pyqgis outside the native console
driverName = 'SpatiaLite' #default data creation driver type
out_dName = driverName #default output driver/file type
q_hndls = ['crs', 'crsid', 'algo_init', 'qap', 'vlay_drivers']
algo_init = False #flag indicating whether the algos have been initialized
qap = None
mstore = None
def __init__(self,
feedback=None,
#init controls
init_q_d = {}, #container of initilzied objects
crsid = 'EPSG:4326', #default crsID if no init_q_d is passed
**kwargs
):
""""
#=======================================================================
# plugin use
#=======================================================================
QprojPlugs don't execute super cascade
#=======================================================================
# Qgis inheritance
#=======================================================================
for single standalone runs
all the handles will be generated and Qgis instanced
for console runs
handles should be passed to avoid re-instancing Qgis
for session standalone runs
handles passed
for swapping crs
run set_crs() on the session prior to spawning the child
"""
#=======================================================================
# defaults
#=======================================================================
if feedback is None:
"""by default, building our own feedbacker
passed to ComWrkr.setup_feedback()
"""
feedback = MyFeedBackQ()
#=======================================================================
# cascade
#=======================================================================
super().__init__(
feedback = feedback,
**kwargs) #initilzie teh baseclass
log = self.logger
#=======================================================================
# attachments
#=======================================================================
self.fieldn_max_d=fieldn_max_d
self.crsid=crsid
#=======================================================================
# Qgis setup COMMON
#=======================================================================
"""both Plugin and StandAlone runs should call these"""
self.qproj = QgsProject.instance()
"""
each worker will have their own store
used to wipe any intermediate layers
"""
self.mstore = QgsMapLayerStore() #build a new map store
#do your own init (standalone r uns)
if len(init_q_d) == 0:
self._init_standalone()
else:
#check everything is there
miss_l = set(self.q_hndls).difference(init_q_d.keys())
assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l
#set the handles
for k,v in init_q_d.items():
setattr(self, k, v)
self._upd_qd()
self.proj_checks()
#=======================================================================
# attach inputs
#=======================================================================
self.logger.debug('Qcoms.__init__ finished w/ out_dir: \n %s'%self.out_dir)
return
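    # Minimal sketch of the handle-passing pattern described above (the child
    # worker name is hypothetical; any subclass taking init_q_d would do):
    #
    #   parent = Qcoms(crsid='EPSG:3857')              # standalone: boots QGIS once
    #   child = ChildWorker(init_q_d=parent.init_q_d)  # re-uses qap, crs, algos
    #
    # Passing init_q_d avoids re-instancing QgsApplication, which is what the
    # 'phantom crash' warning in _init_standalone() refers to.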
#==========================================================================
# standalone methods-----------
#==========================================================================
def _init_standalone(self, #setup for qgis runs
crsid = None,
):
"""
WARNING! do not call twice (phantom crash)
"""
log = self.logger.getChild('_init_standalone')
if crsid is None: crsid = self.crsid
#=======================================================================
# #crs
#=======================================================================
crs = QgsCoordinateReferenceSystem(crsid)
assert isinstance(crs, QgsCoordinateReferenceSystem), 'bad crs type'
assert crs.isValid()
self.crs = crs
self.qproj.setCrs(crs)
log.info('crs set to \'%s\''%self.crs.authid())
#=======================================================================
# setup qgis
#=======================================================================
self.qap = self.init_qgis()
self.algo_init = self.init_algos()
self.set_vdrivers()
#=======================================================================
# wrap
#=======================================================================
self._upd_qd()
log.debug('Qproj._init_standalone finished')
return
def _upd_qd(self): #set a fresh parameter set
self.init_q_d = {k:getattr(self, k) for k in self.q_hndls}
def init_qgis(self, #instantiate qgis
gui = False):
"""
WARNING: need to hold this app somewhere. call in the module you're working in (scripts)
"""
log = self.logger.getChild('init_qgis')
try:
QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], gui)
# Update prefix path
#app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
app.initQgis()
#logging.debug(QgsApplication.showSettings())
""" was throwing unicode error"""
log.info(u' QgsApplication.initQgis. version: %s, release: %s'%(
Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
return app
except:
raise Error('QGIS failed to initiate')
def init_algos(self): #initiilize processing and add providers
"""
crashing without raising an Exception
"""
log = self.logger.getChild('init_algos')
if not isinstance(self.qap, QgsApplication):
raise Error('qgis has not been properly initlized yet')
from processing.core.Processing import Processing
Processing.initialize() #crashing without raising an Exception
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
assert not self.feedback is None, 'instance needs a feedback method for algos to work'
log.info('processing initilzied w/ feedback: \'%s\''%(type(self.feedback).__name__))
return True
def set_vdrivers(self):
log = self.logger.getChild('set_vdrivers')
#build vector drivers list by extension
"""couldnt find a good built-in to link extensions with drivers"""
vlay_drivers = {'SpatiaLite':'sqlite', 'OGR':'shp'}
#vlay_drivers = {'sqlite':'SpatiaLite', 'shp':'OGR','csv':'delimitedtext'}
for ext in QgsVectorFileWriter.supportedFormatExtensions():
dname = QgsVectorFileWriter.driverForExtension(ext)
if not dname in vlay_drivers.keys():
vlay_drivers[dname] = ext
#add in missing/duplicated
for vdriver in QgsVectorFileWriter.ogrDriverList():
if not vdriver.driverName in vlay_drivers.keys():
vlay_drivers[vdriver.driverName] ='?'
self.vlay_drivers = vlay_drivers
log.debug('built driver:extensions dict: \n %s'%vlay_drivers)
return
def set_crs(self, #load, build, and set the project crs
crsid = None, #integer
crs = None, #QgsCoordinateReferenceSystem
logger=None,
):
#=======================================================================
# setup and defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('set_crs')
if crsid is None:
crsid = self.crsid
#=======================================================================
# if not isinstance(crsid, int):
# raise IOError('expected integer for crs')
#=======================================================================
#=======================================================================
# build it
#=======================================================================
if crs is None:
crs = QgsCoordinateReferenceSystem(crsid)
assert isinstance(crs, QgsCoordinateReferenceSystem)
self.crs=crs #overwrite
if not self.crs.isValid():
raise IOError('CRS built from \'%s\' is invalid'%crsid)
#=======================================================================
# attach to project
#=======================================================================
self.qproj.setCrs(self.crs)
self.crsid = self.crs.authid()
if not self.qproj.crs().description() == self.crs.description():
raise Error('qproj crs does not match the session\'s')
log.info('crs set to EPSG: %s, \'%s\''%(self.crs.authid(), self.crs.description()))
self._upd_qd()
self.proj_checks(logger=log)
return self.crs
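# usage sketch (illustrative; the EPSG code is arbitrary):
#   wrkr.set_crs(crsid=3005)                                    #build from an id, as above
#   wrkr.set_crs(crs=QgsCoordinateReferenceSystem('EPSG:4326')) #or pass a crs object directly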
def proj_checks(self,
logger=None):
#log = self.logger.getChild('proj_checks')
if not self.driverName in self.vlay_drivers:
raise Error('unrecognized driver name')
if not self.out_dName in self.vlay_drivers:
raise Error('unrecognized driver name')
assert self.algo_init
assert not self.feedback is None
assert not self.progressBar is None
#=======================================================================
# crs checks
#=======================================================================
assert isinstance(self.crs, QgsCoordinateReferenceSystem)
assert self.crs.isValid()
assert self.crs.authid()==self.qproj.crs().authid(), 'crs mismatch'
assert self.crs.authid() == self.crsid, 'crs mismatch'
assert not self.crs.authid()=='', 'got empty CRS!'
#=======================================================================
# handle checks
#=======================================================================
assert isinstance(self.init_q_d, dict)
miss_l = set(self.q_hndls).difference(self.init_q_d.keys())
assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l
for k,v in self.init_q_d.items():
assert getattr(self, k) == v, k
#log.info('project passed all checks')
return
def print_qt_version(self):
import inspect
from PyQt5 import Qt
vers = ['%s = %s' % (k,v) for k,v in vars(Qt).items() if k.lower().find('version') >= 0 and not inspect.isbuiltin(v)]
print('\n'.join(sorted(vers)))
#===========================================================================
# LOAD/WRITE LAYERS-----------
#===========================================================================
def load_vlay(self,
fp,
logger=None,
providerLib='ogr',
aoi_vlay = None,
allow_none=True, #control check in saveselectedfeatures
addSpatialIndex=True,
):
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
if logger is None: logger = self.logger
log = logger.getChild('load_vlay')
basefn = os.path.splitext(os.path.split(fp)[1])[0]
log.debug('loading from %s'%fp)
vlay_raw = QgsVectorLayer(fp,basefn,providerLib)
#=======================================================================
# # checks
#=======================================================================
if not isinstance(vlay_raw, QgsVectorLayer):
raise IOError
#check if this is valid
if not vlay_raw.isValid():
raise Error('loaded vlay \'%s\' is not valid. \n \n did you initialize?'%vlay_raw.name())
#check if it has geometry
if vlay_raw.wkbType() == 100:
raise Error('loaded vlay has NoGeometry')
assert isinstance(self.mstore, QgsMapLayerStore)
"""only add intermediate layers to store
self.mstore.addMapLayer(vlay_raw)"""
if not vlay_raw.crs()==self.qproj.crs():
log.warning('crs mismatch: \n %s\n %s'%(
vlay_raw.crs(), self.qproj.crs()))
#=======================================================================
# aoi slice
#=======================================================================
if isinstance(aoi_vlay, QgsVectorLayer):
log.info('slicing by aoi %s'%aoi_vlay.name())
vlay = self.selectbylocation(vlay_raw, aoi_vlay, allow_none=allow_none,
logger=log, result_type='layer')
#check for no selection
if vlay is None:
return None
vlay.setName(vlay_raw.name()) #reset the name
#clear original from memory
self.mstore.addMapLayer(vlay_raw)
self.mstore.removeMapLayers([vlay_raw])
else:
vlay = vlay_raw
#=======================================================================
# clean------
#=======================================================================
#spatial index
#operate on the (possibly aoi-sliced) layer; vlay_raw may already have been removed from the store
if addSpatialIndex and (not vlay.hasSpatialIndex()==QgsFeatureSource.SpatialIndexPresent):
self.createspatialindex(vlay, logger=log)
#=======================================================================
# wrap
#=======================================================================
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
return vlay
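# usage sketch (illustrative; file path and 'aoi_vlay' are hypothetical):
#   finv_vlay = wrkr.load_vlay(r'C:\data\finv.gpkg', providerLib='ogr')
#   finv_vlay = wrkr.load_vlay(r'C:\data\finv.gpkg', aoi_vlay=aoi_vlay) #slice to an aoi polygon on load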
def load_rlay(self, fp,
aoi_vlay = None,
logger=None):
if logger is None: logger = self.logger
log = logger.getChild('load_rlay')
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
assert QgsRasterLayer.isValidRasterFileName(fp), \
'requested file is not a valid raster file type: %s'%fp
basefn = os.path.splitext(os.path.split(fp)[1])[0]
#Import a Raster Layer
log.debug('QgsRasterLayer(%s, %s)'%(fp, basefn))
rlayer = QgsRasterLayer(fp, basefn)
"""
hanging for some reason...
QgsRasterLayer(C:\LS\03_TOOLS\CanFlood\_git\tutorials\1\haz_rast\haz_1000.tif, haz_1000)
"""
#=======================================================================
# rlayer = QgsRasterLayer(r'C:\LS\03_TOOLS\CanFlood\_git\tutorials\1\haz_rast\haz_1000.tif',
# 'haz_1000')
#=======================================================================
#===========================================================================
# check
#===========================================================================
assert isinstance(rlayer, QgsRasterLayer), 'failed to get a QgsRasterLayer'
assert rlayer.isValid(), "Layer failed to load!"
if not rlayer.crs() == self.qproj.crs():
log.warning('loaded layer \'%s\' crs mismatch!'%rlayer.name())
log.debug('loaded \'%s\' from \n %s'%(rlayer.name(), fp))
#=======================================================================
# aoi
#=======================================================================
if not aoi_vlay is None:
log.debug('clipping w/ %s'%aoi_vlay.name())
assert isinstance(aoi_vlay, QgsVectorLayer)
rlay2 = self.cliprasterwithpolygon(rlayer,aoi_vlay, logger=log, layname=rlayer.name())
#clean up
mstore = QgsMapLayerStore() #build a new store
mstore.addMapLayers([rlayer]) #add the layers to the store
mstore.removeAllMapLayers() #remove all the layers
else:
rlay2 = rlayer
return rlay2
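# usage sketch (illustrative; file path is hypothetical):
#   haz_rlay = wrkr.load_rlay(r'C:\data\haz_0100.tif')
#   haz_rlay = wrkr.load_rlay(r'C:\data\haz_0100.tif', aoi_vlay=aoi_vlay) #clip to the aoi while loading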
def write_rlay(self, #make a local copy of the passed raster layer
rlayer, #raster layer to make a local copy of
extent = 'layer', #write extent control
#'layer': use the current extent (default)
#'mapCanvas': use the current map Canvas
#QgsRectangle: use passed extents
resolution = 'raw', #resolution for output
opts = ["COMPRESS=LZW"], #QgsRasterFileWriter.setCreateOptions
out_dir = None, #directory for outputs
newLayerName = None,
logger=None,
):
"""
because processing tools only work on local copies
#=======================================================================
# coordinate transformation
#=======================================================================
NO CONVERSION HERE!
can't get native API to work. use gdal_warp instead
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
if out_dir is None: out_dir = self.out_dir
if newLayerName is None: newLayerName = rlayer.name()
newFn = get_valid_filename('%s.tif'%newLayerName) #clean it
out_fp = os.path.join(out_dir, newFn)
log = logger.getChild('write_rlay')
log.debug('on \'%s\' w/ \n crs:%s \n extents:%s\n xUnits:%.4f'%(
rlayer.name(), rlayer.crs(), rlayer.extent(), rlayer.rasterUnitsPerPixelX()))
#=======================================================================
# precheck
#=======================================================================
assert isinstance(rlayer, QgsRasterLayer)
assert os.path.exists(out_dir)
if os.path.exists(out_fp):
msg = 'requested file already exists! and overwrite=%s \n %s'%(
self.overwrite, out_fp)
if self.overwrite:
log.warning(msg)
else:
raise Error(msg)
#=======================================================================
# extract info from layer
#=======================================================================
"""consider loading the layer and duplicating the renderer?
renderer = rlayer.renderer()"""
provider = rlayer.dataProvider()
#build projector
projector = QgsRasterProjector()
#projector.setCrs(provider.crs(), provider.crs())
#build and configure pipe
pipe = QgsRasterPipe()
if not pipe.set(provider.clone()): #Insert a new known interface in default place
raise Error("Cannot set pipe provider")
if not pipe.insert(2, projector): #insert interface at specified index and connect
raise Error("Cannot set pipe projector")
#pipe = rlayer.pipe()
#coordinate transformation
"""see note"""
transformContext = self.qproj.transformContext()
#=======================================================================
# extents
#=======================================================================
if extent == 'layer':
extent = rlayer.extent()
elif extent=='mapCanvas':
assert isinstance(self.iface, QgisInterface), 'bad key for StandAlone?'
#get the extent, transformed to the current CRS
extent = QgsCoordinateTransform(
self.qproj.crs(),
rlayer.crs(),
transformContext
).transformBoundingBox(self.iface.mapCanvas().extent())
assert isinstance(extent, QgsRectangle), 'expected extent=QgsRectangle. got \'%s\''%extent
#expect the requested extent to be LESS THAN what we have in the raw raster
assert rlayer.extent().width()>=extent.width(), 'passed extents too wide'
assert rlayer.extent().height()>=extent.height(), 'passed extents too tall'
#=======================================================================
# resolution
#=======================================================================
#use the resolution of the raw file
if resolution == 'raw':
"""this respects the calculated extents"""
nRows = int(extent.height()/rlayer.rasterUnitsPerPixelY())
nCols = int(extent.width()/rlayer.rasterUnitsPerPixelX())
else:
"""dont think theres any decent API support for the GUI behavior"""
raise Error('not implemented')
#=======================================================================
# #build file writer
#=======================================================================
file_writer = QgsRasterFileWriter(out_fp)
#file_writer.Mode(1) #???
if not opts is None:
file_writer.setCreateOptions(opts)
log.debug('writing to file w/ \n %s'%(
{'nCols':nCols, 'nRows':nRows, 'extent':extent, 'crs':rlayer.crs()}))
#execute write
error = file_writer.writeRaster( pipe, nCols, nRows, extent, rlayer.crs(), transformContext)
log.info('wrote to file \n %s'%out_fp)
#=======================================================================
# wrap
#=======================================================================
if not error == QgsRasterFileWriter.NoError:
raise Error(error)
assert os.path.exists(out_fp)
assert QgsRasterLayer.isValidRasterFileName(out_fp), \
'requested file is not a valid raster file type: %s'%out_fp
return out_fp
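# usage sketch (illustrative; 'rlay' is any loaded QgsRasterLayer):
#   out_fp = wrkr.write_rlay(rlay, out_dir=r'C:\temp', newLayerName='haz_copy')
#   #to crop while writing, pass a QgsRectangle (within the raster's extent) via extent=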
def vlay_write(self, #write a VectorLayer
vlay,
out_fp=None,
driverName='GPKG',
fileEncoding = "CP1250",
opts = QgsVectorFileWriter.SaveVectorOptions(), #empty options object
overwrite=None,
logger=None):
"""
help(QgsVectorFileWriter.SaveVectorOptions)
QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
help(QgsVectorFileWriter)
"""
#==========================================================================
# defaults
#==========================================================================
if logger is None: logger=self.logger
log = logger.getChild('vlay_write')
if overwrite is None: overwrite=self.overwrite
if out_fp is None: out_fp = os.path.join(self.out_dir, '%s.gpkg'%vlay.name())
#===========================================================================
# assemble options
#===========================================================================
opts.driverName = driverName
opts.fileEncoding = fileEncoding
#===========================================================================
# checks
#===========================================================================
#file extension
fhead, ext = os.path.splitext(out_fp)
if not 'gpkg' in ext:
raise Error('unexpected extension: %s'%ext)
if os.path.exists(out_fp):
msg = 'requested file path already exists! overwrite=%s \n %s'%(
overwrite, out_fp)
if overwrite:
log.warning(msg)
os.remove(out_fp) #workaround... there should be a way to overwrite with the QgsVectorFileWriter
else:
raise Error(msg)
if vlay.dataProvider().featureCount() == 0:
raise Error('\'%s\' has no features!'%(
vlay.name()))
if not vlay.isValid():
raise Error('passed invalid layer')
#=======================================================================
# write
#=======================================================================
error = QgsVectorFileWriter.writeAsVectorFormatV2(
vlay, out_fp,
QgsCoordinateTransformContext(),
opts,
)
#=======================================================================
# wrap and check
#=======================================================================
if error[0] == QgsVectorFileWriter.NoError:
log.info('layer \' %s \' written to: \n %s'%(vlay.name(),out_fp))
return out_fp
raise Error('FAILURE on writing layer \' %s \' with code:\n %s \n %s'%(vlay.name(),error, out_fp))
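# usage sketch (illustrative):
#   ofp = wrkr.vlay_write(vlay)                                          #default GPKG in self.out_dir
#   ofp = wrkr.vlay_write(vlay, out_fp=r'C:\temp\result.gpkg', overwrite=True)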
def load_dtm(self, #convenience loader for assigning the correct attribute
fp,
logger=None,
**kwargs):
if logger is None: logger=self.logger
log=logger.getChild('load_dtm')
self.dtm_rlay = self.load_rlay(fp, logger=log, **kwargs)
return self.dtm_rlay
#==========================================================================
# GENERIC METHODS-----------------
#==========================================================================
def vlay_new_df2(self, #build a vlay from a df
df_raw,
geo_d = None, #container of geometry objects {fid: QgsGeometry}
crs=None,
gkey = None, #data field linking with geo_d (if None.. uses df index)
layname='df',
index = False, #whether to include the index as a field
logger=None,
):
"""
performance enhancement over vlay_new_df
simpler, clearer
although less versatile
"""
#=======================================================================
# setup
#=======================================================================
if crs is None: crs = self.qproj.crs()
if logger is None: logger = self.logger
log = logger.getChild('vlay_new_df')
#=======================================================================
# index fix
#=======================================================================
df = df_raw.copy()
if index:
if not df.index.name is None:
coln = df.index.name
df.index.name = None
else:
coln = 'index'
df[coln] = df.index
#=======================================================================
# precheck
#=======================================================================
#make sure none of the field names exceed the driver limitations
max_len = self.fieldn_max_d[self.driverName]
#check lengths
boolcol = df_raw.columns.str.len() >= max_len
if np.any(boolcol):
log.warning('passed %i columns which exceed the max length=%i for driver \'%s\'.. truncating: \n %s'%(
boolcol.sum(), max_len, self.driverName, df_raw.columns[boolcol].tolist()))
df.columns = df.columns.str.slice(start=0, stop=max_len-1)
#make sure the columns are unique
assert df.columns.is_unique, 'got duplicated column names: \n %s'%(df.columns.tolist())
#check datatypes
assert np.array_equal(df.columns, df.columns.astype(str)), 'got non-string column names'
#check the geometry
if not geo_d is None:
assert isinstance(geo_d, dict)
if not gkey is None:
assert gkey in df_raw.columns
#assert 'int' in df_raw[gkey].dtype.name
#check gkey match
l = set(df_raw[gkey].drop_duplicates()).difference(geo_d.keys())
assert len(l)==0, 'missing %i \'%s\' keys in geo_d: %s'%(len(l), gkey, l)
#against index
else:
#check gkey match
l = set(df_raw.index).difference(geo_d.keys())
assert len(l)==0, 'missing %i (of %i) fid keys in geo_d: %s'%(len(l), len(df_raw), l)
#===========================================================================
# assemble the fields
#===========================================================================
#column name and python type
fields_d = {coln:np_to_pytype(col.dtype) for coln, col in df.items()}
#fields container
qfields = fields_build_new(fields_d = fields_d, logger=log)
#=======================================================================
# assemble the features
#=======================================================================
#convert form of data
feats_d = dict()
for fid, row in df.iterrows():
feat = QgsFeature(qfields, fid)
#loop and add data
for fieldn, value in row.items():
#skip null values
if pd.isnull(value): continue
#get the index for this field
findx = feat.fieldNameIndex(fieldn)
#get the qfield
qfield = feat.fields().at(findx)
#make the type match
ndata = qtype_to_pytype(value, qfield.type(), logger=log)
#set the attribute
if not feat.setAttribute(findx, ndata):
raise Error('failed to setAttribute')
#setgeometry
if not geo_d is None:
if gkey is None:
gobj = geo_d[fid]
else:
gobj = geo_d[row[gkey]]
feat.setGeometry(gobj)
#store it
feats_d[fid]=feat
log.debug('built %i \'%s\' features'%(
len(feats_d),
QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
))
#=======================================================================
# get the geo type
#=======================================================================
if not geo_d is None:
gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
else:
gtype='None'
#===========================================================================
# build the new layer
#===========================================================================
vlay = vlay_new_mlay(gtype,
crs,
layname,
qfields,
list(feats_d.values()),
logger=log,
)
self.createspatialindex(vlay, logger=log)
#=======================================================================
# post check
#=======================================================================
if not geo_d is None:
if vlay.wkbType() == 100:
raise Error('constructed layer has NoGeometry')
return vlay
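# usage sketch (illustrative; assumes QgsGeometry/QgsPointXY are imported and the
# DataFrame index lines up with the geometry dict keys):
#   geo_d = {0:QgsGeometry.fromPointXY(QgsPointXY(0,0)), 1:QgsGeometry.fromPointXY(QgsPointXY(1,1))}
#   df = pd.DataFrame({'val':[1.0, 2.0]}) #default index 0,1 matches geo_d keys
#   pts_vlay = wrkr.vlay_new_df2(df, geo_d=geo_d, layname='pts')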
def check_aoi(self, #special checks for AOI layers
vlay):
assert isinstance(vlay, QgsVectorLayer)
assert 'Polygon' in QgsWkbTypes().displayString(vlay.wkbType())
assert vlay.dataProvider().featureCount()==1
assert vlay.crs() == self.qproj.crs(), 'aoi CRS (%s) does not match project (%s)'%(vlay.crs(), self.qproj.crs())
return
#==========================================================================
# ALGOS--------------
#==========================================================================
def deletecolumn(self,
in_vlay,
fieldn_l, #list of field names
invert=False, #whether to invert selected field names
layname = None,
logger=None,
):
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:deletecolumn'
if logger is None: logger=self.logger
log = logger.getChild('deletecolumn')
self.vlay = in_vlay
#=======================================================================
# field manipulations
#=======================================================================
fieldn_l = self._field_handlr(in_vlay, fieldn_l, invert=invert, logger=log)
if len(fieldn_l) == 0:
log.debug('no fields requested to drop... skipping')
return self.vlay
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'COLUMN' : fieldn_l,
'INPUT' : in_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
if layname is None:
layname = '%s_delf'%self.vlay.name()
res_vlay.setName(layname) #reset the name
return res_vlay
def joinattributesbylocation(self,
#data definitions
vlay,
join_vlay, #layer from which to extract attribute values onto the bottom vlay
jlay_fieldn_l, #list of field names to extract from the join_vlay
selected_only = False,
jvlay_selected_only = False, #only consider selected features on the join layer
#algo controls
prefix = '',
method=0, #one-to-many
predicate_l = ['intersects'],#list of geometric search predicates
discard_nomatch = False, #Discard records which could not be joined
#data expectations
join_nullvs = True, #allow null values on jlay_fieldn_l on join_vlay
join_df = None, #if join_nullvs=FALSE, data to check for nulls (skips making a vlay_get_fdf)
allow_field_rename = False, #allow joiner fields to be renamed when mapped onto the main
allow_none = False,
#geometry expectations
expect_all_hits = False, #whether every main feature intersects a join feature
expect_j_overlap = False, #whether to expect the join_vlay to be overlapping
expect_m_overlap = False, #whether to expect the main vlay to have overlaps
logger=None,
):
"""
TODO: really need to clean this up...
discard_nomatch:
TRUE: two resulting layers have no features in common
FALSE: in layer retains all non matchers, out layer only has the non-matchers?
METHOD: Join type
- 0: Create separate feature for each located feature (one-to-many)
- 1: Take attributes of the first located feature only (one-to-one)
"""
#=======================================================================
# presets
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('joinattributesbylocation')
self.vlay = vlay
algo_nm = 'qgis:joinattributesbylocation'
predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}
jlay_fieldn_l = self._field_handlr(join_vlay,
jlay_fieldn_l,
invert=False)
#=======================================================================
# jgeot = vlay_get_bgeo_type(join_vlay)
# mgeot = vlay_get_bgeo_type(self.vlay)
#=======================================================================
mfcnt = self.vlay.dataProvider().featureCount()
#jfcnt = join_vlay.dataProvider().featureCount()
mfnl = vlay_fieldnl(self.vlay)
expect_overlaps = expect_j_overlap or expect_m_overlap
#=======================================================================
# geometry expectation prechecks
#=======================================================================
"""should take any geo
if not (jgeot == 'polygon' or mgeot == 'polygon'):
raise Error('one of the layres has to be a polygon')
if not jgeot=='polygon':
if expect_j_overlap:
raise Error('join vlay is not a polygon, expect_j_overlap should =False')
if not mgeot=='polygon':
if expect_m_overlap:
raise Error('main vlay is not a polygon, expect_m_overlap should =False')
if expect_all_hits:
if discard_nomatch:
raise Error('discard_nomatch should =FALSE if you expect all hits')
if allow_none:
raise Error('expect_all_hits=TRUE and allow_none=TRUE')
#method checks
if method==0:
if not jgeot == 'polygon':
raise Error('passed method 1:m but jgeot != polygon')
if not expect_j_overlap:
if not method==0:
raise Error('for expect_j_overlap=False, method must = 0 (1:m) for validation')
"""
#=======================================================================
# data expectation checks
#=======================================================================
#make sure none of the joiner fields are already on the layer
if len(mfnl)>0: #see if there are any fields on the main
l = basic.linr(jlay_fieldn_l, mfnl, result_type='matching')
if len(l) > 0:
#w/a prefix
if not prefix=='':
log.debug('%i fields on the joiner \'%s\' are already on \'%s\'... prefixing w/ \'%s\': \n %s'%(
len(l), join_vlay.name(), self.vlay.name(), prefix, l))
else:
log.debug('%i fields on the joiner \'%s\' are already on \'%s\'... renaming w/ auto-suffix: \n %s'%(
len(l), join_vlay.name(), self.vlay.name(), l))
if not allow_field_rename:
raise Error('%i field names overlap: %s'%(len(l), l))
#make sure that the joiner attributes are not null
if not join_nullvs:
if jvlay_selected_only:
raise Error('not implemented')
#pull the data
if join_df is None:
join_df = vlay_get_fdf(join_vlay, fieldn_l=jlay_fieldn_l, db_f=self.db_f, logger=log)
#slice to the columns of interest
join_df = join_df.loc[:, jlay_fieldn_l]
#check for nulls
booldf = join_df.isna()
if np.any(booldf):
raise Error('got %i nulls on \'%s\' field %s data'%(
booldf.sum().sum(), join_vlay.name(), jlay_fieldn_l))
#=======================================================================
# assemble pars
#=======================================================================
#convert predicate to code
pred_code_l = [predicate_d[name] for name in predicate_l]
#selection flags
if selected_only:
"""WARNING! This will limit the output to only these features
(despite the DISCARD_NONMATCHING flag)"""
main_input = self._get_sel_obj(self.vlay)
else:
main_input = self.vlay
if jvlay_selected_only:
join_input = self._get_sel_obj(join_vlay)
else:
join_input = join_vlay
#assemble pars
ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
'INPUT' : main_input,
'JOIN' : join_input,
'JOIN_FIELDS' : jlay_fieldn_l,
'METHOD' : method,
'OUTPUT' : 'TEMPORARY_OUTPUT',
#'NON_MATCHING' : 'TEMPORARY_OUTPUT', #not working as expected. see get_misses
'PREDICATE' : pred_code_l,
'PREFIX' : prefix}
log.info('extracting %i fields from %i feats from \'%s\' to \'%s\' join fields: %s'%
(len(jlay_fieldn_l), join_vlay.dataProvider().featureCount(),
join_vlay.name(), self.vlay.name(), jlay_fieldn_l))
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay, join_cnt = res_d['OUTPUT'], res_d['JOINED_COUNT']
log.debug('got results: \n %s'%res_d)
#===========================================================================
# post checks
#===========================================================================
hit_fcnt = res_vlay.dataProvider().featureCount()
if not expect_overlaps:
if not discard_nomatch:
if not hit_fcnt == mfcnt:
raise Error('in and out fcnts dont match')
else:
pass
#log.debug('expect_overlaps=False, unable to check fcnts')
#all misses
if join_cnt == 0:
log.warning('got no joins from \'%s\' to \'%s\''%(
self.vlay.name(), join_vlay.name()))
if not allow_none:
raise Error('got no joins!')
if discard_nomatch:
if not hit_fcnt == 0:
raise Error('no joins but got some hits')
#some hits
else:
#check there are no nulls
if discard_nomatch and not join_nullvs:
#get data on first joiner
fid_val_ser = vlay_get_fdata(res_vlay, jlay_fieldn_l[0], logger=log, fmt='ser')
if np.any(fid_val_ser.isna()):
raise Error('discard=True and join_nullvs=False but got %i (of %i) null \'%s\' values in the result'%(
fid_val_ser.isna().sum(), len(fid_val_ser), fid_val_ser.name
))
#=======================================================================
# get the new field names
#=======================================================================
new_fn_l = set(vlay_fieldnl(res_vlay)).difference(vlay_fieldnl(self.vlay))
#=======================================================================
# wrap
#=======================================================================
log.debug('finished joining %i fields from %i (of %i) feats from \'%s\' to \'%s\' join fields: %s'%
(len(new_fn_l), join_cnt, self.vlay.dataProvider().featureCount(),
join_vlay.name(), self.vlay.name(), new_fn_l))
return res_vlay, new_fn_l, join_cnt
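# usage sketch (illustrative; pulls a polygon attribute onto intersecting points):
#   res_vlay, new_fn_l, join_cnt = wrkr.joinattributesbylocation(
#       pts_vlay, poly_vlay, ['zone_id'], predicate_l=['intersects'])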
def joinbylocationsummary(self,
vlay, #polygon layer to sample from
join_vlay, #layer from which to extract attribute values onto the bottom vlay
jlay_fieldn_l, #list of field names to extract from the join_vlay
jvlay_selected_only = False, #only consider selected features on the join layer
predicate_l = ['intersects'],#list of geometric search predicates
smry_l = ['sum'], #data summaries to apply
discard_nomatch = False, #Discard records which could not be joined
use_raw_fn=False, #whether to convert names back to the originals
layname=None,
):
"""
WARNING: This resets the fids
discard_nomatch:
TRUE: two resulting layers have no features in common
FALSE: in layer retains all non matchers, out layer only has the non-matchers?
"""
"""
view(join_vlay)
"""
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:joinbylocationsummary'
predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}
summaries_d = {'count':0, 'unique':1, 'min':2, 'max':3, 'range':4, 'sum':5, 'mean':6}
log = self.logger.getChild('joinbylocationsummary')
#=======================================================================
# defaults
#=======================================================================
if isinstance(jlay_fieldn_l, set):
jlay_fieldn_l = list(jlay_fieldn_l)
#convert predicate to code
pred_code_l = [predicate_d[pred_name] for pred_name in predicate_l]
#convert summaries to code
sum_code_l = [summaries_d[smry_str] for smry_str in smry_l]
if layname is None: layname = '%s_jsmry'%vlay.name()
#=======================================================================
# prechecks
#=======================================================================
if not isinstance(jlay_fieldn_l, list):
raise Error('expected a list')
#check requested join fields
fn_l = [f.name() for f in join_vlay.fields()]
s = set(jlay_fieldn_l).difference(fn_l)
assert len(s)==0, 'requested join fields not on layer: %s'%s
#check crs
assert join_vlay.crs().authid() == vlay.crs().authid()
#=======================================================================
# assemble pars
#=======================================================================
main_input=vlay
if jvlay_selected_only:
join_input = self._get_sel_obj(join_vlay)
else:
join_input = join_vlay
#assemble pars
ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
'INPUT' : main_input,
'JOIN' : join_input,
'JOIN_FIELDS' : jlay_fieldn_l,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREDICATE' : pred_code_l,
'SUMMARIES' : sum_code_l,
}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
res_vlay.setName(layname) #reset the name
#get new field names
nfn_l = set([f.name() for f in res_vlay.fields()]).difference([f.name() for f in vlay.fields()])
"""
view(res_vlay)
"""
#=======================================================================
# post check
#=======================================================================
for fn in nfn_l:
rser = vlay_get_fdata(res_vlay, fieldn=fn, logger=log, fmt='ser')
if rser.isna().all():
log.warning('%s \'%s\' got all nulls'%(vlay.name(), fn))
#=======================================================================
# rename fields
#=======================================================================
if use_raw_fn:
assert len(smry_l)==1, 'rename only allowed for single sample stat'
rnm_d = {s:s.replace('_%s'%smry_l[0],'') for s in nfn_l}
s = set(rnm_d.values()).symmetric_difference(jlay_fieldn_l)
assert len(s)==0, 'failed to convert field names'
res_vlay = vlay_rename_fields(res_vlay, rnm_d, logger=log)
nfn_l = jlay_fieldn_l
log.info('sampled \'%s\' w/ \'%s\' (%i hits) and \'%s\'to get %i new fields \n %s'%(
join_vlay.name(), vlay.name(), res_vlay.dataProvider().featureCount(),
smry_l, len(nfn_l), nfn_l))
return res_vlay, nfn_l
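# usage sketch (illustrative; sums a point field onto the polygons they fall in):
#   res_vlay, nfn_l = wrkr.joinbylocationsummary(poly_vlay, pts_vlay, ['loss'], smry_l=['sum'])
#   #new fields follow the 'fieldname_summary' convention (e.g. 'loss_sum') unless use_raw_fn=True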
def joinattributestable(self, #join csv data to a vector layer
vlay, table_fp, fieldNm,
method = 1, #join type
#- 0: Create separate feature for each matching feature (one-to-many)
#- 1: Take attributes of the first matching feature only (one-to-one)
csv_params = {'encoding':'System',
'type':'csv',
'maxFields':'10000',
'detectTypes':'yes',
'geomType':'none',
'subsetIndex':'no',
'watchFile':'no'},
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_j'%vlay.name()
algo_nm = 'native:joinattributestable'
log = self.logger.getChild('joinattributestable')
#=======================================================================
# prechecks
#=======================================================================
assert isinstance(vlay, QgsVectorLayer)
assert os.path.exists(table_fp)
assert fieldNm in [f.name() for f in vlay.fields()], 'vlay missing link field %s'%fieldNm
#=======================================================================
# setup table layer
#=======================================================================
uriW = QgsDataSourceUri()
for pName, pValue in csv_params.items():
uriW.setParam(pName, pValue)
table_uri = r'file:///' + table_fp.replace('\\','/') +'?'+ str(uriW.encodedUri(), 'utf-8')
table_vlay = QgsVectorLayer(table_uri,'table',"delimitedtext")
assert fieldNm in [f.name() for f in table_vlay.fields()], 'table missing link field %s'%fieldNm
#=======================================================================
# assemble pars
#=======================================================================
ins_d = { 'DISCARD_NONMATCHING' : True,
'FIELD' : fieldNm, #link field on the input layer
'FIELDS_TO_COPY' : [],
'FIELD_2' : fieldNm, #link field on the csv table
'INPUT' : vlay,
'INPUT_2' : table_vlay,
'METHOD' : method,
'OUTPUT' : 'TEMPORARY_OUTPUT', 'PREFIX' : '' }
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished w/ %i feats'%res_vlay.dataProvider().featureCount())
return res_vlay
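# usage sketch (illustrative; the csv must share the link field with the layer):
#   res_vlay = wrkr.joinattributestable(finv_vlay, r'C:\data\results.csv', 'xid')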
def cliprasterwithpolygon(self,
rlay_raw,
poly_vlay,
layname = None,
#output = 'TEMPORARY_OUTPUT',
logger = None,
):
"""
clipping a raster layer with a polygon mask using gdalwarp
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('cliprasterwithpolygon')
if layname is None:
layname = '%s_clipd'%rlay_raw.name()
algo_nm = 'gdal:cliprasterbymasklayer'
#=======================================================================
# precheck
#=======================================================================
assert isinstance(rlay_raw, QgsRasterLayer)
assert isinstance(poly_vlay, QgsVectorLayer)
assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
assert rlay_raw.crs() == poly_vlay.crs()
#=======================================================================
# run algo
#=======================================================================
ins_d = { 'ALPHA_BAND' : False,
'CROP_TO_CUTLINE' : True,
'DATA_TYPE' : 0,
'EXTRA' : '',
'INPUT' : rlay_raw,
'KEEP_RESOLUTION' : True,
'MASK' : poly_vlay,
'MULTITHREADING' : False,
'NODATA' : None,
'OPTIONS' : '',
'OUTPUT' : 'TEMPORARY_OUTPUT',
'SET_RESOLUTION' : False,
'SOURCE_CRS' : None,
'TARGET_CRS' : None,
'X_RESOLUTION' : None,
'Y_RESOLUTION' : None,
}
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def cliprasterwithpolygon2(self, #with saga
rlay_raw,
poly_vlay,
ofp = None,
layname = None,
#output = 'TEMPORARY_OUTPUT',
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('cliprasterwithpolygon')
if layname is None:
if not ofp is None:
layname = os.path.splitext(os.path.split(ofp)[1])[0]
else:
layname = '%s_clipd'%rlay_raw.name()
if ofp is None:
ofp = os.path.join(self.out_dir,layname+'.sdat')
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning('DELETING ' + msg)
os.remove(ofp)
else:
raise Error(msg)
algo_nm = 'saga:cliprasterwithpolygon'
#=======================================================================
# precheck
#=======================================================================
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning(msg)
else:
raise Error(msg)
if not os.path.exists(os.path.dirname(ofp)):
os.makedirs(os.path.dirname(ofp))
#assert QgsRasterLayer.isValidRasterFileName(ofp), 'invalid filename: %s'%ofp
assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
assert rlay_raw.crs() == poly_vlay.crs()
#=======================================================================
# run algo
#=======================================================================
ins_d = { 'INPUT' : rlay_raw,
'OUTPUT' : ofp,
'POLYGONS' : poly_vlay }
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def srastercalculator(self,
formula,
rlay_d, #container of raster layers to perform calculations on
logger=None,
layname=None,
ofp=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('srastercalculator')
assert 'a' in rlay_d
if layname is None:
if not ofp is None:
layname = os.path.splitext(os.path.split(ofp)[1])[0]
else:
layname = '%s_calc'%rlay_d['a'].name()
if ofp is None:
ofp = os.path.join(self.out_dir, layname+'.sdat')
if not os.path.exists(os.path.dirname(ofp)):
log.info('building basedir: %s'%os.path.dirname(ofp))
os.makedirs(os.path.dirname(ofp))
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning(msg)
os.remove(ofp)
else:
raise Error(msg)
#=======================================================================
# execute
#=======================================================================
algo_nm = 'saga:rastercalculator'
ins_d = { 'FORMULA' : formula,
'GRIDS' : rlay_d.pop('a'),
'RESAMPLING' : 3,
'RESULT' : ofp,
'TYPE' : 7,
'USE_NODATA' : False,
'XGRIDS' : list(rlay_d.values())}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['RESULT']):
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['RESULT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def grastercalculator(self, #GDAL raster calculator
formula,
rlay_d, #container of raster layers to perform calculations on
nodata=0,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('grastercalculator')
algo_nm = 'gdal:rastercalculator'
if layname is None:
layname = '%s_calc'%rlay_d['A'].name()
#=======================================================================
# prechecks
#=======================================================================
assert 'A' in rlay_d
#=======================================================================
# populate
#=======================================================================
for rtag in ('A', 'B', 'C', 'D', 'E', 'F'):
#set dummy placeholders for missing rasters
if not rtag in rlay_d:
rlay_d[rtag] = None
#check what the user passed
else:
assert isinstance(rlay_d[rtag], QgsRasterLayer), 'passed bad %s'%rtag
assert rtag in formula, 'formula is missing a reference to \'%s\''%rtag
#=======================================================================
# execute
#=======================================================================
ins_d = { 'BAND_A' : 1, 'BAND_B' : -1, 'BAND_C' : -1, 'BAND_D' : -1, 'BAND_E' : -1, 'BAND_F' : -1,
'EXTRA' : '',
'FORMULA' : formula,
'INPUT_A' : rlay_d['A'], 'INPUT_B' : rlay_d['B'], 'INPUT_C' : rlay_d['C'],
'INPUT_D' : rlay_d['D'], 'INPUT_E' : rlay_d['E'], 'INPUT_F' : rlay_d['F'],
'NO_DATA' : nodata,
'OPTIONS' : '',
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RTYPE' : 5 }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
assert os.path.exists(res_d['OUTPUT']), 'failed to get result'
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def qrastercalculator(self, #QGIS native raster calculator
formula,
ref_layer = None, #reference layer
logger=None,
layname=None,
):
"""executes the algorhithim... better to use the constructor directly
QgsRasterCalculator"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('qrastercalculator')
algo_nm = 'qgis:rastercalculator'
if layname is None:
if ref_layer is None:
layname = 'qrastercalculator'
else:
layname = '%s_calc'%ref_layer.name()
#=======================================================================
# execute
#=======================================================================
"""
formula = '\'haz_100yr_cT2@1\'-\'dtm_cT1@1\''
"""
ins_d = { 'CELLSIZE' : 0,
'CRS' : None,
'EXPRESSION' : formula,
'EXTENT' : None,
'LAYERS' : [ref_layer], #reference layer
'OUTPUT' : 'TEMPORARY_OUTPUT' }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']): #key matches the 'OUTPUT' destination set above
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
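# usage sketch (illustrative; band references follow the 'layername@band' convention
# shown in the docstring above):
#   dep_rlay = wrkr.qrastercalculator("'haz_0100@1' * 2", ref_layer=haz_rlay)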
def addgeometrycolumns(self, #add geometry data as columns
vlay,
layname=None,
logger=None,
):
if logger is None: logger=self.logger
log = logger.getChild('addgeometrycolumns')
algo_nm = 'qgis:exportaddgeometrycolumns'
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'CALC_METHOD' : 0, #use layer's crs
'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
if layname is None:
layname = '%s_gcol'%vlay.name()
res_vlay.setName(layname) #reset the name
return res_vlay
def buffer(self, vlay,
distance, #buffer distance to apply
dissolve = False,
end_cap_style = 0,
join_style = 0,
miter_limit = 2,
segments = 5,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_buf'%vlay.name()
algo_nm = 'native:buffer'
log = self.logger.getChild('buffer')
distance = float(distance)
#=======================================================================
# prechecks
#=======================================================================
if distance==0 or np.isnan(distance):
raise Error('got no buffer!')
#=======================================================================
# build ins
#=======================================================================
"""
distance = 3.0
dcopoy = copy.copy(distance)
"""
ins_d = {
'INPUT': vlay,
'DISSOLVE' : dissolve,
'DISTANCE' : distance,
'END_CAP_STYLE' : end_cap_style,
'JOIN_STYLE' : join_style,
'MITER_LIMIT' : miter_limit,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'SEGMENTS' : segments}
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'native:buffer\' with ins_d: \n %s'%ins_d)
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished')
return res_vlay
def selectbylocation(self, #select features (from main layer) by geometric relation with comp_vlay
vlay, #vlay to select features from
comp_vlay, #vlay to compare
result_type = 'select',
method= 'new', #Modify current selection by
pred_l = ['intersect'], #list of geometry predicate names
#expectations
allow_none = False,
logger = None,
):
#=======================================================================
# setups and defaults
#=======================================================================
if logger is None: logger=self.logger
algo_nm = 'native:selectbylocation'
log = logger.getChild('selectbylocation')
#===========================================================================
# #set parameter translation dictionaries
#===========================================================================
meth_d = {'new':0}
pred_d = {
'are within':6,
'intersect':0,
'overlap':5,
}
#predicate (name to value)
pred_l = [pred_d[pred_nm] for pred_nm in pred_l]
#=======================================================================
# setup
#=======================================================================
ins_d = {
'INPUT' : vlay,
'INTERSECT' : comp_vlay,
'METHOD' : meth_d[method],
'PREDICATE' : pred_l }
log.debug('executing \'%s\' on \'%s\' with: \n %s'
%(algo_nm, vlay.name(), ins_d))
#===========================================================================
# #execute
#===========================================================================
_ = processing.run(algo_nm, ins_d, feedback=self.feedback)
#=======================================================================
# check
#=======================================================================
fcnt = vlay.selectedFeatureCount()
if fcnt == 0:
msg = 'No features selected!'
if allow_none:
log.warning(msg)
else:
raise Error(msg)
#=======================================================================
# wrap
#=======================================================================
log.debug('selected %i (of %i) features from %s'
%(vlay.selectedFeatureCount(),vlay.dataProvider().featureCount(), vlay.name()))
return self._get_sel_res(vlay, result_type=result_type, logger=log, allow_none=allow_none)
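# usage sketch (illustrative; select asset points falling inside an aoi polygon):
#   sel_vlay = wrkr.selectbylocation(pts_vlay, aoi_vlay, result_type='layer')
#   wrkr.selectbylocation(pts_vlay, aoi_vlay) #or just leave the selection on pts_vlay (default)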
def saveselectedfeatures(self,#generate a memory layer from the current selection
vlay,
logger=None,
allow_none = False,
layname=None):
#===========================================================================
# setups and defaults
#===========================================================================
if logger is None: logger = self.logger
log = logger.getChild('saveselectedfeatures')
algo_nm = 'native:saveselectedfeatures'
if layname is None:
layname = '%s_sel'%vlay.name()
#=======================================================================
# precheck
#=======================================================================
fcnt = vlay.selectedFeatureCount()
if fcnt == 0:
msg = 'No features selected!'
if allow_none:
log.warning(msg)
return None
else:
raise Error(msg)
log.debug('on \'%s\' with %i feats selected'%(
vlay.name(), vlay.selectedFeatureCount()))
#=======================================================================
# # build inputs
#=======================================================================
ins_d = {'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('\'native:saveselectedfeatures\' on \'%s\' with: \n %s'
%(vlay.name(), ins_d))
#execute
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
assert isinstance(res_vlay, QgsVectorLayer)
#===========================================================================
# wrap
#===========================================================================
res_vlay.setName(layname) #reset the name
return res_vlay
def polygonfromlayerextent(self,
vlay,
round_to=0, #optionally enlarge the extent to a rounded multiple of this value (0=exact extent)
logger=None,
layname=None):
"""
This algorithm takes a map layer and generates a new vector layer with the
minimum bounding box (rectangle polygon with N-S orientation) that covers the input layer.
Optionally, the extent can be enlarged to a rounded value.
"""
#===========================================================================
# setups and defaults
#===========================================================================
if logger is None: logger = self.logger
log = logger.getChild('polygonfromlayerextent')
algo_nm = 'qgis:polygonfromlayerextent'
if layname is None:
layname = '%s_exts'%vlay.name()
#=======================================================================
# precheck
#=======================================================================
#=======================================================================
# # build inputs
#=======================================================================
ins_d = {'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'ROUND_TO':round_to}
log.debug('\'%s\' on \'%s\' with: \n %s'
%(algo_nm, vlay.name(), ins_d))
#execute
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
assert isinstance(res_vlay, QgsVectorLayer)
#===========================================================================
# wrap
#===========================================================================
res_vlay.setName(layname) #reset the name
return res_vlay
def fixgeometries(self, vlay,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_fix'%vlay.name()
algo_nm = 'native:fixgeometries'
log = self.logger.getChild('fixgeometries')
#=======================================================================
# build ins
#=======================================================================
"""
distance = 3.0
dcopoy = copy.copy(distance)
"""
ins_d = {
'INPUT': vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
}
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished')
return res_vlay
def createspatialindex(self,
in_vlay,
logger=None,
):
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:createspatialindex'
if logger is None: logger=self.logger
log = self.logger.getChild('createspatialindex')
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'INPUT' : in_vlay }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
#===========================================================================
# post formatting
#===========================================================================
#=======================================================================
# if layname is None:
# layname = '%s_si'%self.vlay.name()
#
# res_vlay.setName(layname) #reset the name
#=======================================================================
return
def warpreproject(self, #repojrect a raster
rlay_raw,
crsOut = None, #crs to re-project to
layname = None,
options = 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
output = 'TEMPORARY_OUTPUT',
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('warpreproject')
if layname is None:
layname = '%s_rproj'%rlay_raw.name()
algo_nm = 'gdal:warpreproject'
if crsOut is None: crsOut = self.crs #just take the project's
#=======================================================================
# precheck
#=======================================================================
"""the algo accepts 'None'... but not sure why we'd want to do this"""
assert isinstance(crsOut, QgsCoordinateReferenceSystem), 'bad crs type'
assert isinstance(rlay_raw, QgsRasterLayer)
assert rlay_raw.crs() != crsOut, 'layer already on this CRS!'
#=======================================================================
# run algo
#=======================================================================
ins_d = {
'DATA_TYPE' : 0,
'EXTRA' : '',
'INPUT' : rlay_raw,
'MULTITHREADING' : False,
'NODATA' : None,
'OPTIONS' : options,
'OUTPUT' : output,
'RESAMPLING' : 0,
'SOURCE_CRS' : None,
'TARGET_CRS' : crsOut,
'TARGET_EXTENT' : None,
'TARGET_EXTENT_CRS' : None,
'TARGET_RESOLUTION' : None,
}
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
assert rlay_raw.bandCount()==res_rlay.bandCount(), 'band count mismatch'
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
#===========================================================================
# ALGOS - CUSTOM--------
#===========================================================================
def vlay_pts_dist(self, #get the distance between points in a given order
vlay_raw,
ifn = 'fid', #fieldName to index by
request = None,
result = 'vlay_append', #result type
logger=None):
#===========================================================================
# defaults
#===========================================================================
if logger is None: logger=self.logger
log = logger.getChild('vlay_pts_dist')
if request is None:
request = QgsFeatureRequest(
).addOrderBy(ifn, ascending=True
).setSubsetOfAttributes([ifn], vlay_raw.fields())
#===========================================================================
# precheck
#===========================================================================
assert 'Point' in QgsWkbTypes().displayString(vlay_raw.wkbType()), 'passed bad geo type'
#see if indexer is unique
ifn_d = vlay_get_fdata(vlay_raw, fieldn=ifn, logger=log)
assert len(set(ifn_d.values()))==len(ifn_d)
#===========================================================================
# loop and calc
#===========================================================================
d = dict()
first, geo_prev = True, None
for i, feat in enumerate(vlay_raw.getFeatures(request)):
assert not feat.attribute(ifn) in d, 'indexer is not unique!'
geo = feat.geometry()
if first:
first=False
else:
d[feat.attribute(ifn)] = geo.distance(geo_prev)
geo_prev = geo
log.info('got %i distances using \'%s\''%(len(d), ifn))
#===========================================================================
# check
#===========================================================================
assert len(d) == (vlay_raw.dataProvider().featureCount() -1)
#===========================================================================
# results typing
#===========================================================================
if result == 'dict': return d
elif result == 'vlay_append':
#data manip
ncoln = '%s_dist'%ifn
df_raw = vlay_get_fdf(vlay_raw, logger=log)
df = df_raw.join(pd.Series(d, name=ncoln), on=ifn)
assert df[ncoln].isna().sum()==1, 'expected 1 null'
#reassemble
geo_d = vlay_get_fdata(vlay_raw, geo_obj=True, logger=log)
return self.vlay_new_df2(df, geo_d=geo_d, logger=log,
layname='%s_%s'%(vlay_raw.name(), ncoln))
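# Note on result types (sketch): result='dict' returns {ifn value: distance to the previous point
# in the requested order}, e.g. {2: 14.2, 3: 9.7} for three ordered points (values illustrative;
# the first point has no previous point, so it gets no entry); the default 'vlay_append' joins the
# distances back onto the layer as a new '<ifn>_dist' field, leaving exactly one null.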
#==========================================================================
# privates----------
#==========================================================================
def _field_handlr(self, #common handling for fields
vlay, #layer to check for field presence
fieldn_l, #list of fields to handle
invert = False,
logger=None,
):
if logger is None: logger=self.logger
log = logger.getChild('_field_handlr')
#=======================================================================
# all flag
#=======================================================================
if isinstance(fieldn_l, str):
if fieldn_l == 'all':
fieldn_l = vlay_fieldnl(vlay)
log.debug('user passed \'all\', retrieved %i fields: \n %s'%(
len(fieldn_l), fieldn_l))
else:
raise Error('unrecognized fieldn_l\'%s\''%fieldn_l)
#=======================================================================
# type setting
#=======================================================================
if isinstance(fieldn_l, tuple) or isinstance(fieldn_l, np.ndarray) or isinstance(fieldn_l, set):
fieldn_l = list(fieldn_l)
#=======================================================================
# checking
#=======================================================================
if not isinstance(fieldn_l, list):
raise Error('expected a list for fields, instead got \n %s'%fieldn_l)
#vlay_check(vlay, exp_fieldns=fieldn_l)
#=======================================================================
# #handle inversions
#=======================================================================
if invert:
big_fn_s = set(vlay_fieldnl(vlay)) #get all the fields
#get the difference
fieldn_l = list(big_fn_s.difference(set(fieldn_l)))
log.debug('inverted selection from %i to %i fields'%
(len(big_fn_s), len(fieldn_l)))
return fieldn_l
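# Example of the invert flag (sketch): for a layer with fields ['fid', 'a', 'b'] and fieldn_l=['a'],
# invert=True returns ['fid', 'b'] (a set difference, so order is not guaranteed).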
def _get_sel_obj(self, vlay): #get the processing object for algos with selections
log = self.logger.getChild('_get_sel_obj')
assert isinstance(vlay, QgsVectorLayer)
if vlay.selectedFeatureCount() == 0:
raise Error('Nothing selected on \'%s\'. Expects some pre-selection'%(vlay.name()))
#handle project layer store
if self.qproj.mapLayer(vlay.id()) is None:
#layer not on project yet. add it
if self.qproj.addMapLayer(vlay, False) is None:
raise Error('failed to add map layer \'%s\''%vlay.name())
log.debug('based on %i selected features from \'%s\''%(len(vlay.selectedFeatureIds()), vlay.name()))
return QgsProcessingFeatureSourceDefinition(source=vlay.id(),
selectedFeaturesOnly=True,
featureLimit=-1,
geometryCheck=QgsFeatureRequest.GeometryAbortOnInvalid)
def _get_sel_res(self, #handler for returning selection like results
vlay, #result layer (with selection on it)
result_type='select',
#expectations
allow_none = False,
logger=None
):
#=======================================================================
# setup
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('_get_sel_res')
#=======================================================================
# precheck
#=======================================================================
if vlay.selectedFeatureCount() == 0:
if not allow_none:
raise Error('nothing selected')
return None
#log.debug('user specified \'%s\' for result_type'%result_type)
#=======================================================================
# by handles
#=======================================================================
if result_type == 'select':
#log.debug('user specified \'select\', doing nothing with %i selected'%vlay.selectedFeatureCount())
result = None
elif result_type == 'fids':
result = vlay.selectedFeatureIds() #get the selected feature ids
elif result_type == 'feats':
result = {feat.id(): feat for feat in vlay.getSelectedFeatures()}
elif result_type == 'layer':
result = self.saveselectedfeatures(vlay, logger=log)
else:
raise Error('unexpected result_type kwarg')
return result
def _in_out_checking(self,res_vlay,
):
"""placeholder"""
def __exit__(self, #destructor
*args,**kwargs):
self.mstore.removeAllMapLayers()
super().__exit__(*args,**kwargs) #call the base class __exit__
class MyFeedBackQ(QgsProcessingFeedback):
"""
wrapper for easier reporting and extended progress
Dialogs:
built by QprojPlug.qproj_setup()
Qworkers:
built by Qcoms.__init__()
"""
def __init__(self,
logger=mod_logger):
self.logger=logger.getChild('FeedBack')
super().__init__()
def setProgressText(self, text):
self.logger.debug(text)
def pushInfo(self, info):
self.logger.info(info)
def pushCommandInfo(self, info):
self.logger.info(info)
def pushDebugInfo(self, info):
self.logger.info(info)
def pushConsoleInfo(self, info):
self.logger.info(info)
def reportError(self, error, fatalError=False):
self.logger.error(error)
def upd_prog(self, #advanced progress handling
prog_raw, #pass None to reset
method='raw', #whether to append value to the progress
):
#=======================================================================
# defaults
#=======================================================================
#get the current progress
progress = self.progress()
#===================================================================
# prechecks
#===================================================================
#make sure we have some slots connected
"""not sure how to do this"""
#=======================================================================
# reseting
#=======================================================================
if prog_raw is None:
"""
would be nice to reset the progressBar.. .but that would be complicated
"""
self.setProgress(0)
return
#=======================================================================
# setting
#=======================================================================
if method=='append':
prog = min(progress + prog_raw, 100)
elif method=='raw':
prog = prog_raw
elif method == 'portion':
rem_prog = 100-progress
prog = progress + rem_prog*(prog_raw/100)
assert prog<=100
#===================================================================
# emit signalling
#===================================================================
self.setProgress(prog)
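# Worked example for method='portion' (sketch): with the bar at 40%, upd_prog(50, method='portion')
# scales the remaining 60% by 0.5, so the new progress is 40 + 60*0.5 = 70.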
#==============================================================================
# FUNCTIONS----------
#==============================================================================
def init_q(gui=False):
try:
QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], gui)
# Update prefix path
#app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
app.initQgis()
#logging.debug(QgsApplication.showSettings())
""" was throwing unicode error"""
print(u' QgsApplication.initQgis. version: %s, release: %s'%(
Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
return app
except:
raise Error('QGIS failed to initiate')
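# Usage sketch (assumes the hard-coded OSGeo4W prefix path above matches the local QGIS LTR install):
#   app = init_q()      # standalone QgsApplication, no GUI
#   ...                 # do processing / layer work here
#   app.exitQgis()      # usual teardown to release QGIS resources when finished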
def vlay_check( #helper to check various expectations on the layer
vlay,
exp_fieldns = None, #raise error if these field names are OUT
uexp_fieldns = None, #raise error if these field names are IN
real_atts = None, #list of field names to check if attribute value are all real
bgeot = None, #basic geo type checking
fcnt = None, #feature count checking. accepts INT or QgsVectorLayer
fkey = None, #optional secondary key to check
mlay = False, #check if its a memory layer or not
chk_valid = False, #check layer validty
logger = mod_logger,
db_f = False,
):
#=======================================================================
# prechecks
#=======================================================================
if vlay is None:
raise Error('got passed an empty vlay')
if not isinstance(vlay, QgsVectorLayer):
raise Error('unexpected type: %s'%type(vlay))
log = logger.getChild('vlay_check')
checks_l = []
#=======================================================================
# expected field names
#=======================================================================
if not basic.is_null(exp_fieldns): #robust null checking
skip=False
if isinstance(exp_fieldns, str):
if exp_fieldns=='all':
skip=True
if not skip:
fnl = basic.linr(exp_fieldns, vlay_fieldnl(vlay),
'expected field names', vlay.name(),
result_type='missing', logger=log, fancy_log=db_f)
if len(fnl)>0:
raise Error('%s missing expected fields: %s'%(
vlay.name(), fnl))
checks_l.append('exp_fieldns=%i'%len(exp_fieldns))
#=======================================================================
# unexpected field names
#=======================================================================
if not basic.is_null(uexp_fieldns): #robust null checking
#fields on the layer
if len(vlay_fieldnl(vlay))>0:
fnl = basic.linr(uexp_fieldns, vlay_fieldnl(vlay),
'unexpected field names', vlay.name(),
result_type='matching', logger=log, fancy_log=db_f)
if len(fnl)>0:
raise Error('%s contains unexpected fields: %s'%(
vlay.name(), fnl))
#no fields on the layer
else:
pass
checks_l.append('uexp_fieldns=%i'%len(uexp_fieldns))
#=======================================================================
# null value check
#=======================================================================
#==========================================================================
# if not real_atts is None:
#
# #pull this data
# df = vlay_get_fdf(vlay, fieldn_l = real_atts, logger=log)
#
# #check for nulls
# if np.any(df.isna()):
# raise Error('%s got %i nulls on %i expected real fields: %s'%(
# vlay.name(), df.isna().sum().sum(), len(real_atts), real_atts))
#
#
# checks_l.append('real_atts=%i'%len(real_atts))
#==========================================================================
#=======================================================================
# basic geometry type
#=======================================================================
#==========================================================================
# if not bgeot is None:
# bgeot_lay = vlay_get_bgeo_type(vlay)
#
# if not bgeot == bgeot_lay:
# raise Error('basic geometry type expectation \'%s\' does not match layers \'%s\''%(
# bgeot, bgeot_lay))
#
# checks_l.append('bgeot=%s'%bgeot)
#==========================================================================
#=======================================================================
# feature count
#=======================================================================
if not fcnt is None:
if isinstance(fcnt, QgsVectorLayer):
fcnt=fcnt.dataProvider().featureCount()
if not fcnt == vlay.dataProvider().featureCount():
raise Error('\'%s\'s feature count (%i) does not match %i'%(
vlay.name(), vlay.dataProvider().featureCount(), fcnt))
checks_l.append('fcnt=%i'%fcnt)
#=======================================================================
# fkey
#=======================================================================
#==============================================================================
# if isinstance(fkey, str):
# fnl = vlay_fieldnl(vlay)
#
# if not fkey in fnl:
# raise Error('fkey \'%s\' not in the fields'%fkey)
#
# fkeys_ser = vlay_get_fdata(vlay, fkey, logger=log, fmt='ser').sort_values()
#
# if not np.issubdtype(fkeys_ser.dtype, np.number):
# raise Error('keys are non-numeric. type: %s'%fkeys_ser.dtype)
#
# if not fkeys_ser.is_unique:
# raise Error('\'%s\' keys are not unique'%fkey)
#
# if not fkeys_ser.is_monotonic:
# raise Error('fkeys are not monotonic')
#
# if np.any(fkeys_ser.isna()):
# raise Error('fkeys have nulls')
#
# checks_l.append('fkey \'%s\'=%i'%(fkey, len(fkeys_ser)))
#==============================================================================
#=======================================================================
# storage type
#=======================================================================
if mlay:
if not 'Memory' in vlay.dataProvider().storageType():
raise Error('\"%s\' unexpected storage type: %s'%(
vlay.name(), vlay.dataProvider().storageType()))
checks_l.append('mlay')
#=======================================================================
# validty
#=======================================================================
#==========================================================================
# if chk_valid:
# vlay_chk_validty(vlay, chk_geo=True)
#
# checks_l.append('validity')
#==========================================================================
#=======================================================================
# wrap
#=======================================================================
log.debug('\'%s\' passed %i checks: %s'%(
vlay.name(), len(checks_l), checks_l))
return
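# Usage sketch (field names are placeholders): vlay_check(vlay, exp_fieldns=['fid', 'depth'],
# fcnt=other_vlay, mlay=True) raises Error on the first failed expectation and simply returns
# (None) once every requested check passes.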
def load_vlay( #load a layer from a file
fp,
providerLib='ogr',
logger=mod_logger):
"""
what are we using this for?
see instance method
"""
log = logger.getChild('load_vlay')
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
basefn = os.path.splitext(os.path.split(fp)[1])[0]
#Import a Raster Layer
vlay_raw = QgsVectorLayer(fp,basefn,providerLib)
#check if this is valid
if not vlay_raw.isValid():
log.error('loaded vlay \'%s\' is not valid. \n \n did you initialize?'%vlay_raw.name())
raise Error('vlay loading produced an invalid layer')
#check if it has geometry
if vlay_raw.wkbType() == 100:
log.error('loaded vlay has NoGeometry')
raise Error('no geo')
#==========================================================================
# report
#==========================================================================
vlay = vlay_raw
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
return vlay
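# Usage sketch (path is illustrative only):
#   vlay = load_vlay(r'C:\data\somelayer.gpkg')   # asserts the path exists; raises Error for invalid or geometry-less layers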
def vlay_write( #write a VectorLayer
vlay, out_fp,
driverName='GPKG',
fileEncoding = "CP1250",
opts = QgsVectorFileWriter.SaveVectorOptions(), #empty options object
overwrite=False,
logger=mod_logger):
"""
help(QgsVectorFileWriter.SaveVectorOptions)
QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
help(QgsVectorFileWriter)
TODO: Move this back onto Qcoms
"""
#==========================================================================
# defaults
#==========================================================================
log = logger.getChild('vlay_write')
#===========================================================================
# assemble options
#===========================================================================
opts.driverName = driverName
opts.fileEncoding = fileEncoding
#===========================================================================
# checks
#===========================================================================
#file extension
fhead, ext = os.path.splitext(out_fp)
if not 'gpkg' in ext:
raise Error('unexpected extension: %s'%ext)
if os.path.exists(out_fp):
msg = 'requested file path already exists!. overwrite=%s \n %s'%(
overwrite, out_fp)
if overwrite:
log.warning(msg)
os.remove(out_fp) #workaround... there should be a way to overwrite with the QgsVectorFileWriter
else:
raise Error(msg)
if vlay.dataProvider().featureCount() == 0:
raise Error('\'%s\' has no features!'%(
vlay.name()))
if not vlay.isValid():
raise Error('passed invalid layer')
error = QgsVectorFileWriter.writeAsVectorFormatV2(
vlay, out_fp,
QgsCoordinateTransformContext(),
opts,
)
#=======================================================================
# wrap and check
#=======================================================================
if error[0] == QgsVectorFileWriter.NoError:
log.info('layer \' %s \' written to: \n %s'%(vlay.name(),out_fp))
return out_fp
raise Error('FAILURE on writing layer \' %s \' with code:\n %s \n %s'%(vlay.name(),error, out_fp))
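# Usage sketch (path illustrative): vlay_write(vlay, r'C:\out\result.gpkg', overwrite=True) returns the
# written file path on success; note the extension check above only accepts GeoPackage ('.gpkg') outputs.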
def vlay_get_fdf( #pull all the feature data and place into a df
vlay,
fmt='df', #result format key.
#dict: {fid:{fieldname:value}}
#df: index=fids, columns=fieldnames
#limiters
request = None, #request to pull data. for more customized requestes.
fieldn_l = None, #or field name list. for generic requests
#modifiers
reindex = None, #optional field name to reindex df by
#expectations
expect_all_real = False, #whether to expect all real results
allow_none = False,
db_f = False,
logger=mod_logger,
feedback=MyFeedBackQ()):
"""
performance improvement
Warning: requests with getFeatures arent working as expected for memory layers
this could be combined with vlay_get_feats()
also see vlay_get_fdata() (for a single column)
RETURNS
a dictionary in the Qgis attribute dictionary format:
key: generally feat.id()
value: a dictionary of {field name: attribute value}
"""
#===========================================================================
# setups and defaults
#===========================================================================
log = logger.getChild('vlay_get_fdf')
assert isinstance(vlay, QgsVectorLayer)
all_fnl = [fieldn.name() for fieldn in vlay.fields().toList()]
if fieldn_l is None: #use all the fields
fieldn_l = all_fnl
else:
vlay_check(vlay, fieldn_l, logger=logger, db_f=db_f)
if allow_none:
if expect_all_real:
raise Error('cant allow none and expect all reals')
#===========================================================================
# prechecks
#===========================================================================
if not reindex is None:
if not reindex in fieldn_l:
raise Error('requested reindexer \'%s\' is not a field name'%reindex)
if not vlay.dataProvider().featureCount()>0:
raise Error('no features!')
if len(fieldn_l) == 0:
raise Error('no fields!')
if fmt=='dict' and not (len(fieldn_l)==len(all_fnl)):
raise Error('dict results dont respect field slicing')
assert hasattr(feedback, 'setProgress')
#===========================================================================
# build the request
#===========================================================================
feedback.setProgress(2)
if request is None:
"""WARNING: this doesnt seem to be slicing the fields.
see Alg().deletecolumns()
but this will re-key things
request = QgsFeatureRequest().setSubsetOfAttributes(fieldn_l,vlay.fields())"""
request = QgsFeatureRequest()
#never want geometry
request = request.setFlags(QgsFeatureRequest.NoGeometry)
log.debug('extracting data from \'%s\' on fields: %s'%(vlay.name(), fieldn_l))
#===========================================================================
# loop through each feature and extract the data
#===========================================================================
fid_attvs = dict() #{fid : {fieldn:value}}
fcnt = vlay.dataProvider().featureCount()
for indxr, feat in enumerate(vlay.getFeatures(request)):
#zip values
fid_attvs[feat.id()] = feat.attributes()
feedback.setProgress((indxr/fcnt)*90)
#===========================================================================
# post checks
#===========================================================================
if not len(fid_attvs) == vlay.dataProvider().featureCount():
log.debug('data result length does not match feature count')
if not request.filterType()==3: #check if a filter fids was passed
"""todo: add check to see if the fiter request length matches tresult"""
raise Error('no filter and data length mismatch')
#check the field lengths
if not len(all_fnl) == len(feat.attributes()):
raise Error('field length mismatch')
#empty check 1
if len(fid_attvs) == 0:
log.warning('failed to get any data on layer \'%s\' with request'%vlay.name())
if not allow_none:
raise Error('no data found!')
else:
if fmt == 'dict':
return dict()
elif fmt == 'df':
return pd.DataFrame()
else:
raise Error('unexpected fmt type')
#===========================================================================
# result formatting
#===========================================================================
log.debug('got %i data elements for \'%s\''%(
len(fid_attvs), vlay.name()))
if fmt == 'dict':
return fid_attvs
elif fmt=='df':
#build the dict
df_raw = | pd.DataFrame.from_dict(fid_attvs, orient='index', columns=all_fnl) | pandas.DataFrame.from_dict |
import os
import pandas as pd
def get_data():
"""
Loads the data for the project.
"""
import os
import pandas as pd
pardir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
datadir = pardir + "\\Data"
datadir
offense = []
defense = []
offensel = []
defensel = []
for f in os.listdir(datadir):
if f[11] == "O":
offense.append(datadir + "\\" + f)
if f[11] == "D":
defense.append(datadir + "\\" + f)
for f in offense:
offensel.append(pd.read_excel(f,0))
for f in defense:
defensel.append(pd.read_excel(f,0))
return offensel, defensel
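# Note (sketch): the f[11] == "O"/"D" test in get_data assumes a fixed file-naming scheme in which the
# 12th character of each file name encodes offense vs defense; that naming convention is an assumption
# here, and the index would need adjusting for differently named files.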
def clean_data(offense, defense):
"""
Prepares the data for the project.
:param offense: DataFrame. The offense team data.
:param defense: DataFrame. The defense team data.
"""
import pandas as pd
i = 2002
for f in offense:
f.insert(0, "Year", i)
i+= 1
j = 2002
for g in defense:
g.insert(0, "Year", j)
j+= 1
for i in range(0, len(offense)):
offense[i] = offense[i].drop([32,33,34])
for i in range(0, len(defense)):
defense[i] = defense[i].drop([32,33,34])
combined = []
i = 0
for f in offense:
combined.append(f.merge(defense[i], how='inner', on=["Year","Tm"]))
i+=1
finalframe = | pd.concat(combined, ignore_index=True) | pandas.concat |
from collections import namedtuple
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from dask.array.utils import assert_eq as assert_eq_ar
from dask.dataframe.utils import assert_eq as assert_eq_df
from dask_ml.datasets import make_classification
from dask_ml.utils import (
_num_samples,
assert_estimator_equal,
check_array,
check_chunks,
check_consistent_length,
check_matching_blocks,
check_random_state,
handle_zeros_in_scale,
slice_columns,
)
df = dd.from_pandas(pd.DataFrame(5 * [range(42)]).T, npartitions=5)
s = dd.from_pandas(pd.Series([0, 1, 2, 3, 0]), npartitions=5)
a = da.from_array(np.array([0, 1, 2, 3, 0]), chunks=3)
X, y = make_classification(chunks=(2, 20))
Foo = namedtuple("Foo", "a_ b_ c_ d_")
Bar = namedtuple("Bar", "a_ b_ d_ e_")
def test_slice_columns():
columns = [2, 3]
df2 = slice_columns(df, columns)
X2 = slice_columns(X, columns)
assert list(df2.columns) == columns
assert_eq_df(df[columns].compute(), df2.compute())
assert_eq_ar(X.compute(), X2.compute())
def test_handle_zeros_in_scale():
s2 = handle_zeros_in_scale(s)
a2 = handle_zeros_in_scale(a)
assert list(s2.compute()) == [1, 1, 2, 3, 1]
assert list(a2.compute()) == [1, 1, 2, 3, 1]
x = np.array([1, 2, 3, 0], dtype="f8")
expected = np.array([1, 2, 3, 1], dtype="f8")
result = handle_zeros_in_scale(x)
np.testing.assert_array_equal(result, expected)
x = pd.Series(x)
expected = pd.Series(expected)
result = handle_zeros_in_scale(x)
| tm.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
import os
import tempfile
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import simulation as sim
from ...utils.testing import assert_frames_equal
def setup_function(func):
sim.clear_sim()
sim.enable_cache()
def teardown_function(func):
sim.clear_sim()
sim.enable_cache()
@pytest.fixture
def df():
return pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['x', 'y', 'z'])
def test_tables(df):
wrapped_df = sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
assert set(sim.list_tables()) == {'test_frame', 'test_func'}
table = sim.get_table('test_frame')
assert table is wrapped_df
assert table.columns == ['a', 'b']
assert table.local_columns == ['a', 'b']
assert len(table) == 3
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a)
pdt.assert_series_equal(table.a, df.a)
| pdt.assert_series_equal(table['b'], df['b']) | pandas.util.testing.assert_series_equal |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys
import numpy as np
import pytest
from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
compat,
)
import pandas._testing as tm
from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self) -> None:
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser.engine = "c"
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("squeeze", [True, False])
def test_squeeze(all_parsers, squeeze):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv_check_warnings(
FutureWarning,
"The squeeze argument has been deprecated "
"and will be removed in a future version. "
'Append .squeeze\\("columns"\\) to the call to squeeze.\n\n',
StringIO(data),
index_col=0,
header=None,
squeeze=squeeze,
)
if not squeeze:
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
@xfail_pyarrow
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@xfail_pyarrow
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
@xfail_pyarrow
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
def test_read_filepath_or_buffer(all_parsers):
# see gh-43366
parser = all_parsers
with pytest.raises(TypeError, match="Expected file path name or file-like"):
parser.read_csv(filepath_or_buffer=b"input")
@xfail_pyarrow
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
# Skip for now, actually only one test fails though, but its tricky to xfail
@skip_pyarrow
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with | tm.ensure_clean(filename) | pandas._testing.ensure_clean |
"""
Created at EPFL 2020
@author: <NAME>meters
Placing detectors on edges
Input requirements:
- dataframe with edges, needed column names = 'N1', 'N2', 'length'
- Linestring column in x-y coordinates (needed for interpolate function) with name = 'geom'
Note: Linestring object can follow curves, not just straight lines between begin and end node
"""
# Choice between two implementations:
# - single detectors
# - multiple detectors at once
import pneumapackage.compassbearing as cpb
from pneumapackage.__init__ import path_results
from pneumapackage.settings import *
import geopandas as gpd
import pandas as pd
import osmnx as ox
from pyproj import Proj
from shapely.geometry import Point, LineString
import leuvenmapmatching.util.dist_euclidean as distxy
from pathlib import Path
import os
# Single implementation
# gdf = dataframe with edges with Linestring object in x-y coordinates
# distance = specified distance of point object from start node of linestring object
# relative = place point object on relative position wrt to length of edge
# reverse = specified distance of point object starting from end node of linestring object
class Detectors:
def __init__(self, gdf_netw, n_det, length_detector, dfi, double_loops, lonlat=False, gdf_special=None):
self.network = gdf_netw
self.n_det = n_det
self.dfi = dfi
self.len_det = length_detector
if type(double_loops) in (int, float):
self.double_loops = True
self.loop_width = double_loops
else:
self.double_loops = False
self.loop_width = 0
self.det_loc = make_double_detector(self.network, self.dfi, n_det=self.n_det, loop_width=self.loop_width,
make_double_loops=self.double_loops)
self.det_loc_latlon = get_xy_to_crs_double_loops(self.det_loc, n_det=self.n_det, double_loops=self.double_loops,
lonlat=lonlat)
self.det_edges = make_detector_edges(self.det_loc_latlon, self.len_det, double_loops=self.double_loops)
self.features = edge_features(gdf_special, length_detector, lonlat=lonlat)
self.det_edges_all = self.det_edges[0]
self.det_edges_all_ts = {}
self.det_selection = {}
def info(self):
det_info = {'number_detectors': self.n_det, 'distance_from_intersection': self.dfi,
'length_detector': self.len_det, 'double_loops': self.double_loops, 'loop_width': self.loop_width}
return det_info
def detector_selection(self, index_list):
det_sel = self.det_edges_all[self.det_edges_all['_id'].isin(index_list)]
self.det_selection = det_sel
return det_sel
def detector_projected(self):
det_loc = self.det_loc
tmp_det = pd.merge(det_loc, self.network[['_id', 'x1', 'y1', 'x2', 'y2']], how='left', on='_id')
for ind in range(1, self.n_det + 1):
tmp_det[f'crd{ind}'] = [t.coords[0] for t in tmp_det[f'detector {ind}']]
det_proj = tmp_det.apply(help_det_proj, column_name=f'crd{ind}', axis=1)
det_loc[f'proj_det{ind}'] = det_proj
self.det_loc = det_loc
def features_projected(self):
ft = self.features
ft = ft.reset_index()
tmp_ft = pd.merge(ft, self.network[['_id', 'x1', 'y1', 'x2', 'y2']], how='left', on='_id')
ft_proj = tmp_ft.apply(help_det_proj, column_name='xy', axis=1)
ft['proj_feature'] = ft_proj
ft.set_index(['_id', 'index'], inplace=True)
self.features = ft
def detector_to_shapefile(self, det_sel=False, filename=None, folder=path_results):
detector = self.det_edges_all
fn = ''
if filename is not None:
fn = filename
if det_sel:
detector = self.det_selection
Path(folder + "/shapefiles").mkdir(parents=True, exist_ok=True)
for det in range(1, self.n_det + 1):
det_gdf = gpd.GeoDataFrame(detector[['_id', 'n1', 'n2']], geometry=detector[f'det_edge_{det}'])
det_gdf_shp = det_gdf.copy()
det_gdf_shp.crs = 'epsg:4326'
shp_fn = os.path.join(folder, 'shapefiles', f'detector_{det}{fn}')
det_gdf_shp.to_file(filename=shp_fn)
if self.double_loops:
det_bis_gdf = gpd.GeoDataFrame(detector[['_id', 'n1', 'n2']], geometry=detector[f'det_edge_{det}bis'])
det_bis_gdf_shp = det_bis_gdf.copy()
det_bis_gdf_shp.crs = 'epsg:4326'
shp_fnbis = os.path.join(folder, 'shapefiles', f'detector_{det}bis{fn}')
det_bis_gdf_shp.to_file(filename=shp_fnbis)
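# Usage sketch (assumptions: `edges_gdf` is a projected edge GeoDataFrame with the columns this class
# expects ('_id', 'n1', 'n2', 'length', geometry); 2 m wide double loops, detectors 20 m from the node):
#   dets = Detectors(edges_gdf, n_det=2, length_detector=10, dfi=20, double_loops=2)
#   dets.detector_to_shapefile(filename='_run1')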
def make_detector(gdf, distance, relative=False, reverse=False):
if distance < 0:
raise Exception('distance should be positive. The value was: {}'.format(distance))
if relative:
if 1 < distance:
raise Exception('distance should be lower or equal to 1 to be relative. '
'The value was: {}'.format(distance))
if gdf.crs.to_epsg() == 4326:
gdf = ox.project_gdf(gdf)
name = []
id_b = []
if not reverse:
for i, j in gdf.iterrows():
if relative:
d = gdf.loc[i, 'geometry'].interpolate(distance, normalized=relative)
elif gdf['length'][i] > distance:
d = gdf.loc[i, 'geometry'].interpolate(distance, normalized=relative)
else:
d = gdf.loc[i, 'geometry'].interpolate(0.1, normalized=True)
id = gdf.loc[i, ['n1', 'n2']]
name.append(d)
id_b.append(id)
else:
for i, j in gdf.iterrows():
if gdf['length'][i] > distance:
d = gdf.loc[i, 'geometry'].interpolate(gdf.loc[i, 'length'] - distance, normalized=relative)
else:
d = gdf.loc[i, 'geometry'].interpolate(0.9, normalized=True)
id = gdf.loc[i, ['n1', 'n2']]
name.append(d)
id_b.append(id)
name = pd.DataFrame(name, columns=['detector_1'])
id_b = pd.DataFrame(id_b, columns=['n1', 'n2'])
name = | pd.concat([name, id_b], axis=1) | pandas.concat |
from dataclasses import replace
from tika import parser
import utils
import json
from datetime import datetime
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
def get_meta_indices(alist):
for index, item in enumerate(alist):
if "AUSFAHRLISTE" in item:
return [i for i in range(index, len(alist))]
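# Worked example (sketch): if "AUSFAHRLISTE" first appears at index 3 of a 10-item list, this
# returns [3, 4, 5, 6, 7, 8, 9].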
class Ausfahrliste:
"""
Separation of customer info is based on the fact that each
customer has a number. If you look keenly after splitting
the extracted pdf, you'll find that customer numbers are not
preceded by any space while other numbers are (apart from
plz or regional code). Thus, we are taking advantage of these
two facts to separate customers.
Assumption:
The number of customers will never exceed 1000, so the region
code can never be mistaken for a customer number.
"""
def __init__(self, afile):
self.afile = afile
self.table = None
def read_pdf(self):
return parser.from_file(self.afile)
def get_content(self):
contents = self.read_pdf()["content"]
contents = utils.replace_unreasonable_text(text=contents, text2replace="76356 in Eimer legen", replacer_text="in Eimer legen\n76356 Weingarten")
return contents
def create_reasonable_pdf_text_splits(self):
return self.get_content().split("\n")
def get_indices_of_customer_numbers(self):
"""
Here we are taking advantage of the fact that
a customer number has no preceding space in the string.
:return: list
"""
customer_line_indices = []
for index, line in enumerate(self.create_reasonable_pdf_text_splits()):
line_splits = line.split(" ")
if utils.is_comma(line_splits): # Every customer line must have a comma
if len(line_splits) > 0: # This ensures all blank items are removed
try:
int(line_splits[0]) # we are looking for an integer
if len(line_splits[0]) < 5: # Region code should not be the first customer line
customer_line_indices.append(index)
except ValueError:
""
return customer_line_indices
def create_index_spans_for_customers(self):
"""
We take advantage of the preceding function where
we create customer indices to create spans of
indices
:return: dictionary
"""
indices = self.get_indices_of_customer_numbers()
customer_spans = {}
for index, _ in enumerate(indices):
if index < len(indices) - 1:
customer_spans[f'customer_no {index + 1}'] = [i for i in range(indices[index], indices[index + 1])]
else:
customer_spans[f'customer_no {index + 1}'] = [i for i in
range(indices[index],
len(self.create_reasonable_pdf_text_splits()))] # from the last index containing customer no to the end of the document
return customer_spans
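# Worked example (sketch): customer-number lines at indices [12, 30, 55] in a 60-line document give
# {'customer_no 1': indices 12..29, 'customer_no 2': 30..54, 'customer_no 3': 55..59} (ranges shown
# abbreviated; the stored values are the full lists of indices).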
def get_actual_customer_data_using_indices(self):
"""
using the indices in the preceding function, let us get customer
data
:return: dictionary
"""
customer_indices_dict = self.create_index_spans_for_customers()
mixed_customer_data = self.create_reasonable_pdf_text_splits()
customer_data_dict = customer_indices_dict
for key in customer_indices_dict:
single_customer_indices = customer_indices_dict[key]
customer_data_dict[key] = [mixed_customer_data[i] for i in single_customer_indices]
# if key == "customer_no 6" or key == "customer_no 7" or key == "customer_no 10":
# print_json(customer_data_dict[key])
return customer_data_dict
def get_meta_info(self):
whole_document_list = self.create_reasonable_pdf_text_splits()
whole_document_list = [i for i in whole_document_list if i != ""]
meta_dict = {}
for index, item in enumerate(whole_document_list):
splits = item.split(" ")
if "Route" in splits:
for sub_index, sub_item in enumerate(splits):
if "." in sub_item:
meta_dict["date"] = datetime.strftime(datetime.strptime(sub_item, "%d.%m.%y"), "%Y-%m-%d")
if "Seite" in sub_item:
meta_dict["page"] = splits[sub_index + 1]
if "Route" in sub_item:
meta_dict["route"] = splits[sub_index - 1]
if "eigene" in splits:
for sub_index, sub_item in enumerate(splits):
if "." in sub_item:
meta_dict["date"] = datetime.strftime(datetime.strptime(sub_item, "%d.%m.%y"), "%Y-%m-%d")
if "Seite" in sub_item:
meta_dict["page"] = splits[sub_index + 1]
if "Tour:" in splits:
for sub_index, sub_item in enumerate(splits):
if sub_item == "Tour:":
meta_dict["tour"] = splits[sub_index + 1]
if sub_item == "Logistikpartner:":
meta_dict["logistikpartner"] = utils.list2string(
[splits[i] for i in [sub_index + 1, sub_index + 2]])
if sub_item == "km":
meta_dict["short distance"] = splits[sub_index - 1]
try:
long_route = splits[sub_index + 1]
meta_dict["long route"] = utils.remove_brackets(long_route)
except IndexError:
meta_dict["long distance"] = ""
if "Kommissionierzone:" in splits:
meta_dict["bakery"] = utils.list2string(splits[1:])
return meta_dict
def get_list_of_breads_for_this_ausfahrliste(self):
breads = []
customer_dict = self.get_actual_customer_data_using_indices()
for key in customer_dict:
bread_dict = utils.get_breads(customer_dict[key])
specific_customer_breads = [i for i in bread_dict.keys()]
for bread in specific_customer_breads:
if bread not in breads:
breads.append(bread)
return sorted(breads)
def make_customer_table(self):
all_breads = self.get_list_of_breads_for_this_ausfahrliste()
customer_dict = self.get_actual_customer_data_using_indices()
dict_list = []
for key in customer_dict:
customer_no = key.split(" ")[1]
alist = customer_dict[key]
customer_name = utils.get_customer_name(alist)
customer_partial_address = utils.get_customer_partial_address(alist)
customer_status = utils.get_customer_status(alist)
customer_plz = utils.get_region_plz(alist)
customer_address = f'{customer_partial_address}{customer_plz}'
customer_instructions = utils.get_additional_customer_information(alist)
customer_info_as_dict = {
"no": customer_no,
"name": customer_name,
"address": customer_address,
"status": customer_status,
"instructions": customer_instructions
}
bread_dict = utils.get_breads(alist)
for bread in all_breads:
if bread in bread_dict:
customer_info_as_dict[bread] = bread_dict[bread]
else:
customer_info_as_dict[bread] = 0
dict_list.append(pd.DataFrame(customer_info_as_dict, index=[0]))
df = | pd.concat(dict_list) | pandas.concat |
import streamlit as st
import json
import requests
import pandas as pd
import numpy as np
import re
from datetime import datetime as dt
from datetime import time
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
st.set_page_config(layout="centered",
page_title="DataCracy Dashboard",
page_icon = '💙')
#SLACK_BEARER_TOKEN = os.environ.get('SLACK_BEARER_TOKEN') ## Get in setting of Streamlit Share
SLACK_BEARER_TOKEN = st.secrets["TOKEN"]
DTC_GROUPS_URL = ('https://raw.githubusercontent.com/anhdanggit/atom-assignments/main/data/datacracy_groups.csv')
#st.write(json_data['SLACK_BEARER_TOKEN'])
@st.cache
def load_users_df():
# Slack API User Data
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
response_json = requests.post(endpoint, headers=headers).json()
user_dat = response_json['members']
# Convert to CSV
user_dict = {'user_id':[],'name':[],'display_name':[],'real_name':[],'title':[],'is_bot':[]}
for i in range(len(user_dat)):
user_dict['user_id'].append(user_dat[i]['id'])
user_dict['name'].append(user_dat[i]['name'])
user_dict['display_name'].append(user_dat[i]['profile']['display_name'])
user_dict['real_name'].append(user_dat[i]['profile']['real_name_normalized'])
user_dict['title'].append(user_dat[i]['profile']['title'])
user_dict['is_bot'].append(int(user_dat[i]['is_bot']))
user_df = | pd.DataFrame(user_dict) | pandas.DataFrame |
"""Loads, standardizes and validates input data for the simulation.
Abstract the extract and transform pieces of the artifact ETL.
The intent here is to provide a uniform interface around this portion
of artifact creation. The value of this interface shows up when more
complicated data needs are part of the project. See the BEP project
for an example.
`BEP <https://github.com/ihmeuw/vivarium_gates_bep/blob/master/src/vivarium_gates_bep/data/loader.py>`_
.. admonition::
No logging is done here. Logging is done in vivarium inputs itself and forwarded.
"""
import pandas as pd
from gbd_mapping import causes, covariates, risk_factors, sequelae
from db_queries import (
get_covariate_estimates,
get_location_metadata,
get_population,
)
from vivarium_gbd_access.utilities import get_draws
from vivarium.framework.artifact import EntityKey
from vivarium_gbd_access import constants as gbd_constants, gbd
from vivarium_inputs import (
globals as vi_globals,
interface,
utilities as vi_utils,
utility_data,
)
from vivarium_inputs.mapping_extension import alternative_risk_factors
from vivarium_gates_iv_iron.constants import data_keys, metadata
from vivarium_gates_iv_iron.constants.data_values import MATERNAL_HEMORRHAGE_SEVERITY_PROBABILITY
from vivarium_gates_iv_iron.data import utilities
from vivarium_gates_iv_iron.utilities import (
create_draws,
get_lognorm_from_quantiles,
get_truncnorm_from_quantiles,
get_random_variable_draws_for_location,
)
def get_data(lookup_key: str, location: str) -> pd.DataFrame:
"""Retrieves data from an appropriate source.
Parameters
----------
lookup_key
The key that will eventually get put in the artifact with
the requested data.
location
The location to get data for.
Returns
-------
The requested data.
"""
mapping = {
data_keys.POPULATION.LOCATION: load_population_location,
data_keys.POPULATION.STRUCTURE: load_population_structure,
data_keys.POPULATION.AGE_BINS: load_age_bins,
data_keys.POPULATION.DEMOGRAPHY: load_demographic_dimensions,
data_keys.POPULATION.TMRLE: load_theoretical_minimum_risk_life_expectancy,
data_keys.POPULATION.ACMR: load_standard_data,
data_keys.POPULATION.PREGNANT_LACTATING_WOMEN_LOCATION_WEIGHTS: get_pregnant_lactating_women_location_weights,
data_keys.POPULATION.WOMEN_REPRODUCTIVE_AGE_LOCATION_WEIGHTS: get_women_reproductive_age_location_weights,
data_keys.PREGNANCY.INCIDENCE_RATE: load_pregnancy_incidence_rate,
data_keys.PREGNANCY.PREGNANT_PREVALENCE: get_prevalence_pregnant,
data_keys.PREGNANCY.NOT_PREGNANT_PREVALENCE: get_prevalence_not_pregnant,
data_keys.PREGNANCY.POSTPARTUM_PREVALENCE: get_prevalence_postpartum,
data_keys.PREGNANCY.INCIDENCE_RATE_MISCARRIAGE: load_standard_data,
data_keys.PREGNANCY.INCIDENCE_RATE_ECTOPIC: load_standard_data,
data_keys.PREGNANCY.ASFR: load_asfr,
data_keys.PREGNANCY.SBR: load_sbr,
data_keys.LBWSG.DISTRIBUTION: load_metadata,
data_keys.LBWSG.CATEGORIES: load_metadata,
data_keys.LBWSG.EXPOSURE: load_lbwsg_exposure,
data_keys.PREGNANCY_OUTCOMES.STILLBIRTH: load_pregnancy_outcome,
data_keys.PREGNANCY_OUTCOMES.LIVE_BIRTH: load_pregnancy_outcome,
data_keys.PREGNANCY_OUTCOMES.OTHER: load_pregnancy_outcome,
data_keys.MATERNAL_DISORDERS.CSMR: load_standard_data,
data_keys.MATERNAL_DISORDERS.INCIDENCE_RATE: load_standard_data,
data_keys.MATERNAL_DISORDERS.YLDS: load_maternal_disorders_ylds,
data_keys.MATERNAL_HEMORRHAGE.CSMR: load_standard_data,
data_keys.MATERNAL_HEMORRHAGE.INCIDENCE_RATE: load_standard_data,
data_keys.HEMOGLOBIN.MEAN: get_hemoglobin_data,
data_keys.HEMOGLOBIN.STANDARD_DEVIATION: get_hemoglobin_data,
}
return mapping[lookup_key](lookup_key, location)
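# Illustrative dispatch example (not part of the original loader; the key and
# location values below are assumptions that depend on the project's data_keys
# constants and the GBD location names):
#
#     df = get_data(data_keys.POPULATION.AGE_BINS, "LMICs")
#
# Each mapped loader receives (lookup_key, location) and returns the object
# that is stored in the artifact for that key.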
def load_population_location(key: str, location: str) -> str:
if key != data_keys.POPULATION.LOCATION:
raise ValueError(f"Unrecognized key {key}")
return location
def load_population_structure(key: str, location: str) -> pd.DataFrame:
if location == "LMICs":
world_bank_1 = filter_population(interface.get_population_structure("World Bank Low Income"))
world_bank_2 = filter_population(interface.get_population_structure("World Bank Lower Middle Income"))
population_structure = | pd.concat([world_bank_1, world_bank_2]) | pandas.concat |
## DATA PREPARATION FOR MULTIVARIATE REGRESSION
#------------------------------------------------------
## LIBRARIES
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#---------------------------------
## DATA LOADING
veri = pd.read_csv("C:\\Users\\Computer\\Desktop\\python-machine_learning\\Veriler\\data.csv")
print(veri,"\n")
#------------------------------------------------------
## CATEGORICAL DATA CONVERSION
# for the "ulke" (country) column
ulke = veri.iloc[:,0:1].values
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
ulke[:,0] = le.fit_transform(veri.iloc[:,0])
one_hot_encoder = preprocessing.OneHotEncoder()
ulke = one_hot_encoder.fit_transform(ulke).toarray()
print(ulke,"\n")
#-----------------------------------------------------
# for the "cinsiyet" (gender) column
cınsıyet = veri.iloc[:,-1:].values
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
cınsıyet[:,-1] = le.fit_transform(veri.iloc[:,-1])
one_hot_encoder = preprocessing.OneHotEncoder()
cınsıyet = one_hot_encoder.fit_transform(cınsıyet).toarray()
print(cınsıyet,"\n")
#---------------------------------------------------------
## MERGING THE DATA
#----------------------------------------
boy_kilo_yas = veri[["boy","kilo","yas"]]
print(boy_kilo_yas)
# the boy (height), kilo (weight) and yas (age) columns were not selected earlier, so they are taken here now
#---------------------------------------
sonuc = pd.DataFrame(data=ulke, index=range(22), columns=['fr','tr','us'])
sonuc_1 = pd.DataFrame(data=boy_kilo_yas, index=range(22), columns=['boy','kilo','yas'])
sonuc_2 = pd.DataFrame(data=cınsıyet[:,:1], index=range(22), columns=['cinsiyet'])
# Only one of the two gender columns we converted to numeric is used here,
# because using both at once could run into the so-called "dummy variable trap".
s=pd.concat([sonuc,sonuc_1],axis=1)
# here the country (ulke) and height/weight/age (boy/kilo/yas) data are merged
s_1= | pd.concat([s,sonuc_2],axis=1) | pandas.concat |
# -*- coding:utf-8 -*-
# @Time : 2020/1/122:48
# @Author : liuqiuxi
# @Email : <EMAIL>
# @File : fundfeedsjqdata.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark : This is class of fund market
import pandas as pd
import datetime
import copy
from datafeeds.jqdatafeeds import BaseJqData
from datafeeds.utils import BarFeedConfig
from datafeeds import logger
class AFundQuotationJqData(BaseJqData):
LOGGER_NAME = "AFundQuotationJqData"
def __init__(self):
super(AFundQuotationJqData, self).__init__()
self.__adjust_name_dict = {"F": "pre", "B": "post"}
self.__need_adjust_columns = ["close"]
self.__logger = logger.get_logger(name=self.LOGGER_NAME)
def get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted=None):
securityIds_OTC = []
securityIds_EXC = []
        # distinguish exchange-traded funds from OTC funds
for securityId in securityIds:
code_suffix = securityId[securityId.find(".") + 1:]
if code_suffix == "OF":
securityIds_OTC.append(securityId)
elif code_suffix == "SH" or code_suffix == "SZ":
securityIds_EXC.append(securityId)
else:
                self.__logger.warning("the securityId: %s is not supported in fund quotation, we remove it" % securityId)
        # get data for exchange-traded funds
if len(securityIds_EXC) > 0:
data0 = self.__get_exchange_quotation(securityIds=securityIds_EXC, items=items, frequency=frequency,
begin_datetime=begin_datetime, end_datetime=end_datetime,
adjusted=adjusted)
else:
data0 = pd.DataFrame()
        # get data for OTC funds
if len(securityIds_OTC) > 0:
data1 = self.__get_otc_quotation(securityIds=securityIds_OTC, items=items, frequency=frequency,
begin_datetime=begin_datetime, end_datetime=end_datetime,
adjusted=adjusted)
else:
data1 = pd.DataFrame()
# merge OTC and EXC
if not data0.empty and not data1.empty:
columns = list(set(data0.columns).union(set(data1.columns)))
for column in columns:
if column not in data0.columns:
data0.loc[:, column] = None
if column not in data1.columns:
data1.loc[:, column] = None
data0 = data0.loc[:, columns].copy(deep=True)
data1 = data1.loc[:, columns].copy(deep=True)
data = pd.concat(objs=[data0, data1], axis=0, join="outer")
else:
if data0.empty:
data = data1.copy(deep=True)
elif data1.empty:
data = data0.copy(deep=True)
else:
raise BaseException("[AFundQuotationJqData] something may wrong")
data.reset_index(inplace=True, drop=True)
data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
data.reset_index(inplace=True, drop=True)
non_find_securityIds = list(set(securityIds) - set(data.loc[:, "securityId"]))
if len(non_find_securityIds) > 0:
self.__logger.warning("we can't get securityIds: %s data, please check it" % non_find_securityIds)
return data
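    # Usage sketch (illustrative only; the security codes, item names and dates
    # below are assumptions, not values taken from this module):
    #
    #     feed = AFundQuotationJqData()
    #     df = feed.get_quotation(
    #         securityIds=["510300.SH", "000001.OF"], items=["close"],
    #         frequency=86400, begin_datetime=datetime.datetime(2019, 1, 1),
    #         end_datetime=datetime.datetime(2019, 12, 31), adjusted="F")
    #
    # Exchange codes (.SH/.SZ) are routed to __get_exchange_quotation, OTC
    # codes (.OF) to __get_otc_quotation, and the two results are merged.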
def __get_exchange_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted):
connect = self.connect()
securityIds = self.wind_to_default(securityIds=securityIds)
frequency = self.get_frequency_cycle(frequency=frequency)
adjusted = self.__adjust_name_dict.get(adjusted, None)
rename_dict = BarFeedConfig.get_jq_data_items().get(self.LOGGER_NAME)
data = pd.DataFrame()
for securityId in securityIds:
data0 = connect.get_price(security=securityId, start_date=begin_datetime, end_date=end_datetime,
frequency=frequency, skip_paused=False, fq=adjusted)
data0.loc[:, "dateTime"] = data0.index
securityId = self.default_to_wind(securityIds=[securityId])
data0.loc[:, "securityId"] = securityId
data = pd.concat(objs=[data, data0], axis=0, join="outer")
data.rename(columns=rename_dict, inplace=True)
# choose items to data
default_items = list(rename_dict.values())
real_items = []
for item in items:
if item in ["securityId", "dateTime"]:
self.__logger.info("There is no need add item: %s to parameters items" % item)
elif item in default_items:
real_items.append(item)
else:
                self.__logger.warning("item %s not in default items, so we remove this item from the data" % item)
data = data.loc[:, ["dateTime", "securityId"] + real_items].copy(deep=True)
connect.logout()
return data
def __get_otc_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted):
connect = self.connect()
finance = connect.finance
if frequency != 86400:
            self.__logger.warning("otc quotation only supports daily data, the frequency: %s is not supported now" % frequency)
return | pd.DataFrame() | pandas.DataFrame |
from itertools import combinations
import pandas as pd
import pytest
mock_data = {
"block": [
1,
1,
2,
3,
2,
2,
2,
2,
1,
1,
1,
1,
1,
2,
3,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
"test class 3",
"test class 3",
"test class 3",
"test class 3",
"test class 4",
"test class 4",
"test class 5",
"test class 5",
"test class 5",
"test class 6",
"test class 7",
"test class 8",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 7",
"test class 9",
"test class 9",
],
"student": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Test 1",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Test 1",
"Test 2",
"Test 3",
"Test 4",
"Test 5",
"Test 6",
"Test 7",
"Test 8",
"Test 9",
"Test 10",
"Test 11",
"Test 12",
"Test 13",
"Test 14",
"Test 15",
"Test 16",
"Test 17",
"Test 18",
"Test 19",
"Test 20",
"Test 21",
"Test 22",
"Test 23",
"Test 24",
"Test 25",
"Test 26",
"<NAME>",
"<NAME>",
],
}
@pytest.fixture(scope="session")
def class_size_check(test_schedule):
df = pd.read_excel(str(test_schedule), engine="openpyxl")
class_size = (
df.groupby(
[
"block",
"class",
]
)
.size()
.to_frame()
.reset_index()
)
class_size = [
{"block": x[0], "class_name": x[1], "total_students": x[2]} for x in class_size.to_numpy()
]
return class_size
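# Shape sketch of the list built above (values are illustrative): each entry is
#     {"block": 1, "class_name": "test class 1", "total_students": 2}
# i.e. one dict per (block, class) group produced by the groupby().size() call.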
@pytest.fixture(scope="session")
def student_matches_check(test_schedule):
df = pd.read_excel(str(test_schedule), engine="openpyxl")
blocks = df["block"].sort_values().unique()
total_blocks = df["block"].max()
match_df = df.pivot(index="student", columns="block", values="class").reset_index()
matches = [{i: []} for i in range(total_blocks, 1, -1)]
all_combinations = []
for r in range(len(blocks) + 1):
found_combinations = combinations(blocks, r)
combinations_list = list(found_combinations)
for comb in combinations_list:
if len(comb) > 1:
all_combinations += combinations_list
all_combinations.sort(reverse=True, key=len)
for comb in all_combinations:
exclude = []
for match in matches:
for m in match:
for student_matches in match[m]:
for student_match in student_matches:
exclude.append(student_match)
match_df = match_df[~match_df["student"].isin(exclude)]
matches_key = len(comb)
matches_loc = total_blocks - len(comb)
match_some_df = match_df.groupby(list(comb))
for match in match_some_df:
match_list = match[1][["student"]].values.tolist()
check = [x.pop() for x in match_list if len(match_list) > 1]
if check:
matches[matches_loc][matches_key].append(check)
return matches
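# Shape sketch of the structure returned above (illustrative): with 3 blocks,
# `matches` is [{3: [...]}, {2: [...]}], where each inner list holds groups of
# student names that share the same class in that many blocks.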
@pytest.fixture(scope="session")
def student_classes_check(test_schedule):
df = pd.read_excel(str(test_schedule), engine="openpyxl")
student_classes = {} # type: ignore
for student in (
df[["student", "block", "class"]]
.sort_values(
by=[
"block",
"class",
]
)
.to_numpy()
):
if student[0] in student_classes:
student_classes[student[0]]["blocks"][student[1]] = student[2]
else:
student_classes[student[0]] = {"blocks": {student[1]: student[2]}}
return student_classes
@pytest.fixture
def test_schedule_df():
return pd.DataFrame(mock_data)
@pytest.fixture(scope="session")
def test_schedule(tmp_path_factory):
save_dir = tmp_path_factory.mktemp("schedule").joinpath("original_schedule.xlsx")
df = pd.DataFrame(mock_data)
df.to_excel(save_dir, index=False, engine="openpyxl")
return save_dir
@pytest.fixture(scope="session")
def test_schedule_csv(tmp_path_factory):
save_dir = tmp_path_factory.mktemp("schedule").joinpath("original_schedule.csv")
df = | pd.DataFrame(mock_data) | pandas.DataFrame |
"""
Authors: <NAME> and <NAME>
"""
from bloomberg import BBG
import pandas as pd
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
bbg = BBG()
min_max_scaler = preprocessing.MinMaxScaler()
# Pulling IBOVESPA and S&P indexes volatility, as well as general US and BR 10-year bonds
# Original Date: '28-fev-1967'
start_date = pd.to_datetime('01-jan-2010')
end_date = pd.to_datetime('today')
df = bbg.fetch_series(securities=['SPX Index', 'IBOV Index', 'USGG10YR Index', 'GEBR10Y Index'],
fields=['VOLATILITY_90D', 'Volatil 90D'],
startdate=start_date,
enddate=end_date)
volSPX_90 = pd.DataFrame(data=df['SPX Index'])
volSPX_90 = volSPX_90.droplevel('FIELD')
volSPX_90 = volSPX_90.resample('Q').last()
volIBOV_90 = pd.DataFrame(data=df['IBOV Index'])
volIBOV_90 = volIBOV_90.droplevel('FIELD')
volIBOV_90 = volIBOV_90.resample('Q').last()
volbonds_90 = pd.DataFrame(data=df['USGG10YR Index'])
volbonds_90 = volbonds_90.droplevel('FIELD')
volbonds_90 = volbonds_90.resample('Q').last()
voltitul_90 = | pd.DataFrame(data=df['GEBR10Y Index']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import os
DATASET_FOLDER = '../../datasets/'
def gen_worldbank_countries():
    # Writes a csv with the countries of interest and their associated country codes
df_des = dataframe_flood()
df = pd.DataFrame(df_des.groupby('Country')[['Country', 'ISO']].head(1))
df.rename(columns={'Country': 'name', 'ISO':'code'}, inplace=True)
df.to_csv(f'{DATASET_FOLDER}worldbank_countries.csv', index=False)
def dataframe_flood():
    # Returns a dataframe with flood-type disasters
try:
df = | pd.read_excel(f'{DATASET_FOLDER}emdat_public_2020_09_12_query_uid-tAnKEX.xlsx', index_col=0) | pandas.read_excel |
# -*- coding: utf-8 -*-
import gc
import os
import pickle
import random
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import torch
from transformers import GPT2Tokenizer
from experiment import Intervention, Model
from utils import convert_results_to_pd
np.random.seed(1)
torch.manual_seed(1)
def get_template_list():
# Get list of all considered templates
# "That" sentences are ours
# "Because" sentences are a subset
# from https://arxiv.org/pdf/1807.11714.pdf (Lu et al.)
return ["The {} said that",
"The {} yelled that",
"The {} whispered that",
"The {} wanted that",
"The {} desired that",
"The {} wished that",
"The {} ate because",
"The {} ran because",
"The {} drove because",
"The {} slept because",
"The {} cried because",
"The {} laughed because",
"The {} went home because",
"The {} stayed up because",
"The {} was fired because",
"The {} was promoted because",
"The {} yelled because"]
def get_intervention_types():
return ['man_indirect',
'woman_indirect']
def construct_interventions(base_sent, tokenizer, DEVICE, gender='female'):
interventions = {}
if gender == 'female':
filename = 'experiment_data/professions_female_stereo.json'
else:
filename = 'experiment_data/professions_male_stereo.json'
with open(filename, 'r') as f:
all_word_count = 0
used_word_count = 0
for l in f:
# there is only one line that eval's to an array
for j in eval(l):
all_word_count += 1
biased_word = j[0]
try:
interventions[biased_word] = Intervention(
tokenizer,
base_sent,
[biased_word, "man", "woman"],
["he", "she"],
device=DEVICE)
used_word_count += 1
except:
pass
# print("excepted {} due to tokenizer splitting.".format(
# biased_word))
print("Only used {}/{} neutral words due to tokenizer".format(
used_word_count, all_word_count))
return interventions
def compute_odds_ratio(df, gender='female', col='odds_ratio'):
# filter some stuff out
df['profession'] = df['base_string'].apply(lambda s: s.split()[1])
df['definitional'] = df['profession'].apply(get_stereotypicality)
df = df.loc[df['definitional'] < 0.75, :]
df = df[df['candidate2_base_prob'] > 0.01]
df = df[df['candidate1_base_prob'] > 0.01]
if gender == 'female':
odds_base = df['candidate1_base_prob'] / df['candidate2_base_prob']
odds_intervention = df['candidate1_prob'] / df['candidate2_prob']
else:
odds_base = df['candidate2_base_prob'] / df['candidate1_base_prob']
odds_intervention = df['candidate2_prob'] / df['candidate1_prob']
odds_ratio = odds_intervention / odds_base
df[col] = odds_ratio
return df
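# Worked illustration of the ratio computed above (made-up numbers):
#   odds_base         = candidate1_base_prob / candidate2_base_prob   e.g. 0.04 / 0.02 = 2.0
#   odds_intervention = candidate1_prob      / candidate2_prob        e.g. 0.03 / 0.03 = 1.0
#   odds_ratio        = odds_intervention / odds_base                 e.g. 1.0  / 2.0  = 0.5
# For gender='male' the two candidates swap roles, as in the else branch.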
def sort_odds_obj(df):
df['odds_diff'] = df['odds_ratio'].apply(lambda x: x-1)
df_sorted = df.sort_values(by=['odds_diff'], ascending=False)
return df_sorted
def get_stereotypicality(vals):
return abs(profession_stereotypicality[vals]['definitional'])
profession_stereotypicality = {}
with open("experiment_data/professions.json") as f:
for l in f:
for p in eval(l):
profession_stereotypicality[p[0]] = {
'stereotypicality': p[2],
'definitional': p[1],
'total': p[2]+p[1],
'max': max([p[2],p[1]], key=abs)}
# get global list
def get_all_contrib(templates, tokenizer, out_dir=''):
# get marginal contrib to empty set
female_df = get_intervention_results(templates, tokenizer, gender='female')
male_df = get_intervention_results(templates, tokenizer, gender='male')
gc.collect()
# compute odds ratio differently for each gender
female_df = compute_odds_ratio(female_df, gender='female')
male_df = compute_odds_ratio(male_df, gender='male')
female_df = female_df[['layer','neuron', 'odds_ratio']]
male_df = male_df[['layer','neuron', 'odds_ratio']]
gc.collect()
# merge and average
df = pd.concat([female_df, male_df])
df = df.groupby(['layer','neuron'], as_index=False).mean()
df_sorted = sort_odds_obj(df)
layer_list = df_sorted['layer'].values
neuron_list = df_sorted['neuron'].values
odds_list = df_sorted['odds_ratio'].values
marg_contrib = {}
marg_contrib['layer'] = layer_list
marg_contrib['neuron'] = neuron_list
marg_contrib['val'] = odds_list
pickle.dump(marg_contrib, open(out_dir + "/marg_contrib_" + model_type + ".pickle", "wb" ))
return layer_list, neuron_list
def get_intervention_results(templates, tokenizer, DEVICE='cuda', gender='female',
layers_to_adj=[], neurons_to_adj=[], intervention_loc='all',
df_layer=None, df_neuron=None):
if gender == 'female':
intervention_type = 'man_indirect'
else:
intervention_type = 'woman_indirect'
df = []
for template in templates:
# pickle.dump(template + "_" + gender, open("results/log.pickle", "wb" ) )
interventions = construct_interventions(template, tokenizer, DEVICE, gender)
intervention_results = model.neuron_intervention_experiment(interventions, intervention_type,
layers_to_adj=layers_to_adj, neurons_to_adj=neurons_to_adj,
intervention_loc=intervention_loc)
df_template = convert_results_to_pd(interventions, intervention_results, df_layer, df_neuron)
# calc odds ratio and odds-abs
df.append(df_template)
gc.collect()
return pd.concat(df)
def get_neuron_intervention_results(templates, tokenizer, layers, neurons):
female_df = get_intervention_results(templates, tokenizer, gender='female',
layers_to_adj=layers, neurons_to_adj=[neurons], intervention_loc='neuron',
df_layer=layers, df_neuron=neurons[0])
male_df = get_intervention_results(templates, tokenizer, gender='male',
layers_to_adj=layers, neurons_to_adj=[neurons], intervention_loc='neuron',
df_layer=layers, df_neuron=neurons[0])
female_df = compute_odds_ratio(female_df, gender='female')
male_df = compute_odds_ratio(male_df, gender='male')
df = pd.concat([female_df, male_df])
return df['odds_ratio'].mean()
def top_k_by_layer(model, model_type, tokenizer, templates, layer, layer_list, neuron_list, k=50, out_dir=''):
layer_2_ind = np.where(layer_list == layer)[0]
neuron_2 = neuron_list[layer_2_ind]
odd_abs_list = []
for i in range(k):
print(i)
temp_list = list(neuron_2[:i+1])
neurons = [temp_list]
# get marginal contrib to empty set
female_df = get_intervention_results(templates, tokenizer, gender='female',
layers_to_adj=len(temp_list)*[layer], neurons_to_adj=neurons, intervention_loc='neuron',
df_layer=layer, df_neuron=neurons[0])
male_df = get_intervention_results(templates, tokenizer, gender='male',
layers_to_adj=len(temp_list)*[layer], neurons_to_adj=neurons, intervention_loc='neuron',
df_layer=layer, df_neuron=neurons[0])
gc.collect()
# compute odds ratio differently for each gender
female_df = compute_odds_ratio(female_df, gender='female')
male_df = compute_odds_ratio(male_df, gender='male')
# merge and average
df = | pd.concat([female_df, male_df]) | pandas.concat |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import datetime
import os
import pandas as pd
deskPath = os.path.join(os.path.expanduser("~"), 'Desktop')
import csv
with open(deskPath+'\\ExportData.csv','r',encoding='UTF-8') as csvfile:
csv_reader = csv.reader(csvfile)
dates = []
cols = []
framedate = {}
allrow = []
for row in csv_reader:
allrow.append(row)
if row[0] != '' and row[0] not in dates:
dates.append(row[0])
if row[1] != '' and row[1] not in cols:
cols.append(row[1])
framedate[row[1]] = {}
for row1 in allrow:
framedate[row1[1]][row1[0]] = row1[2]
df = | pd.DataFrame(framedate, columns=cols, index=dates) | pandas.DataFrame |
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
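# Behaviour sketch of fTCCast, following the branches above (illustrative):
#   fTCCast('true')  -> 1
#   fTCCast('')      -> 0
#   fTCCast('1.5')   -> 1.5
#   fTCCast('2021-04-20 10:56:12.000') -> 1618916172.0  (epoch seconds, see the inline comments)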
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
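# Usage sketch for the derivative helper above (illustrative; TCsdfOPC and the
# channel ID are assumptions for demonstration only):
#
#     dfDeriv = getTCsOPCDerivative(TCsdfOPC, col='Objects. ... .In.QD_AV',
#                                   shiftSize=1, windowSize=60)
#     # -> columns: col, dt, dValue, dValueDt, dValueDtRollingMean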
logFilenamePattern='([0-9]+)(_)+([0-9]+)(\.log)' # group(3) is the postfix and number
logFilenameHeadPattern='([0-9,_]+)(\.log)' # group(1) is the head and the H5 key
# not all IDs are captured by the regex pID
# those are "post-processed" with pID2, getDfFromODIHelper and in getDfFromODI
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
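# Example of the groups captured by pID (the ID is the default used in
# getIDsFromID further below):
#   m = re.search(pID, 'Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S')
#   m.group('A')  -> 'Objects'
#   m.group('B')  -> '3S_XYZ_SEG_INFO'
#   m.group('C1') ... m.group('C5') -> '3S', 'L', '6', 'KED', '39'; m.group('C6') -> '_EL1'
#   m.group('D')  -> 'In'
#   m.group('E')  -> 'AL_S'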
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
returns a defined df from ODIFile
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
dfIDext=dfID.merge(dfGrped,left_index=True,right_index=True,how='left').filter(items=dfID.columns.to_list()+['last','count']).rename(columns={'last':'Initvalue','count':'NumOfInits'})
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
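# Usage sketch for the ODI helpers above (illustrative; the file name is an
# assumption):
#
#     dfODI = pd.read_csv('Example.ODI.csv', delimiter=';')
#     IDs = fODIFindAllSchieberSteuerungsIDs(dfODI)
#     dfRows = fODIFindAllZeilenWithIDs(dfODI, IDs)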
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
elif row['E'] in ['AC_AV','LR_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
elif row['E'] in ['AC_AV','LR_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
returns df with uniques
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
        # determine the column with the largest number of distinct values
lenMax=0
colMax=''
        # over all columns
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
        # over all remaining columns
for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
            # build the unique series s
s=pd.Series(dfID[col].unique(),name=col)
            # sort s
s.sort_values(inplace=True)
s=pd.Series(s.values,name=col)
dfIDUniqueCols=pd.concat([dfIDUniqueCols,s],axis=1)
dfIDUniqueCols=dfIDUniqueCols[dfID.columns]
except:
logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDUniqueCols
def getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',dfID=None,matchCols=['B','C1','C2','C3','C4','C5','D'],any=False):
"""
returns IDs matching ID
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
IDsMatching=[]
s=dfID.loc[ID,:]
for ID,row in dfID.iterrows():
match=True
for col in [col for col in row.index.values if col in matchCols]:
#if str(row[col])!=str(s[col]):
if row[col]!=s[col]:
match=False
break
else:
if any:
break
if match:
IDsMatching.append(ID)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
#except:
# logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return sorted(IDsMatching)
def getLDSResVecDf(
ID # ResVec-Defining-Channel; i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.AL_S
,dfID
,TCsLDSResDf
,matchCols # i.e. ['B','C1','C2','C3','C4','C5','C6','D'] for Segs; i.e. ['B','C','D'] for Drks
):
"""
returns a df with LDSResChannels as columns (AL_S, ...); derived by Filtering columns from TCsLDSResDf and renaming them
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
IDs=getIDsFromID(ID=ID,dfID=dfID,matchCols=matchCols)
dfFiltered=TCsLDSResDf.filter(items=IDs)
colDct={}
for col in dfFiltered.columns:
m=re.search(pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except:
logger.error("{0:s}".format(logStr))
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
def fGetFirstAndLastValidIdx(df):
"""
returns (tFirst,tLast)
"""
for idx,col in enumerate(df.columns):
tF=df[col].first_valid_index()
tL=df[col].last_valid_index()
if idx==0:
tFirst=tF
tLast=tL
else:
if tF < tFirst:
tFirst=tF
if tL > tLast:
tLast=tL
return (tFirst,tLast)
def fGetIDSets(
dfID
,divNr #'7'
,pipelineNrLst #['43','44']
        ,fctIn=None # function of the ID that returns False if the ID should (after all) not be part of the set
):
    # returns Dct: key: name of an ID set; value: the corresponding IDs
IDSets={}
IDs=[]
for ID in sorted(dfID.index.unique()):
m=re.search(pID,ID)
if m != None:
C1= m.group('C1')
C2= m.group('C2')
C3= m.group('C3')
C4= m.group('C4')
C5= m.group('C5')
if C1 in [divNr] and C3 in pipelineNrLst: # u.a. SEG ErgVecs
IDs.append(ID)
elif C2 in [divNr] and C4 in pipelineNrLst:
IDs.append(ID)
elif C3 in [divNr] and C5 in pipelineNrLst: # FT, PTI, etc.
IDs.append(ID)
if fctIn != None:
IDs=[ID for ID in IDs if fctIn(ID)]
IDSets['IDs']=IDs
IDsAlarm=[ID for ID in IDs if re.search(pID,ID).group('E') == 'AL_S']
IDSets['IDsAlarm']=IDsAlarm
IDsAlarmSEG=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsAlarmSEG']=IDsAlarmSEG
IDsAlarmDruck=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsAlarmDruck']=IDsAlarmDruck
IDsStat=[ID for ID in IDs if re.search(pID,ID).group('E') == 'STAT_S']
IDSets['IDsStat']=IDsStat
IDsStatSEG=[ID for ID in IDsStat if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsStatSEG']=IDsStatSEG
IDsStatDruck=[ID for ID in IDsStat if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsStatDruck']=IDsStatDruck
###
IDsSb=[ID for ID in IDs if re.search(pID,ID).group('E') == 'SB_S']
IDSets['IDsSb']=IDsSb
IDsSbSEG=[ID for ID in IDsSb if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsSbSEG']=IDsSbSEG
IDsSbDruck=[ID for ID in IDsSb if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsSbDruck']=IDsSbDruck
###
IDsZHK=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZHKNR_S']
IDSets['IDsZHK']=IDsZHK
IDsZHKSEG=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsZHKSEG']=IDsZHKSEG
IDsZHKDruck=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsZHKDruck']=IDsZHKDruck
IDsFT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'FT']
IDSets['IDsFT']=IDsFT
IDsPT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'PTI']
IDSets['IDsPT']=IDsPT
IDsPT_BCIND=[ID for ID in IDs if re.search(pID,ID).group('C5') == 'PTI' and re.search(pID,ID).group('E') == 'BCIND_S' ]
IDSets['IDsPT_BCIND']=IDsPT_BCIND
### Schieber
IDsZUST=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZUST']
IDsZUST=sorted(IDsZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDsZUST']=IDsZUST
IDs_3S_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == '3S_FBG_ESCHIEBER']
IDs_3S_XYZ_ESCHIEBER=sorted(IDs_3S_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C6'))
IDSets['IDs_3S_XYZ_ESCHIEBER']=IDs_3S_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == 'FBG_ESCHIEBER']
IDs_XYZ_ESCHIEBER=sorted(IDs_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C5')) #
IDSets['IDs_XYZ_ESCHIEBER']=IDs_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER_Ohne_ZUST=[ID for ID in IDs_XYZ_ESCHIEBER if re.search(pID,ID).group('E') != 'ZUST']
IDs_XYZ_ESCHIEBER_Ohne_ZUST=sorted(IDs_XYZ_ESCHIEBER_Ohne_ZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDs_XYZ_ESCHIEBER_Ohne_ZUST']=IDs_XYZ_ESCHIEBER_Ohne_ZUST
IDsSchieberAlle=IDsZUST+IDs_XYZ_ESCHIEBER_Ohne_ZUST+IDs_3S_XYZ_ESCHIEBER
IDSets['IDsSchieberAlle']=IDsSchieberAlle
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlle if re.search('LAEUFT$',ID) == None]
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlleOhneLAEUFT if re.search('LAEUFT_NICHT$',ID) == None]
IDSets['IDsSchieberAlleOhneLAEUFT']=IDsSchieberAlleOhneLAEUFT
return IDSets
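# Usage sketch (illustrative; the division and pipeline numbers follow the
# example values noted in the signature comments above):
#
#     IDSets = fGetIDSets(dfID, divNr='7', pipelineNrLst=['43', '44'])
#     IDSets['IDsAlarm']       # all AL_S channels of the selection
#     IDSets['IDsAlarmDruck']  # the PTI (pressure) subset thereof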
h5KeySep='/'
def fValueFct(x):
return pd.to_numeric(x,errors='ignore',downcast='float')
class AppLog():
"""
SIR 3S App Log (SQC Log)
Maintains a H5-File.
Existing H5-File will be deleted (if not initialized with h5File=...).
H5-Keys are:
* init
* lookUpDf
* lookUpDfZips (if initialized with zip7Files=...)
* Logfilenames praefixed by Log without extension
Attributes:
* h5File
* lookUpDf
zipName
logName
FirstTime (ScenTime - not #LogTime)
LastTime (ScenTime - mot #LogTime)
* lookUpDfZips
"""
    TCsdfOPCFill=False # if True, the NaNs in TCsdfOPC are filled; default: False
@classmethod
def getTCsFromDf(cls,df,dfID=pd.DataFrame(),TCsdfOPCFill=TCsdfOPCFill):
"""
returns several TC-dfs from df
        Processing of the dfs follows extractTCsToH5s; see there
Args:
* df: a df with Log-Data
* columns: ['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']
* dfID
* index: ID
                * only required if the IDs are to be split into Res1 and Res2
* TCsdfOPCFill: if True (default): fill NaNs in this df
Time curve dfs: cols:
* Time (TCsdfOPC: ProcessTime, other: ScenTime)
* ID
* Value
Time curve dfs:
* TCsdfOPC
* TCsSirCalc
* TCsLDSIn
* TCsLDSRes (dfID empty) or TCsLDSRes1, TCsLDSRes2
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if not dfID.empty:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
if not dfID.empty:
df=df.merge(dfID,how='left',left_on='ID',right_index=True,suffixes=('','_r'))
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=df[(df['SubSystem'].str.contains('^OPC'))
                    ### & ~(df['Value'].isnull()) # superfluous if df already satisfies this
][['ProcessTime','ID','Value']].pivot_table(index='ProcessTime', columns='ID', values='Value',aggfunc='last')
if TCsdfOPCFill:
for col in TCsdfOPC.columns:
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='ffill')
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='bfill')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=df[(df['SubSystem'].str.contains('^SirCalc')) | (df['SubSystem'].str.contains('^RTTM')) ][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^<-'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
if not dfID.empty:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_SEG_INFO'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_DRUCK'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
else:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
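    # Usage sketch for getTCsFromDf (illustrative; df and dfID come from the
    # caller, e.g. the 'init' key of the H5 file and getDfFromODI):
    #
    #     TCsdfOPC, TCsdfSirCalc, TCsdfLDSIn, TCsdfLDSRes1, TCsdfLDSRes2 = \
    #         AppLog.getTCsFromDf(df, dfID=dfID)
    #
    # With an empty dfID a single TCsdfLDSRes frame is returned instead of the
    # Res1/Res2 split (see the return statements above).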
def __init__(self,logFile=None,zip7File=None,h5File=None,h5FileName=None,readWithDictReader=False,nRows=None,readWindowsLog=False):
"""
(re-)initialize
        logFile:
            is read and stored in H5
            addZip7File(zip7File) reads all logs of a zip file and stores them in H5
        zipFile:
            1. logFile is read and stored in H5
            addZip7File(zip7File) reads all logs of a zip file and stores them in H5
            initializing with zipFile is identical to initializing with logFile if logFile is the 1st logFile of the zip
        after addZip7File(zip7File) - possibly repeated for several zips:
            data can be read with self.get(...) (returns 1 df)
            data can be read with self.getTCs(...) (returns several dfs in TC form)
            data can be read with self.getTCsSpecified(...) (returns 1 df in TC form)
            data in TC form can be extracted into separate H5s with self.extractTCsToH5s(...)
            with self.getTCsFromH5s(...) the TCs can be read back
            === addZip7File(zip7File) - possibly repeated - and extractTCsToH5s(...) are part of a 7Zip preprocessing step before the actual analysis ===
        h5File:
            the lookUp dfs are read from the H5 file
            the TC-H5 filenames belonging to the H5 file are set
            the TC-H5 files are not checked for existence, let alone read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
self.lookUpDf=pd.DataFrame()
self.lookUpDfZips=pd.DataFrame()
try:
if logFile != None and zip7File != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'3 Files (logFile and zip7File and h5File) specified.'))
elif logFile != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and zip7File) specified.'))
elif logFile != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and h5File) specified.'))
elif h5File != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (h5File and zip7File) specified.'))
elif logFile != None:
self.__initlogFile(logFile,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif zip7File != None:
self.__initzip7File(zip7File,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif h5File != None:
self.__initWithH5File(h5File)
else:
logger.debug("{0:s}{1:s}".format(logStr,'No File (logFile XOR zip7File XOR h5File) specified.'))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initlogFile(self,logFile,h5FileName=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with logFile
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # if logFile does not exist ...
if not os.path.exists(logFile):
logger.debug("{0:s}logFile {1:s} not existing.".format(logStr,logFile))
else:
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
self.__initH5File(logFile,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initH5File(self,h5File,df,h5FileName=None):
"""
creates self.h5File and writes 'init'-Key Logfile df to it
Args:
* h5File: name of logFile or zip7File; the Dir is the Dir of the H5-File
* df
* h5FileName: the H5-FileName without Dir and Extension; if None (default), "Log ab ..." is used
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(h5File)
# H5-File
if h5FileName==None:
h5FileTail="Log ab {0:s}.h5".format(str(df['#LogTime'].min())).replace(':',' ').replace('-',' ')
else:
h5FileTail=h5FileName+'.h5'
self.h5File=os.path.join(h5FileHead,h5FileTail)
            # if the H5 file already exists it is deleted
if os.path.exists(self.h5File):
os.remove(self.h5File)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileTail))
            # write the 'init' logfile
self.__toH5('init',df)
logger.debug("{0:s}'init'-Key Logfile done.".format(logStr))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initWithH5File(self,h5File,useRawHdfAPI=False):
"""
self.h5File=h5File
self.lookUpDf
self.lookUpDfZips
die lookUp-Dfs werden gelesen vom H5-File
die zum H5-File zugehoerigen TC-H5-Filenamen werden belegt, wenn diese H5-Files existieren
die TC-H5-Files werden nicht gelesen
der zum H5-File zugehoerige CVD-Filename wird belegt, wenn das H5-File existiert
das H5-File wird nicht gelesen
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # H5 file exists
if os.path.exists(h5File):
self.h5File=h5File
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=h5Store['lookUpDf']
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=h5Store['lookUpDfZips']
else:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=pd.read_hdf(self.h5File, key='lookUpDf')
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=pd.read_hdf(self.h5File, key='lookUpDfZips')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
#TC-H5s
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
h5FileOPC=name+TCPost+'OPC'+ext
h5FileSirCalc=name+TCPost+'SirCalc'+ext
h5FileLDSIn=name+TCPost+'LDSIn'+ext
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
h5FileLDSRes=name+TCPost+'LDSRes'+ext
if os.path.exists(h5FileOPC):
self.h5FileOPC=h5FileOPC
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileOPC))
if os.path.exists(h5FileSirCalc):
self.h5FileSirCalc=h5FileSirCalc
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileSirCalc))
if os.path.exists(h5FileLDSIn):
self.h5FileLDSIn=h5FileLDSIn
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSIn))
if os.path.exists(h5FileLDSRes):
self.h5FileLDSRes=h5FileLDSRes
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes))
if os.path.exists(h5FileLDSRes1):
self.h5FileLDSRes1=h5FileLDSRes1
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes1))
if os.path.exists(h5FileLDSRes2):
self.h5FileLDSRes2=h5FileLDSRes2
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes2))
h5FileCVD=name+'_'+'CVD'+ext
if os.path.exists(h5FileCVD):
self.h5FileCVD=h5FileCVD
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileCVD))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getInitDf(self,useRawHdfAPI=False):
"""
returns InitDf from H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=pd.DataFrame()
            # H5 file exists
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'init' in h5KeysStripped:
df=h5Store['init']
else:
if 'init' in h5KeysStripped:
df=pd.read_hdf(self.h5File, key='init')
else:
                logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,self.h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def __initzip7File(self,zip7File,h5FileName=None,nRows=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with zip7File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # if zip7File does not exist ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
zipFileDirname=os.path.dirname(zip7File)
logger.debug("{0:s}zipFileDirname: {1:s}".format(logStr,zipFileDirname))
aDfRead=False
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,len(allLogFiles)))
logger.debug("{0:s}getnames(): {1:s}.".format(logStr,str(allLogFiles)))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
logger.debug("{0:s}idx: {1:d} logFileNameInZip: {2:s}".format(logStr,idx,logFileNameInZip))
                    # the file that 7Zip will create on extract
logFile=os.path.join(zipFileDirname,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile) # logFileHead == dirname()
logger.debug("{0:s}idx: {1:d} logFileHead: {2:s} logFileTail: {3:s}".format(logStr,idx,logFileHead,logFileTail))
(name, ext)=os.path.splitext(logFile)
logger.debug("{0:s}idx: {1:d} name: {2:s} ext: {3:s}".format(logStr,idx,name,ext))
if logFileHead!='': # logFileHead == dirname()
if os.path.exists(logFileHead) and logFileHead not in extDirLstExistingLogged:
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert bereits.".format(logStr,idx,logFileHead))
extDirLstExistingLogged.append(logFileHead)
elif not os.path.exists(logFileHead):
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert noch nicht.".format(logStr,idx,logFileHead))
extDirLstTBDeleted.append(logFileHead)
                    # no logfile to process ...
if ext == '':
continue
                    # process the logfile ...
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
                        # extract
zip7FileObj.extract(path=zipFileDirname,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                            # nothing to process ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                                # nothing to process ...
continue
else:
aDfRead=True
# ...
                    # delete it again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
                    # we only want to read the 1st file ...
if aDfRead:
break;
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
self.__initH5File(zip7File,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __toH5(self,key,df,useRawHdfAPI=False,updLookUpDf=False,logName='',zipName='',noDfStorage=False):
"""
write df with key to H5-File (if not noDfStorage)
Args:
* updLookUpDf: if True, self.lookUpDf is updated with
* zipName (the Zip of logFile)
* logName (the name of the logFile i.e. 20201113_0000004.log)
* FirstTime (the first ScenTime in df)
* LastTime (the last ScenTime in df)
            self.lookUpDf is not written to H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(self.h5File)
if not noDfStorage:
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
try:
h5Store.put(key,df)
except Exception as e:
logger.error("{0:s}Writing df with h5Key={1:s} to {2:s} FAILED!".format(logStr,key,h5FileTail))
raise e
else:
df.to_hdf(self.h5File, key=key)
logger.debug("{0:s}Writing df with h5Key={1:s} to {2:s} done.".format(logStr,key,h5FileTail))
if updLookUpDf:
s=df['ScenTime']#['#LogTime']
FirstTime=s.iloc[0]
LastTime=s.iloc[-1]
if self.lookUpDf.empty:
data={ 'zipName': [zipName]
,'logName': [logName]
,'FirstTime' : [FirstTime]
,'LastTime' : [LastTime]
}
self.lookUpDf = pd.DataFrame (data, columns = ['zipName','logName','FirstTime','LastTime'])
self.lookUpDf['zipName']=self.lookUpDf['zipName'].astype(str)
self.lookUpDf['logName']=self.lookUpDf['logName'].astype(str)
else:
data={ 'zipName': zipName
,'logName': logName
,'FirstTime' : FirstTime
,'LastTime' : LastTime
}
self.lookUpDf=self.lookUpDf.append(data,ignore_index=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __processALogFile(self,logFile=None,delimiter='\t',nRows=None,readWithDictReader=False,fValueFct=fValueFct,readWindowsLog=False):
"""
process logFile
Args:
* logFile: logFile to be processed
* nRows: number of logFile rows to be processed; default: None (:= all rows are processed); if readWithDictReader: last row is also processed
* readWithDictReader: if True, csv.DictReader is used; default: None (:= pd.read_csv is used)
Returns:
* df: logFile processed to df
* converted:
* #LogTime: to datetime
* ProcessTime: to datetime
* Value: to float64
* ID,Direction,SubSystem,LogLevel,State,Remark: to str
* new:
* ScenTime datetime
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=None
try:
with open(logFile,'r') as f:
pass
(logFileHead,logFileTail)=os.path.split(logFile)
if readWithDictReader:
restkey='+'
                with open(logFile,"r") as csvFile: # the 1st line contains the header
reader = csv.DictReader(csvFile,delimiter=delimiter,restkey=restkey)
logger.debug("{0:s}{1:s} csv.DictReader reader processed.".format(logStr,logFileTail))
# If a row has more fields than fieldnames, the remaining data is put in a list and stored with the fieldname specified by restkey.
colNames=reader.fieldnames
                    dcts = [dct for dct in reader] # read all rows
logger.debug("{0:s}{1:s} csv.DictReader-Ergebnis processed.".format(logStr,logFileTail))
if nRows!=None:
dcts=dcts[0:nRows]+[dcts[-1]]
                    # only the field names are created as row columns
rows = [[dct[colName] for colName in colNames] for dct in dcts]
logger.debug("{0:s}{1:s} rows processed.".format(logStr,logFileTail))
                    # append the "surplus" columns to the last column
for i, dct in enumerate(dcts):
if restkey in dct:
restValue=dct[restkey]
restValueStr = delimiter.join(restValue)
newValue=rows[i][-1]+delimiter+restValueStr
#logger.debug("{0:s}{1:s} restValueStr: {2:s} - Zeile {3:10d}: {4:s} - neuer Wert letzte Spalte: {5:s}.".format(logStr,logFileTail,restValueStr,i,str(rows[i]),newValue))
rows[i][-1]=rows[i][-1]+newValue
logger.debug("{0:s}{1:s} restkey processed.".format(logStr,logFileTail))
index=range(len(rows))
df = pd.DataFrame(rows,columns=colNames,index=index)
else:
if nRows==None:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False)
else:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False,nrows=nRows)
logger.debug("{0:s}{1:s} pd.DataFrame processed.".format(logStr,logFileTail))
#logger.debug("{0:s}df: {1:s}".format(logStr,str(df)))
#LogTime
df['#LogTime']=pd.to_datetime(df['#LogTime'],unit='ms',errors='coerce') # NaT
#ProcessTime
df['ProcessTime']=pd.to_datetime(df['ProcessTime'],unit='ms',errors='coerce') # NaT
logger.debug("{0:s}{1:s} col ProcessTime processed.".format(logStr,logFileTail))
#Value
df['Value']=df.Value.str.replace(',', '.') # Exception: Line: 1137: <class 'AttributeError'>: Can only use .str accessor with string values!
df['Value']=fValueFct(df['Value'].values) # df['ValueProcessed'].apply(fValueFct)
logger.debug("{0:s}{1:s} col Value processed.".format(logStr,logFileTail))
#Strings
for col in ['ID','Direction','SubSystem','LogLevel','State','Remark']:
df[col]=df[col].astype(str)
logger.debug("{0:s}{1:s} String-cols processed.".format(logStr,logFileTail))
            #1618249551621 STD CVD 1615442324000 p-p BEGIN_OF_NEW_CONTROL_VOLUME 6-10-SV1-RB~6-10-BID-RB NULL NULL # is the string identical in both cases (Linux and Windows)?
            #1618249551621 STD CVD <- 156 CV_ID
            ##ScenTime
            ## SubSystem Direction ProcessTime ID Value State Remark
            ## Linux ---
            ## 1615029280000 INF SQC Starting cycle for 2021-03-06 12:14:38.000
            ## 1615029280000 STD LDS MCL 1615029278000 Main cycle loop 06.03.2021 12:14:38.000 (ScenTime: date and time in plain text; column ProcessTime holds the ScenTime!)
            ## Windows ---
            ## 1618256150711 STD SQC 1615457121000 Main cycle loop 11:05:21.000 (ScenTime time in plain text; column ProcessTime holds the ScenTime!)
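            # ScenTime is known only on the 'Main cycle loop' marker rows; after the join below it is
            # propagated forward (ffill) to all following rows and backward (bfill) to the rows before
            # the first marker; if no marker row exists at all, #LogTime is used as ScenTime.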
dfScenTime=df[df['ID']=='Main cycle loop'][['ProcessTime']]
dfScenTime.rename(columns={'ProcessTime':'ScenTime'},inplace=True)
df=df.join(dfScenTime)
df['ScenTime']=df['ScenTime'].fillna(method='ffill')
df['ScenTime']=df['ScenTime'].fillna(method='bfill')
if df['ScenTime'].isnull().values.all():
logger.debug("{0:s}Keine Zeile mit ID=='Main cycle loop' gefunden. ScenTime zu #LogTime gesetzt.".format(logStr))
df['ScenTime']=df['#LogTime'] # wenn keine Zeile mit ID=='Main cycle loop' gefunden wurde, wird ScenTime zu #LogTime gesetzt
            # finalize
df=df[['#LogTime','LogLevel','SubSystem','Direction','ProcessTime','ID','Value','ScenTime','State','Remark']]
logger.debug("{0:s}{1:s} processed with nRows: {2:s} (None if all).".format(logStr,logFileTail,str(nRows)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def rebuildLookUpDfZips(self,zip7Files,readWithDictReader=True,readWindowsLog=False):
"""
(re-)initialize with zip7Files
only persistent outcome is lookUpDfZips (Attribute and H5-Persistence)
        lookUpDf is changed but not H5-stored
        (re-)init with AppLog(h5File=...) after using rebuildLookUpDfZips to restore the old lookUpDf
        main usage of rebuildLookUpDfZips is to determine which zip7Files to add, e.g.:
zip7FilesToAdd=lx.lookUpDfZips[~(lx.lookUpDfZips['LastTime']<timeStartAusschnitt) & ~(lx.lookUpDfZips['FirstTime']>timeEndAusschnitt)].index.to_list()
"""
#noDfStorage=False
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#self.__initzip7File(zip7File=zip7Files[0],h5FileName=h5FileName,nRows=1,readWithDictReader=True)
for zip7File in zip7Files:
logger.info("{0:s}addZip7File: {1:s}".format(logStr,zip7File))
self.addZip7File(zip7File,firstsAndLastsLogsOnly=True,nRows=1,readWithDictReader=readWithDictReader,noDfStorage=True,readWindowsLog=readWindowsLog)
logger.debug("{0:s}lookUpDf: {1:s}".format(logStr,self.lookUpDf.to_string()))
df=self.lookUpDf.groupby(by='zipName').agg(['min', 'max'])
logger.debug("{0:s}df: {1:s}".format(logStr,df.to_string()))
minTime=df.loc[:,('FirstTime','min')]
maxTime=df.loc[:,('LastTime','max')]
minFileNr=df.loc[:,('logName','min')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
maxFileNr=df.loc[:,('logName','max')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
s=(maxTime-minTime)/(maxFileNr-minFileNr)
lookUpDfZips=s.to_frame().rename(columns={0:'TimespanPerLog'})
lookUpDfZips['NumOfFiles']=maxFileNr-minFileNr
lookUpDfZips['FirstTime']=minTime
lookUpDfZips['LastTime']=maxTime
lookUpDfZips['minFileNr']=minFileNr
lookUpDfZips['maxFileNr']=maxFileNr
lookUpDfZips=lookUpDfZips[['FirstTime','LastTime','TimespanPerLog','NumOfFiles','minFileNr','maxFileNr']]
            # write lookUpDfZips
self.lookUpDfZips=lookUpDfZips
self.__toH5('lookUpDfZips',self.lookUpDfZips)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def addZip7File(self,zip7File,firstsAndLastsLogsOnly=False,nRows=None,readWithDictReader=False,noDfStorage=False,readWindowsLog=False):
"""
add zip7File
Args:
            * zip7File: 7z archive whose logfiles shall be added
            * Args for internal usage:
                * firstsAndLastsLogsOnly (then True)
                * nRows (then 1)
                * readWithDictReader (then True)
            i.e. only the first and last logs per zip are read, and of those only the 1st and last row, and that with DictReader
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # if zip7File does not exist ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
logger.debug("{0:s}zip7FileHead (leer wenn zip7 im selben Verz.): {1:s} zip7FileTail: {2:s}.".format(logStr,zip7FileHead,zip7FileTail))
logger.info("{0:s}zip7File: {1:s} ...".format(logStr,zip7File))
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
if firstsAndLastsLogsOnly:
if idx not in [0,1,allLogFilesLen-2,allLogFilesLen-1]:
#logger.debug("{0:s}idx: {1:d} item: {2:s} NOT processed ...".format(logStr,idx,logFileNameInZip))
continue
logger.info("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
                        # the file that 7Zip will create on extract
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
                        # logFileNameInZip might denote a directory instead of a file
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
                            # directory!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
                            # no logfile to process ...
continue
                        # logFileNameInZip denotes a file
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
                        # extract
logger.debug("{0:s}Log: {1:s} wird extrahiert ... ".format(logStr,logFileTail))
import lzma
try:
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
except lzma.LZMAError:
logger.warning("{0:s}Log: {1:s} nicht erfolgreich extrahiert - continue ... ".format(logStr,logFileTail))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}Log: {1:s} wurde extrahiert. ".format(logStr,logFileTail))
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                            # nothing to process ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                                # nothing to process ...
continue
# ...
                        # delete it again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# ...
(name, ext)=os.path.splitext(logFileTail)
key='Log'+name
if zip7FileHead != '':
zipName=os.path.join(os.path.relpath(zip7FileHead),zip7FileTail)
else:
zipName=zip7FileTail
                        # write df
self.__toH5(key,df,updLookUpDf=True,logName=logFileTail,zipName=zipName,noDfStorage=noDfStorage)#os.path.join(os.path.relpath(zip7FileHead),zip7FileTail))
                        # then write lookUpDf right away ...
self.__toH5('lookUpDf',self.lookUpDf,noDfStorage=noDfStorage)
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getTotalLogTime(self):
"""
        Returns tuple: firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal # gross log time, net log time, sum of all gaps between 2 logfiles (should equal gross - net)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # content of the logs
tdTotal=pd.Timedelta('0 Seconds')
tdBetweenFilesTotal=pd.Timedelta('0 Seconds')
for idx,(index,row) in enumerate(self.lookUpDf.iterrows()):
if idx > 0:
tdBetweenFiles=row["FirstTime"]-lastTime
tdBetweenFilesTotal=tdBetweenFilesTotal+tdBetweenFiles
if tdBetweenFiles > pd.Timedelta('0 second'):
if tdBetweenFiles > pd.Timedelta('1 second'):
logger.info("{:s}Zeitdifferenz: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
pass
if tdBetweenFiles < pd.Timedelta('0 second'):
if tdBetweenFiles < -pd.Timedelta('1 second'):
pass
logger.info("{:s}Zeitueberlappung > 1s: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
td=row["LastTime"]-row["FirstTime"]
if type(td) == pd.Timedelta:
tdTotal=tdTotal+td
else:
                    print(index)# error!
lastTime=row["LastTime"]
lastFile=row["logName"]
lastZip=row["zipName"]
firstTime=self.lookUpDf.iloc[0]["FirstTime"]
lastTime=self.lookUpDf.iloc[-1]["LastTime"]
tdTotalGross=lastTime-firstTime
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal
def extractTCsToH5s(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill):
"""
        extracts TC data (and CVD data) from H5 into separate H5 files (postfixes: _TCxxx.h5 and _CVD.h5)
        TCsdfOPCFill: if True, the NULLs in the OPC TCs df are filled; default: False
        if timeStart != None: data is appended to existing .h5s; otherwise they are overwritten
        """
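        # Usage sketch (illustrative only; for h5File 'Logs.h5'):
        #   lx.extractTCsToH5s(dfID=dfID)   # writes Logs_TCOPC.h5, Logs_TCSirCalc.h5, Logs_TCLDSIn.h5,
        #                                   # Logs_TCLDSRes1.h5, Logs_TCLDSRes2.h5 and Logs_CVD.h5
        #   lx.extractTCsToH5s()            # without dfID: a single Logs_TCLDSRes.h5 instead of Res1/Res2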
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # create _TCxxx.h5 files (OPC, SirCalc, LDSIn, LDSRes1, LDSRes2 (,LDSRes)) and _CVD.h5
            # over all dfs in H5 (taking timeStart and timeEnd into account)
            # read
            # determine the TC subset: 'ID','ProcessTime','ScenTime','SubSystem','Value','Direction'
            # rows with 'Value' isnull() are NOT read
            # i.e. with a logfile semantics that resets a value (to whatever) via NULL rows, the value would stay on the last non-NULL value in a stop-plot output ...
            # ... for now ...
            # build subsets: ['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2' (,'TCsdfLDSRes')]
            # ... NULLs (NaNs) arise from the pivot with index = Time: not every ID has values for every Time (the superset)
            # store
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
self.h5FileOPC=name+TCPost+'OPC'+ext
self.h5FileSirCalc=name+TCPost+'SirCalc'+ext
self.h5FileLDSIn=name+TCPost+'LDSIn'+ext
if not dfID.empty:
# Attribute
self.h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
self.h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
                # the complement is deleted
h5FileLDSRes=name+TCPost+'LDSRes'+ext
try:
                    # if the TC H5 exists, it is deleted
if os.path.exists(h5FileLDSRes):
os.remove(h5FileLDSRes)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes))
del self.h5FileLDSRes
except:
pass
else:
# Attribut
self.h5FileLDSRes=name+TCPost+'LDSRes'+ext
                # the complements are deleted
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
try:
                    # if the TC H5 exists, it is deleted
if os.path.exists(h5FileLDSRes1):
os.remove(h5FileLDSRes1)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes1))
                    # if the TC H5 exists, it is deleted
if os.path.exists(h5FileLDSRes2):
os.remove(h5FileLDSRes2)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes2))
del self.h5FileLDSRes1
del self.h5FileLDSRes2
except:
pass
self.h5FileCVD=name+'_'+'CVD'+ext
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysOPC=['TCsOPC'+x for x in h5KeysPost]
h5KeysSirCalc=['TCsSirCalc'+x for x in h5KeysPost]
h5KeysLDSIn=['TCsLDSIn'+x for x in h5KeysPost]
h5KeysLDSRes1=['TCsLDSRes1'+x for x in h5KeysPost]
h5KeysLDSRes2=['TCsLDSRes2'+x for x in h5KeysPost]
h5KeysLDSRes=['TCsLDSRes'+x for x in h5KeysPost]
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes,h5KeysCVD)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes,h5KeyCVD) in enumerate(h5KeysAll):
                #H5 write mode
if idx==0:
if timeStart!=None:
mode='a'
else:
mode='w'
else:
mode='a'
logger.info("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
# CVD -------------------------------------------------------------------------------------------------
dfCVD=df[df['SubSystem']=='CVD']
df=df[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
df['Value']=df['Value'].apply(lambda x: fTCCast(x))
df=df[~(df['Value'].isnull())]
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
logger.debug("{0:s}{1:s}".format(logStr,'Write ...'))
TCsdfOPC.to_hdf(self.h5FileOPC,h5KeyOPC, mode=mode)
TCsdfSirCalc.to_hdf(self.h5FileSirCalc,h5KeySirCalc, mode=mode)
TCsdfLDSIn.to_hdf(self.h5FileLDSIn,h5KeyLDSIn, mode=mode)
if not dfID.empty:
TCsdfLDSRes1.to_hdf(self.h5FileLDSRes1,h5KeyLDSRes1, mode=mode)
TCsdfLDSRes2.to_hdf(self.h5FileLDSRes2,h5KeyLDSRes2, mode=mode)
else:
TCsdfLDSRes.to_hdf(self.h5FileLDSRes,h5KeyLDSRes, mode=mode)
# ---
dfCVD.to_hdf(self.h5FileCVD,h5KeyCVD, mode=mode)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return
def shrinkH5File(self):
"""
        the dfs are deleted from the H5 file
        extractTCsToH5s ### MUST ### have been run before
        after shrinkH5File the actual data is no longer available in the master H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # H5 exists
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys()) # /Log20201216_0000001
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
for key in h5Keys:
if re.match('(^/Log)',key):
logger.debug("{0:s}key removed: {1:s}".format(logStr,str(key)))
h5Store.remove(key.replace(h5KeySep,''))
else:
logger.debug("{0:s}key NOT removed: {1:s}".format(logStr,str(key)))
with pd.HDFStore(self.h5File) as h5Store:
pass
shrinkCmd="ptrepack --chunkshape=auto --propindexes --complib=blosc "+self.h5File+" "+self.h5File+".Shrinked"
logger.debug("{0:s}shrinkCmd: {1:s}".format(logStr,shrinkCmd))
if os.path.exists(self.h5File+".Shrinked"):
os.remove(self.h5File+".Shrinked")
os.system(shrinkCmd)
os.remove(self.h5File)
os.rename(self.h5File+".Shrinked",self.h5File)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def get(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,useRawHdfAPI=False):
"""
returns df with filter_fct applied
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
for h5Key in h5Keys:
logger.debug("{0:s}Get (pd.HDFStore) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=h5Store[h5Key]
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
else:
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getFromZips(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,readWithDictReader=False,readWindowsLog=False):
"""
returns df from Zips
        the data is read from the zips: extract the log, parse it, delete it again
        initialization must have been done with AppLog(zip7Files=...) since only then does self.lookUpDfZips exist
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
timeStart=pd.Timestamp(timeStart)
timeEnd=pd.Timestamp(timeEnd)
            # zips that have to be processed
dfLookUpZips=self.lookUpDfZips
if timeStart!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
for index, row in dfLookUpZips.iterrows():
zip7File=index
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
dTime=timeStart-row['FirstTime']
nStart = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())
dTime=timeEnd-timeStart
nDelta = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())+1
nEnd=nStart+nDelta
logger.debug("{0:s}zip7File: {1:s}: Start: {2:d}/{3:07d} End: {4:d}/{5:07d}".format(logStr,zip7FileTail
,nStart,nStart+row['minFileNr']
,nStart+nDelta,nStart+row['minFileNr']+nDelta))
try:
                # if zip7File does not exist ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
idxEff=0
for idx,logFileNameInZip in enumerate(allLogFiles):
if idx < nStart-idxEff or idx > nEnd+idxEff:
continue
logger.debug("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
                        # the file that 7Zip will create on extract
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
                        # logFileNameInZip might denote a directory instead of a file
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
                            # directory!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
                            # no logfile to process ...
idxEff+=1
continue
                        # logFileNameInZip denotes a file
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
                        # extract
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                            # nothing to process ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                                # nothing to process ...
continue
else:
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
# ...
                        # delete it again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getTCs(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill,persistent=False,overwrite=True):
"""
returns TCs-dfs
        processing of the dfs analogous to extractTCsToH5s; see there
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            TCKeys=['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2a','TCsdfLDSRes2b','TCsdfLDSRes2c']
if persistent:
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
#logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if set(TCKeys) & set(h5KeysStripped) == set(TCKeys):
if not overwrite:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - return aus H5-File ...".format(logStr,str(TCKeys)))
                            TCsdfOPC=pd.read_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc=pd.read_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn=pd.read_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1=pd.read_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a=pd.read_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b=pd.read_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c=pd.read_hdf(self.h5File,key='TCsdfLDSRes2c')
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - sollen aber ueberschrieben werden ...".format(logStr,str(TCKeys)))
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren nicht (alle) ...".format(logStr,str(TCKeys)))
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
dfLst=[]
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
dfSingle=pd.read_hdf(self.h5File, key=h5Key)
dfSingle=dfSingle[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
dfSingle=dfSingle[~(dfSingle['Value'].isnull())]
dfLst.append(dfSingle)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
df=pd.concat(dfLst)
del dfLst
logger.debug("{0:s}{1:s}".format(logStr,'Concat finished. Filter & Pivot ...'))
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
if persistent:
                logger.debug("{0:s}persistent: writing TCKeys {1:s} to H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC.to_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc.to_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn.to_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1.to_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a.to_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b.to_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c.to_hdf(self.h5File,key='TCsdfLDSRes2c')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2#a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
            return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def getTCsFromH5s(self,timeStart=None,timeEnd=None, LDSResOnly=False, LDSResColsSpecified=None, LDSResTypeSpecified=None, timeShiftPair=None):
"""
returns several TC-dfs from TC-H5s:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
LDSResOnly:
TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfLDSRes
LDSResColsSpecified:
return in LDSRes df(s) only the specified cols
all cols are returned otherwise
LDSResTypeSpecified:
return TCsdfLDSRes1 (SEG) for 'SEG' or TCsdfLDSRes2 (Druck) for 'Druck'
both are returned otherwise
        timeShiftPair: (period,freq): e.g. (1,'H'); if not None the index is shifted
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
try:
self.h5FileLDSRes1
Res2=True
except:
Res2=False
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if Res2:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2= | pd.DataFrame() | pandas.DataFrame |
### imports starts
import sys
from pathlib import Path
import pickle
import cv2
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import Adam
util_dir = Path.cwd().parent.joinpath('Utility')
sys.path.insert(1, str(util_dir))
from Configuration import frcnn_config
from DataGenerator import DataGeneratorV2
from Abstract import make_anchors, normalize_anchor, propose_score_bbox_list
from Layers import rpn
from Information import *
### import ends
def rpn_propose_RoI(C):
pstage('RPN is predicting Regions of Interest (RoIs) without NMS')
#input_shape, ratio, anchor_scales, anchor_ratios
anchors = make_anchors(C.input_shape, C.base_net.ratio, C.anchor_scales, C.anchor_ratios)
row_num, col_num = C.input_shape[:2]
input_shape = [row_num, col_num]
### reconstruct model
pinfo('Reconstructing model')
# reconstruct model file
cwd = Path.cwd()
weights_dir = C.weight_dir
model_weights = weights_dir.joinpath(C.rpn_model_name+'.h5')
pdebug(model_weights)
# load model
input_layer = Input(shape=C.input_shape)
x = C.base_net.get_base_net(input_layer)
rpn_layer = rpn(C.anchor_scales, C.anchor_ratios)
classifier = rpn_layer.classifier(x)
regressor = rpn_layer.regression(x)
model = Model(inputs=input_layer, outputs = [classifier,regressor])
model.load_weights(model_weights, by_name=True)
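    # The RPN architecture is rebuilt from the configuration and only the trained weights are
    # restored; by_name=True matches layers by name, so the base net and the RPN heads pick up
    # their parameters even though the model object differs from the one used for training.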
############################ Predicting by training data ###################################
pinfo('Start predicting for training data')
### preparing input data
train_generator = DataGeneratorV2(C.train_img_inputs_npy, C.train_labels_npy, C.train_deltas_npy, batch_size=1, shuffle=False)
### predicting by model
pinfo('RPN is scoring anchors and proposing delta suggestions')
outputs_raw = model.predict(x=train_generator)
score_maps = outputs_raw[0]
delta_maps = outputs_raw[1]
### filter out negative anchors and adjust positive anchors
all_bbox = []
img_idx = 0
bbox_idx = 0
dict_for_df={}
bbox_df = pd.read_csv(C.train_bbox_reference_file, index_col=0)
img_names = bbox_df['FileName'].unique().tolist()
imgNum = len(img_names)
if len(img_names) != len(score_maps):
perr('Number of images is inconsistent with the number of outputs')
sys.exit()
for img_name, score_map, delta_map in zip(img_names, score_maps, delta_maps):
sys.stdout.write(t_info(f'Proposing bounding boxes for image: {img_idx+1}/{imgNum}','\r'))
if img_idx+1 == imgNum:
sys.stdout.write('\n')
sys.stdout.flush()
# select all bbox whose objective score is >= 0.5 and put them in a list
score_bbox_pairs = propose_score_bbox_list(anchors, score_map, delta_map)
scores, bbox_raws = [], []
for score_bbox in score_bbox_pairs:
scores.append(score_bbox[0])
bbox_raws.append(score_bbox[1])
# trimming bboxes
for i, bbox in enumerate(bbox_raws):
if bbox[0] < 0:
bbox_raws[i][0] = 0
if bbox[1] > 1:
bbox_raws[i][1] = 1
if bbox[2] < 0:
bbox_raws[i][2] = 0
if bbox[3] > 1:
bbox_raws[i][3] = 1
if bbox[1] < bbox[0]:
pwarn(f"bbox {i} XMax is less than XMin")
if bbox[3] < bbox[2]:
pwarn(f"bbox {i} YMax is less than YMin")
# save parameters to dict
for score, bbox in zip(scores, bbox_raws):
dict_for_df[bbox_idx] = {'FileName': str(img_name),\
'XMin':bbox[0],\
'XMax':bbox[1],\
'YMin':bbox[2],\
'YMax':bbox[3],\
'Score':score}
bbox_idx += 1
img_idx += 1
# save proposed bboxes to local
output_df = pd.DataFrame.from_dict(dict_for_df, "index")
output_file = C.sub_data_dir.joinpath("mc_RoI_prediction_no_NMS_train.csv")
output_df.to_csv(output_file)
C.set_train_proposal(output_file)
############################ Predicting by validation data ###################################
pinfo('Start predicting for validation data')
### preparing input data
val_generator = DataGeneratorV2(C.validation_img_inputs_npy, C.validation_labels_npy, C.validation_deltas_npy, batch_size=1, shuffle=False)
### predicting by model
pinfo('RPN is scoring anchors and proposing delta suggestions')
outputs_raw = model.predict(x=val_generator)
score_maps = outputs_raw[0]
delta_maps = outputs_raw[1]
### filter out negative anchors and adjust positive anchors
all_bbox = []
img_idx = 0
bbox_idx = 0
dict_for_df={}
bbox_df = | pd.read_csv(C.validation_bbox_reference_file, index_col=0) | pandas.read_csv |
from django.conf import settings
import pandas as pd
import os
info_path = os.path.join(settings.BASE_DIR, 'info.pkl')
fin_path = os.path.join(settings.BASE_DIR, 'fin.pkl')
mc_path = os.path.join(settings.BASE_DIR, 'mc.pkl')
info = pd.read_pickle(info_path)
fin = pd.read_pickle(fin_path)
mc = | pd.read_pickle(mc_path) | pandas.read_pickle |
import pandas as pd
def multiIDgnomad(CHR, position, ref, alt):
position_list = str(position).split(',')
chr_element = ['chr']*len(list(str(position).split(',')))
chr_list = [str(CHR)]*len(str(position).split(','))
chr_final = list(map("".join,list(zip(chr_element,chr_list))))
ref_list = str(ref).split(',')
alt_list = str(alt).split(',')
outlist = list(map(":".join, list(zip(chr_final,position_list))))
outlist = list(map("".join, list(zip(outlist,ref_list))))
outlist = list(map(">".join, list(zip(outlist,alt_list))))
outlist = [x.rstrip('>').replace('nan>','').replace('>nan','') for x in outlist]
outlist = ','.join(outlist)
return(outlist)
def multiID(CHR, position, ref, alt):
position_list = str(position).split(',')
chr_element = ['chr']*len(list(str(position).split(',')))
chr_list = [str(CHR)]*len(str(position).split(','))
chr_final = list(map("".join,list(zip(chr_element,chr_list))))
ref_list = str(ref).split(',')
alt_list = str(alt).split(',')
outlist = list(map("_".join, list(zip(chr_final,position_list,ref_list,alt_list))))
outlist = ','.join(outlist)
return(outlist)
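# Example of the two ID formats produced above (illustrative positions/alleles):
# multiIDgnomad(1, '97915614,97981343', 'C,A', 'T,G') -> 'chr1:97915614C>T,chr1:97981343A>G'
# multiID(1, '97915614,97981343', 'C,A', 'T,G') -> 'chr1_97915614_C_T,chr1_97981343_A_G'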
df = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#df['actionable'].loc[(df['SYMBOL'] == 'CYP4F2') & (df['allele'] == '*2')] = 'Yes'
df = df.loc[(df['count_carrier_ids'].astype(str) != 'nan') & (df['actionable'] == 'Yes')].copy()
reference = pd.read_csv('/path/to/reference_HAPLOTYPES_20201130_hg38_hg19.csv',sep='\t')
reference['ID2'] = reference['SYMBOL'] + '_' + reference['allele'].astype(str)
reference['multiID_gnomad'] = reference.apply(lambda x: multiIDgnomad(x['chr'], x['position_hg19_gnomad'],x['ref_gnomad'],x['alt_gnomad']),axis=1)
df['multiID_gnomad'] = df['ID2'].map(dict(zip(list(reference['ID2']),list(reference['multiID_gnomad']))))
sampleinfo = pd.read_csv('/home/jlanillos/CNIO/PGx/dictionary_proc.csv',sep='\t')
sampleinfo['sample'] = sampleinfo['sample'].astype(str)
gender = pd.read_csv('/path/to/gender.csv',sep='\t')
gender_dict = dict(zip(list(gender['annonimousID'].astype(str)),list(gender['gender'])))
country_dict = dict(zip(list(sampleinfo['sample']),list(sampleinfo['from'])))
country_dict_general = dict(zip(list(sampleinfo['sample']),list(sampleinfo['from_general'])))
def calculateNallelesperRS(multiID_gnomad, gts):
multiID_gnomad = multiID_gnomad.split(',')
d_al = list()
for al in multiID_gnomad:
genotypes = [j.split(',')[multiID_gnomad.index(al)] for j in gts.split(';')]
heteroz = genotypes.count('0/1')
homoz = 2*genotypes.count('1/1')
wt = 2*genotypes.count('0/0') + heteroz # Adding all wt alleles, including those from 0/0 individuals and those from 0/1
othergenotypes = list()
for z in list(set(genotypes)):
if (z != '0/1') and (z != '1/1') and (z != '0/0'):
othergenotypes.append(z)
wt = wt + 2*genotypes.count(z) # assume that other values different than 0/1, 1/1 and 0/0 are considered as 0/0 too
d_al.append(','.join([str(heteroz + homoz),str(wt)]))
return(';'.join(d_al))
# New function added to solve the count of alleles in X-chrom genes such as G6PD
def correctNallelesperRS_chromX(multiID_gnomad, gts, samples, gender_dict):
multiID_gnomad = multiID_gnomad.split(',')
samples = [i.rstrip('_gt') for i in samples.split(',')]
gender_samples = [gender_dict[i] for i in samples]
d_al = list()
for al in multiID_gnomad:
genotypes = [j.split(',')[multiID_gnomad.index(al)] for j in gts.split(';')]
genotypes_female = [i for i,j in zip(genotypes, gender_samples) if j == 'F']
genotypes_male = [i for i,j in zip(genotypes, gender_samples) if j == 'M']
heteroz = genotypes_female.count('0/1') + genotypes_male.count('0/1')
homoz = 2*genotypes_female.count('1/1') + genotypes_male.count('1/1')
wt = 2*genotypes_female.count('0/0') + genotypes_male.count('0/0')
othergenotypes = list()
for z in list(set(genotypes)):
if (z != '0/1') and (z != '1/1') and (z != '0/0'):
othergenotypes.append(z)
wt = wt + 2*genotypes_female.count(z) + genotypes_male.count(z) # assume that other values different than 0/1, 1/1 and 0/0 are considered as 0/0 too
d_al.append(','.join([str(heteroz + homoz),str(wt)]))
return(';'.join(d_al))
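# Counting sketch (illustrative genotypes): for one variant with genotypes ['0/1','1/1','0/0'] on an
# autosome, calculateNallelesperRS returns '3,3' (ALT alleles = 1 from the het + 2 from the hom;
# WT alleles = 2 from 0/0 + 1 from the het). On chromosome X, male '1/1' and '0/0' genotypes
# contribute a single allele each, which is what correctNallelesperRS_chromX corrects for.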
def splitByCountry(carrier_ids, carrier_gt, wt_ids, wt_gt, country_dict, country, key):
carrier_ids = carrier_ids.replace('_gt','').split(',')
carrier_gt = carrier_gt.split(';')
new_carrier_ids = list()
new_carrier_gt = list()
wt_ids = wt_ids.replace('_gt','').split(',')
wt_gt = wt_gt.split(';')
new_wt_ids = list()
new_wt_gt = list()
for i,j in zip(carrier_ids,carrier_gt):
if country_dict[i] == country:
new_carrier_ids.append(i+'_gt')
new_carrier_gt.append(j)
new_carrier_ids = ','.join(new_carrier_ids)
new_carrier_gt = ';'.join(new_carrier_gt)
for i,j in zip(wt_ids,wt_gt):
if country_dict[i] == country:
new_wt_ids.append(i+'_gt')
new_wt_gt.append(j)
new_wt_ids = ','.join(new_wt_ids)
new_wt_gt = ';'.join(new_wt_gt)
if key == 'onlypositivevalues_carrier_ids':
outlist = new_carrier_ids
elif key == 'onlypositivevalues_carrier_gt':
outlist = new_carrier_gt
elif key == 'onlypositivevalues_wt_ids':
outlist = new_wt_ids
elif key == 'onlypositivevalues_wt_gt':
outlist = new_wt_gt
return(outlist)
countries = list(set(sampleinfo['from']))
cols = ['onlypositivevalues_carrier_ids','onlypositivevalues_carrier_gt','onlypositivevalues_wt_ids','onlypositivevalues_wt_gt']
for country in countries:
for col in cols:
df[country + '_' + col] = df.apply(lambda x: splitByCountry(x['onlypositivevalues_carrier_ids'],x['onlypositivevalues_carrier_gt'],x['onlypositivevalues_wt_ids'],x['onlypositivevalues_wt_gt'], country_dict, country, col),axis = 1)
df['aux'] = df[country + '_' +'onlypositivevalues_carrier_gt'].astype(str) + ';' + df[country + '_' + 'onlypositivevalues_wt_gt'].astype(str)
df['aux'] = df['aux'].apply(lambda x: x.replace('nan;','').replace(';nan','').rstrip(';').lstrip(';'))
df['aux_ids'] = df[country + '_' +'onlypositivevalues_carrier_ids'].astype(str) + ',' + df[country + '_' + 'onlypositivevalues_wt_ids'].astype(str)
df['aux_ids'] = df['aux_ids'].apply(lambda x: x.replace('nan,','').replace(',nan','').rstrip(',').lstrip(','))
df[country + '_N_alleles'] = df.apply(lambda x: calculateNallelesperRS(x['multiID_gnomad'], x['aux']) if str(x['chr']) != 'X' else correctNallelesperRS_chromX(x['multiID_gnomad'], x['aux'], x['aux_ids'], gender_dict),axis=1)
countries = ['LATAM'] # list(set(sampleinfo['from_general']))
cols = ['onlypositivevalues_carrier_ids','onlypositivevalues_carrier_gt','onlypositivevalues_wt_ids','onlypositivevalues_wt_gt']
for country in countries:
for col in cols:
df[country + '_' + col] = df.apply(lambda x: splitByCountry(x['onlypositivevalues_carrier_ids'],x['onlypositivevalues_carrier_gt'],x['onlypositivevalues_wt_ids'],x['onlypositivevalues_wt_gt'], country_dict_general, country, col),axis = 1)
df['aux'] = df[country + '_' +'onlypositivevalues_carrier_gt'].astype(str) + ';' + df[country + '_' + 'onlypositivevalues_wt_gt'].astype(str)
df['aux'] = df['aux'].apply(lambda x: x.replace('nan;','').replace(';nan','').rstrip(';').lstrip(';'))
df['aux_ids'] = df[country + '_' +'onlypositivevalues_carrier_ids'].astype(str) + ',' + df[country + '_' + 'onlypositivevalues_wt_ids'].astype(str)
df['aux_ids'] = df['aux_ids'].apply(lambda x: x.replace('nan,','').replace(',nan','').rstrip(',').lstrip(','))
df[country + '_N_alleles'] = df.apply(lambda x: calculateNallelesperRS(x['multiID_gnomad'], x['aux']) if str(x['chr']) != 'X' else correctNallelesperRS_chromX(x['multiID_gnomad'], x['aux'], x['aux_ids'], gender_dict),axis=1)
country = 'ALL'
df['aux'] = df['onlypositivevalues_carrier_gt'].astype(str) + ';' + df['onlypositivevalues_wt_gt'].astype(str)
df['aux'] = df['aux'].apply(lambda x: x.replace('nan;','').replace(';nan','').rstrip(';').lstrip(';'))
df['aux_ids'] = df['onlypositivevalues_carrier_ids'].astype(str) + ',' + df['onlypositivevalues_wt_ids'].astype(str)
df['aux_ids'] = df['aux_ids'].apply(lambda x: x.replace('nan,','').replace(',nan','').rstrip(',').lstrip(','))
df[country + '_N_alleles'] = df.apply(lambda x: calculateNallelesperRS(x['multiID_gnomad'], x['aux']) if str(x['chr']) != 'X' else correctNallelesperRS_chromX(x['multiID_gnomad'], x['aux'], x['aux_ids'], gender_dict),axis=1)
gnomad = | pd.read_csv('/path/to/gnomAD_info/gnomAD_rs_alleles_pop.info.csv',sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
log_data = open('./data/access.log', 'r')
fields = ['ts', 'elapsed', 'remhost', 'status', 'bytes',
'method', 'url', 'rfc931', 'peerstatus', 'type']
split_list = list()
for line in log_data:
ls = line.split()
l_type = ls[9]
l_url = ls[6]
# if l.type == "text/html":
if l_type == "application/vnd.apple.mpegurl":
# Group by Time Interval
file = (l_url).split("/")[-1]
if file == "playlist.m3u8":
# split_list.append(line.split())
split_list.append([ls[0], ls[2], ls[6], file])
df = | pd.DataFrame(split_list, columns=['ts', 'remhost', 'url', 'file']) | pandas.DataFrame |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
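# Illustrative call (hypothetical sid/estimate values): tuples such as
# [(0, 10., pd.Timestamp('2015-01-05')), (0, 11., pd.Timestamp('2015-01-08'))]
# yield a frame indexed by (at_date, knowledge_date) whose sid-0 column holds 10.
# from 2015-01-05 on and 11. from 2015-01-08 on, forward-filled through end_date.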
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
        We define q1 < q2, and thus event1 < event2, since event1 occurs
        during q1 and event2 occurs during q2, and we assume that there can
        only be 1 event per quarter. There can be multiple estimates per
        quarter leading up to the event, but we assume that estimates will
        not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
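        For example, the interleaving q1e1=2015-01-01, q1e2=2015-01-04,
        q2e1=2015-01-14, q2e2=2015-01-17 satisfies q1e1 < q1e2 < the first Q1
        release date (2015-01-13) and q2e1 < q2e2, so it is assigned its own
        sid below.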
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
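        # For example, if the generated sids are 0..N, sid N + 1 is appended
        # here with no event data; test_estimates below asserts that its
        # pipeline output is entirely null.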
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
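        # Worked example (using the release dates defined above): on
        # 2015-01-12 the latest-known Q1 event date (2015-01-13 or
        # 2015-01-14) is still in the future, so the latest Q1 estimate is
        # returned; once that event date has passed, we fall through to the
        # Q2 branch below.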
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
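        # Column names pair each raw column with the number of quarters out,
        # e.g. 'estimate1' for 1 quarter out and 'estimate2' for 2 quarters
        # out, matching the names requested in test_multiple_qtrs_requested.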
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
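    # itertools.product yields (start_date, num_announcements_out) pairs such
    # as (2015-01-09, 1), (2015-01-09, 2), (2015-01-15, 1), ..., which
    # parameterize test_estimate_windows_at_quarter_boundaries below.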
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
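        # With the sids defined in make_events ([0, 10, 20]) this returns
        # [0, 1, ..., 9, 10, 11, ..., 19, 20].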
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
        # The window length should reach from the starting index back to the
        # first date on which we got data. The goal is to ensure that, as we
        # progress through the timeline, all of the data we have received
        # since that first date is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
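        # For example, when start_date is 2015-01-09 the window covers the
        # sessions from window_test_start_date (2015-01-05) through
        # 2015-01-09 inclusive; the exact length depends on the trading
        # calendar in use.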
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and
    with split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., | pd.Timestamp('2015-01-12') | pandas.Timestamp |
from IPython import embed
import pandas as pd
import numpy as np
import os
import sys
sys.path.insert(0,'..')
from run_model import QuaLiKizDuoNN, QuaLiKizMultiNN, QuaLiKizNDNN, QuaLiKizComboNN
simple_nns = ['efe_GB',
'efi_GB',
]
# 'dfe_GB',
# 'dfi_GB',
# 'gam_GB_less2max',
# 'gam_GB_leq2max',
# 'vte_GB_plus_vce_GB',
# 'vti_GB_plus_vci_GB']
#
#combo_nns = {'efe_GB2': ['efe_GB_min_efeETG_GB', 'efeETG_GB', lambda x, y: x + y],
# 'vte_GB_plus_vce_GB2': ['vte_GB', 'vce_GB', lambda x, y: x + y],
# 'vti_GB_plus_vci_GB2': ['vti_GB', 'vci_GB', lambda x, y: x + y]
#}
#nns = []
#for name in simple_nns:
# nn = QuaLiKizNDNN.from_json('nns/nn_' + name + '.json')
# nns.append(nn)
nn_dict = {}
path = 'nns'
for file_ in os.listdir(path):
if file_.endswith('.json'):
nn_dict[file_[3:-5]] = QuaLiKizNDNN.from_json(os.path.join(path, file_))
#efe_fancy = 1. + (3.) / (2. + 1) + (5.) / (4. + 1)
#efi_fancy = (2. * 3.) / (2. + 1) + (4. * 5.) / (4. + 1)
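# The combinations below reconstruct individual fluxes from networks that
# predict a ratio and a sum: writing r = efi/efe and s = efi + efe gives
# efe = s / (r + 1) and efi = r * s / (r + 1), which is exactly what the
# lambdas passed to QuaLiKizComboNN compute (per mode for the "A" variants,
# for the total fluxes in the "C" variants).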
efe_GB_A = QuaLiKizComboNN('efe_GB_A', [nn_dict['efeETG_GB'],
nn_dict['efiITG_GB_div_efeITG_GB'],
nn_dict['efiITG_GB_plus_efeITG_GB'],
nn_dict['efiTEM_GB_div_efeTEM_GB'],
nn_dict['efiTEM_GB_plus_efeTEM_GB']],
lambda a, b, c, d, e: a + c / (b + 1) + e / (d + 1))
efi_GB_A = QuaLiKizComboNN('efi_GB_A', [
nn_dict['efiITG_GB_div_efeITG_GB'],
nn_dict['efiITG_GB_plus_efeITG_GB'],
nn_dict['efiTEM_GB_div_efeTEM_GB'],
nn_dict['efiTEM_GB_plus_efeTEM_GB']],
lambda b, c, d, e: (b * c) / (b + 1) + (d * e) / (d + 1))
efe_GB_C = QuaLiKizComboNN('efe_GB_C', [nn_dict['efi_GB_div_efe_GB'],
nn_dict['efi_GB_plus_efe_GB']],
lambda a, b: b / (a + 1))
efi_GB_C = QuaLiKizComboNN('efi_GB_C', [nn_dict['efi_GB_div_efe_GB'],
nn_dict['efi_GB_plus_efe_GB']],
lambda a, b: (a * b) / (a + 1))
efe_GB_D = nn_dict['efe_GB']
efi_GB_D = nn_dict['efi_GB']
nns = [
efe_GB_A,
efi_GB_A,
efe_GB_C,
efi_GB_C,
efe_GB_D,
efi_GB_D,
nn_dict['efeTEM_GB'],
nn_dict['efeETG_GB'],
nn_dict['efeITG_GB'],
nn_dict['efiTEM_GB'],
nn_dict['efiITG_GB']
]
nn = QuaLiKizMultiNN(nns)
#
#for name, recipe in combo_nns.items():
# nn1 = QuaLiKizNDNN.from_json('nns/nn_' + recipe[0] + '.json')
# nn2 = QuaLiKizNDNN.from_json('nns/nn_' + recipe[1] + '.json')
# nn = QuaLiKizDuoNN(name, nn1, nn2, recipe[2])
# nns.append(nn)
#nn = QuaLiKizMultiNN(nns)
if __name__ == '__main__':
scann = 24
input = | pd.DataFrame() | pandas.DataFrame |
import json
import cv2
import pandas as pd
import numpy as np
from .process_steps import Analysis
from .parameters import SpotlobParameterSet
from .process_opencv import draw_contours, crop_to_contour
class CircleAnalysis(Analysis):
def __init__(self, calibration=None, extended_output=True):
self.calibration = calibration
super(CircleAnalysis, self).__init__(
self.analyze, [], extended_output=extended_output)
def analyze(self, metadata):
contours = metadata['contours']
areas = []
ellipses_positions = []
ellipses_major_axes = []
ellipses_minor_axes = []
ellipses_angles = []
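        # Note: cv2.fitEllipse returns ((center_x, center_y),
        # (major_axis, minor_axis), rotation_angle) and raises cv2.error for
        # degenerate input (e.g. contours with fewer than five points), which
        # is why the loop below falls back to NaN values in that case.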
for cont in contours:
# AREA
areas += [cv2.contourArea(cont)]
# ELLIPSE
try:
e_pos, (e_major_ax, e_minor_ax), angle = cv2.fitEllipse(cont)
except cv2.error:
e_pos, (e_major_ax,
e_minor_ax), angle = np.nan, (np.nan, np.nan), np.nan
ellipses_positions += [np.array(e_pos)]
ellipses_major_axes += [e_major_ax]
ellipses_minor_axes += [e_minor_ax]
ellipses_angles += [angle]
res_dict = {"area_px2": areas,
"ellipse_position_px": ellipses_positions,
"ellipse_majorAxis_px": ellipses_major_axes,
"ellipse_minorAxis_px": ellipses_minor_axes,
"ellipse_angle": ellipses_angles}
if self.extended_output:
res_dict.update({"contours": contours})
result = | pd.DataFrame(res_dict) | pandas.DataFrame |
import forecaster as fc
import optimizer as opt
import trader as td
import datetime as dt
import utilities as util
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def calc_start_date(end_date=dt.datetime(2017,1,1), data_size=12):
return end_date-dt.timedelta(weeks=int(data_size * 52/12))
def run_today(start_date=dt.datetime(2015,1,1), end_date=dt.datetime(2017,1,1), n_days=21, data_size=12,
myport=['AAPL', 'GOOG'], allocations=[0.5,0.5],
train_size=0.7, max_k=50, max_trade_size=0.1, gen_plot=False, verbose=False, savelogs=False):
"""
:param start_date: Beginning of time period
:param end_date: End of time period
:param n_days: Number of days into the future to predict the daily returns of a fund
:param data_size: The number of months of data to use in the machine learning model.
:param myport: The funds available in your portfolio
:param allocations: The percentage of your portfolio invested in the funds
    :param train_size: The percentage of data used for training the ML model; the remainder is used for testing.
:param max_k: Maximum number of neighbors used in kNN
:param max_trade_size: The maximum percentage of your portfolio permitted to be traded in any one transaction.
    :param gen_plot: Boolean indicating whether to plot the results
    :param verbose: Boolean indicating whether to print progress information during execution
    :param savelogs: Boolean indicating whether to save logs of intermediate results
:return:
"""
start_date = calc_start_date(end_date, data_size)#end_date - dt.timedelta(weeks=int(data_size * 52/12))
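    # Minimal usage sketch (hypothetical values, matching the defaults above):
    #   run_today(end_date=dt.datetime(2017, 1, 1), n_days=21,
    #             myport=['AAPL', 'GOOG'], allocations=[0.5, 0.5])
    # Note that start_date is recomputed above from end_date and data_size,
    # so the start_date argument is effectively ignored.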
#print('start:', start_date, 'end:', end_date)
if verbose: print('-'*20 + '\nFORECAST\n' + '-'*20)
forecast = fc.forecast(start_date, end_date, symbols=myport, train_size=train_size,
n_days=n_days, max_k=max_k, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n'+'-'*20 + '\nOPTIMIZE\n' + '-'*20)
target_allocations = opt.optimize_return(forecast, myport, allocations, gen_plot=gen_plot, verbose=verbose, savelogs=savelogs)
if verbose: print('\n' + '-'*20 + '\nORDERS\n' + '-'*20)
trade_date = forecast.index.max()
orders = td.create_orders(myport, allocations, target_allocations, trade_date=trade_date,max_trade_size=max_trade_size, verbose=verbose, savelogs=savelogs)
if verbose: print(orders)
new_allocations = allocations.copy()
for i in range(orders.shape[0]):
# fix this code so that the correct allocations are updated!
index = myport.index(orders.loc[i, 'Symbol'])
#symbol = orders.loc[i, 'Symbol']
if orders.loc[i, 'Action'] == 'SELL':
new_allocations[index] -= orders.loc[i, 'Quantity']
else:
new_allocations[index] += orders.loc[i, 'Quantity']
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast, allocations=new_allocations)
if verbose:
print("Portfolios:", "Current", "Target","New")
print("Daily return: %.5f %.5f %.5f" % (adr_current, adr_target, adr_new))
print("Daily Risk: %.5f %.5f %.5f" % (vol_current, vol_target, vol_new))
print("Sharpe Ratio: %.5f %.5f %.5f" % (sr_current, sr_target, sr_new))
print("Return vs Risk: %.5f %.5f %.5f" % (adr_current/vol_current, adr_target/vol_target, adr_new/vol_new))
print("\nALLOCATIONS\n" + "-" * 40)
print("Symbol", "Current", "Target", 'New')
for i, symbol in enumerate(myport):
print("%s %.3f %.3f %.3f" %
(symbol, allocations[i], target_allocations[i], new_allocations[i]))
# Compare daily portfolio value with SPY using a normalized plot
if gen_plot:
fig, ax = plt.subplots()
ax.scatter(vol_current, adr_current, c='green', s=15, alpha=0.5) # Current portfolio
ax.scatter(vol_target, adr_target, c='red', s=15, alpha=0.5) # ef
ax.scatter(vol_new, adr_new, c='black', s=25, alpha=0.75) # ef
ax.set_xlabel('St. Dev. Daily Returns')
ax.set_ylabel('Mean Daily Returns')
#ax.set_xlim(min(vol)/1.5, max(vol)*1.5)
#ax.set_ylim(min(adr)/1.5, max(adr)*1.5)
ax.grid()
ax.grid(linestyle=':')
fig.tight_layout()
plt.show()
# add code to plot here
df_temp = pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1)
        df_temp = df_temp / df_temp.iloc[0, :]
util.plot_data(df_temp, 'Forecasted Daily portfolio value and SPY', 'Date-21', 'Normalized Price')
if False: # meh was going to plot portfolio values for the last year but trying something else now
prior_prices = util.load_data(myport, start_date, end_date)
prior_prices.fillna(method='ffill', inplace=True)
prior_prices.fillna(method='bfill', inplace=True)
#prices_SPY = prior_prices['SPY'] # SPY prices, for benchmark comparison
prior_prices = prior_prices[myport] # prices of portfolio symbols
forecast_prices = forecast * prior_prices
time_span = pd.date_range(forecast.index.min(), end_date + dt.timedelta(days=n_days*2))
forecast_prices = forecast_prices.reindex(time_span)
forecast_prices = forecast_prices.shift(periods=n_days*2)
forecast_prices = forecast_prices.dropna()
forecast_prices = pd.concat([prior_prices, forecast_prices], axis=0)
adr_current, vol_current, sr_current, pv_current = util.compute_returns(forecast_prices, allocations=allocations)
adr_target, vol_target, sr_target, pv_target = util.compute_returns(forecast_prices, allocations=target_allocations)
adr_new, vol_new, sr_new, pv_new = util.compute_returns(forecast_prices, allocations=new_allocations)
df_temp = | pd.concat([pv_current, pv_target, pv_new], keys=['Current', 'Target', 'New'], axis=1) | pandas.concat |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
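# The remaining dummy frames in this file follow the same layout as
# AIR_FCST_LINEAR_95 above: "time", "fcst", "fcst_lower" and "fcst_upper"
# columns keyed by integer position, with wider intervals for the 99% variants
# and +/-inf bounds for PEYTON_FCST_LINEAR_INVALID_ZERO. A consuming test could
# sanity-check a frame roughly like this (illustrative only):
#   df = AIR_FCST_LINEAR_95
#   assert (df["fcst_lower"] <= df["fcst"]).all()
#   assert (df["fcst"] <= df["fcst_upper"]).all()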
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
            33: pd.Timestamp("2012-06-04 00:00:00"),
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pickle
import joblib
import inspect
import pandas as pd
import numpy as np
from moonshot import MoonshotML
from moonshot.cache import TMP_DIR
from moonshot.exceptions import MoonshotError
from sklearn.tree import DecisionTreeClassifier
class SKLearnMachineLearningTestCase(unittest.TestCase):
def setUp(self):
"""
Trains a scikit-learn model.
"""
self.model = DecisionTreeClassifier()
# Predict Y will be same as X
X = np.array([[1,1],[0,0]])
Y = np.array([1,0])
self.model.fit(X, Y)
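        # With this two-point, perfectly separable training set the tree
        # memorizes its inputs, so self.model.predict(X) reproduces Y exactly
        # (array([1, 0])); the tests below rely on that deterministic mapping.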
self.pickle_path = "{0}/decision_tree_model.pkl".format(TMP_DIR)
self.joblib_path = "{0}/decision_tree_model.joblib".format(TMP_DIR)
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
for file in (self.pickle_path, self.joblib_path):
if os.path.exists(file):
os.remove(file)
def test_complain_if_mix_dataframe_and_series(self):
"""
Tests error handling when the features list contains a mix of
DataFrames and Series.
"""
# pickle model
with open(self.pickle_path, "wb") as f:
pickle.dump(self.model, f)
class DecisionTreeML1(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
# DataFrame then Series
features.append(prices.loc["Close"] > 10)
features.append(prices.loc["Close"]["FI12345"] > 10)
return features, None
class DecisionTreeML2(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
# Series then DataFrame
features.append(prices.loc["Close"]["FI12345"] > 10)
features.append(prices.loc["Close"] > 10)
return features, None
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
],
},
index=idx
)
prices.columns.name = "Sid"
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with self.assertRaises(MoonshotError) as cm:
results = DecisionTreeML1().backtest()
self.assertIn(
"features should be either all DataFrames or all Series, not a mix of both",
repr(cm.exception))
# clear cache
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
with self.assertRaises(MoonshotError) as cm:
results = DecisionTreeML2().backtest()
self.assertIn(
"features should be either all DataFrames or all Series, not a mix of both",
repr(cm.exception))
def test_complain_if_no_targets(self):
"""
Tests error handling when prices_to_features doesn't return a two-tuple.
"""
# pickle model
with open(self.pickle_path, "wb") as f:
pickle.dump(self.model, f)
class DecisionTreeML(MoonshotML):
MODEL = self.pickle_path
def prices_to_features(self, prices):
features = []
features.append(prices.loc["Close"] > 10)
features.append(prices.loc["Close"] > 100)
return features
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close"]
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
__all__ = ["plot_centroids",
"plot_histogram",
"plot_scatter"]
try:
# noinspection PyUnresolvedReferences
from matplotlib import use
use('Agg')
# noinspection PyUnresolvedReferences
import matplotlib.pyplot as plt
# noinspection PyUnresolvedReferences
import matplotlib.ticker
except ImportError:
# raise ImportError("Unable to import matplotlib")
pass
import logging
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import seaborn as sns
from torch import Tensor
import torch
from .datasets.types import TensorGroup
from .types import TorchOrNp, PathOrStr, OptStr
def plot_scatter(ax, x: TorchOrNp, y: TorchOrNp, title: str,
# legend: Optional[List[str]] = None,
xlabel: str = "", ylabel: str = "",
log_x: bool = False, log_y: bool = False,
xmin: Optional[float] = None, xmax: Optional[float] = None,
ymin: Optional[float] = None, ymax: Optional[float] = None):
# Automatically convert to numpy
if isinstance(x, Tensor): x = x.numpy()
if isinstance(y, Tensor): y = y.numpy()
ax.scatter(x, y)
# if legend is not None:
# ax.legend(legend)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.set_title(title)
if log_x:
ax.set_xscale("log")
if log_y:
ax.set_yscale("log")
# Set the minimum and maximum values if specified
ax.set_xlim(xmin=xmin, xmax=xmax)
ax.set_ylim(ymin=ymin, ymax=ymax)
def plot_histogram(ax, vals: TorchOrNp, n_bins: int, title: str,
hist_range: Optional[Tuple[float, float]] = None, xlabel: str = ""):
if isinstance(vals, Tensor): vals = vals.numpy()
n_ele = vals.shape[0]
# N is the count in each bin, bins is the lower-limit of the bin
_, bins, patches = ax.hist(vals, bins=n_bins, range=hist_range, weights=np.ones(n_ele) / n_ele)
# Now we format the y-axis to display percentage
ax.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1))
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
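# Example usage of the helpers above (illustrative only; assumes matplotlib
# imported successfully and a 1-D tensor of values):
#   fig, ax = plt.subplots()
#   plot_histogram(ax, torch.randn(1000), n_bins=30, title="Score distribution", xlabel="score")
#   fig.savefig("hist.png")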
def plot_centroids(filename: PathOrStr, ts_grp: TensorGroup, title: OptStr = None,
decision_boundary: Optional[Tuple[float, float]] = None) -> None:
filename = Path(filename)
msg = f"Generating centroid plot to path \"{str(filename)}\""
logging.debug(f"Starting: {msg}")
# Combine all the data into one tensor
x, y = [ts_grp.p_x], [torch.zeros([ts_grp.p_x.shape[0]], dtype=ts_grp.u_tr_y[1].dtype)]
flds = [(1, (ts_grp.u_tr_x, ts_grp.u_tr_y)), (3, (ts_grp.u_te_x, ts_grp.u_te_y))]
for y_offset, (xs, ys) in flds:
x.append(xs)
y.append(ys.clamp_min(0) + y_offset)
x, y = torch.cat(x, dim=0), torch.cat(y, dim=0)
def _build_labels(lbl: int) -> str:
if lbl == 0: return "Pos"
sgn = "+" if lbl % 2 == 0 else "-"
ds = "U-te" if (lbl - 1) // 2 == 1 else "U-tr"
return f"{ds} {sgn}"
y_str = [_build_labels(_y) for _y in y.cpu().numpy()]
# Residual code from t-SNE plotting. Just list class counts
unique, counts = torch.unique(y.squeeze(), return_counts=True)
for uniq, cnt in zip(unique.squeeze(), counts.squeeze()):
logging.debug("Centroids %s: %d elements", _build_labels(uniq.item()), int(cnt.item()))
label_col_name = "Label"
data = {'x1': x[:, 0].squeeze(), 'x2': x[:, 1].squeeze(), label_col_name: y_str}
flatui = ["#0d14e0", "#4bf05b", "#09ac15", "#e6204e", "#ba141d"]
markers = ["P", "s", "D", "X", "^"]
width, height = 8, 8
    plt.figure(figsize=(width, height))
ax = sns.lmplot(x="x1", y="x2",
hue="Label",
hue_order=["Pos", "U-tr +", "U-te +", "U-tr -", "U-te -"],
# palette=sns.color_palette("bright", unique.shape[0]),
palette=sns.color_palette(flatui),
data= | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 10:14:40 2019
Adapted from code created as part of Udemy course: Deep
Learning A-Z™: Hands-On Artificial Neural Networks
(https://www.udemy.com/deep-learning-a-z/)
"""
# # Deep Learning: Stacked LSTM Recurrent Neural Network (RNN) Model in Python
# Predict stock price using a Long-Short Term Memory (LSTM) Recurrent Neural
# Network (RNN)
# ## Part 1: Data Preprocessing
# Import required libraries
import os
import datetime as dt
import pandas as pd
import numpy as np
import urllib.request
import json
import math
import matplotlib.pyplot as plt
from timeit import default_timer as timer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential # Initializes the Neural Network
from keras.layers import Dense, Dropout, LSTM
import config as cfg # stores my API keys
import warnings
# ### Turn off Deprecation and Future warnings
warnings.simplefilter(action='ignore', category=DeprecationWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
# %reset -f
# Set random seed
seed = 42
np.random.seed(seed)
# Set the plotting style
plt.style.use('seaborn-whitegrid')
# Set the date range
now = dt.datetime.now()
file_date = now.strftime('%Y-%m-%d')
start_date = dt.datetime(now.year - 10, now.month, now.day)
start_date = start_date.strftime('%Y-%m-%d')
# end_date = dt.datetime(now.year, now.month, now.day)
# end_date = pd.to_datetime(end_date)
# Determine prediction period
num_days_pred = 80
# Set params
dropout_rate = 0.2
batch_size = 32
epochs = 500
##########
# # Alpha Vantage API
##########
api_key = cfg.ALPHA_VANTAGE_API_KEY
# Import list of stocks
# Use 'TSX:' to identify Canadian stocks
stock_symbols = ['AAPL', 'IBM', 'TSLA', 'RY', 'JPM']
# Loop through each stock in list
for index in range(0, len(stock_symbols)):
stock_symbol = stock_symbols[index]
print("\n*********** STOCK SYMBOL: %s ***********\n" % stock_symbol)
# url_string_daily = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=%s&outputsize=full&apikey=%s" % (
# stock_symbol, api_key)
url_string_daily = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&outputsize=full&apikey={}".format(
stock_symbol, api_key)
# Save data to file
save_data_to_file = '../data/processed/3.0-ng-alpha-vantage-intraday-stock-market-data-{}-{}.csv'.format(
stock_symbol, file_date)
save_results_to_file = '../data/processed/3.0-ng-lstm-rnn-model-prediction-results-{}-{}.csv'.format(
stock_symbol, file_date)
# Store date, open, high, low, close, adjusted close, volume, and dividend amount values
# to a Pandas DataFrame
with urllib.request.urlopen(url_string_daily) as url:
data = json.loads(url.read().decode())
# extract stock market data
data = data['Time Series (Daily)']
df_alpha = pd.DataFrame(columns=['Date',
'Open',
'High',
'Low',
'Close',
'Adjusted Close',
'Volume',
'Dividend Amount'])
for k, v in data.items():
date = dt.datetime.strptime(k, '%Y-%m-%d')
data_row = [date.date(),
float(v['1. open']),
float(v['2. high']),
float(v['3. low']),
float(v['4. close']),
float(v['5. adjusted close']),
float(v['6. volume']),
float(v['7. dividend amount'])]
df_alpha.loc[-1, :] = data_row
df_alpha.index = df_alpha.index + 1
print('Data saved to : {}'.format(save_data_to_file))
df_alpha.to_csv(save_data_to_file, index=False)
# Load it from the CSV
df_alpha = pd.read_csv(save_data_to_file)
# ### Sort DataFrame by date
df_alpha = df_alpha.sort_values('Date')
# Filter data to last n years from start_date
df_alpha = df_alpha[(df_alpha['Date'] >= start_date)]
# ### Visualize the Adjusted Close Price
plt.figure(figsize=(12, 6))
plt.plot(range(df_alpha.shape[0]), df_alpha['Adjusted Close'])
plt.xticks(range(0, df_alpha.shape[0], 251),
df_alpha['Date'].loc[::251], rotation=90)
plt.title('Daily Stock Price (Adj Close): {}'.format(stock_symbol))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Adj. Close Price', fontsize=14)
plt.savefig('../reports/figures/3.0-ng-alpha-vantage-daily-stock-market-data-adj-close-price-{}-{}.png'.format(stock_symbol, file_date),
bbox_inches='tight',
dpi=300)
print(plt.show())
# ### Splitting Data into Training and Test
# ### NOTE: We can't use train_test_split because it would randomly pick rows and the
# ### order of rows is critical to our analysis
# Split data into Training and Test sets
    # All stock data except the last num_days_pred days
data_train = df_alpha[:(len(df_alpha) - num_days_pred)]
data_test = df_alpha[-num_days_pred:] # Last n days of stock data
# Plot Training set
plt.figure(figsize=(12, 6))
plt.plot(range(data_train.shape[0]), data_train['Adjusted Close'])
plt.xticks(range(0, data_train.shape[0], 251),
data_train['Date'].loc[::251], rotation=90)
plt.title(
'Daily Stock Price (Adj. Close): {} [Training Data]'.format(stock_symbol))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Adj. Close Price', fontsize=14)
plt.savefig('../reports/figures/3.0-ng-training-data-{}-adj-close-{}.png'.format(stock_symbol, file_date),
bbox_inches='tight',
dpi=300)
print(plt.show())
# Plot Test set
plt.figure(figsize=(12, 6))
plt.plot(range(data_test.shape[0]), data_test['Adjusted Close'])
plt.xticks(range(0, data_test.shape[0], 5),
data_test['Date'].loc[::5], rotation=90)
plt.title(
'Daily Stock Price (Adj. Close): {} [Test Data]'.format(stock_symbol))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Adj. Close Price', fontsize=14)
plt.savefig('../reports/figures/3.0-ng-test-data-{}-adj-close-{}.png'.format(stock_symbol, file_date),
bbox_inches='tight',
dpi=300)
print(plt.show())
# # Describe the training data
# print(data_train.shape)
# print(data_train.describe().T)
#
# # Describe the test data
# print(data_test.shape)
# print(data_test.describe().T)
# Create a numpy array of 1 column that we care about - Adj Close Stock Price
training_set = data_train.iloc[:, 5:6].values
# Get the real Adj Closing stock prices for last n days
real_stock_price = data_test.iloc[:, 5:6].values
# Feature Scaling
# With RNNs it is recommended to apply normalization for feature scaling
sc = MinMaxScaler(feature_range=(0, 1),
copy=True)
# Scale the training set
training_set_scaled = sc.fit_transform(training_set)
    # Create a data structure with n timesteps and 1 output (use the previous
    # num_days_pred days' prices to predict the next day's price)
X_train = []
y_train = []
for i in range(num_days_pred, len(data_train)):
# append the previous n days' stock prices
X_train.append(training_set_scaled[i - num_days_pred:i, 0])
# predict the stock price on the next day
y_train.append(training_set_scaled[i, 0])
# Convert X_train and y_train to numpy arrays
X_train, y_train = np.array(X_train), np.array(y_train)
    # Reshape the data to add additional indicators (e.g. volume, closing price, etc.)
    # if needed (currently only the adjusted close price is used)
    X_train = np.reshape(X_train,
                         (X_train.shape[0],  # number of rows in x_train
                          X_train.shape[1],  # number of columns in x_train
                          1))  # number of indicators per timestep (only adjusted close)
# Part 2: Build the Recurrent Neural Network (RNN) Model
# Import the required Keras libraries and packages
# Add a timer
start = timer()
# Initialize the RNN
model = Sequential()
# Add the 1st LSTM layer with Dropout regularization
model.add(LSTM(units=num_days_pred, # number of memory cells (neurons) in this layer
return_sequences=True,
input_shape=(X_train.shape[1], 1)))
model.add(Dropout(rate=dropout_rate))
# Add a 2nd LSTM layer with Dropout regularization
model.add(LSTM(units=num_days_pred, # number of memory cells (neurons) in this layer
return_sequences=True))
model.add(Dropout(rate=dropout_rate))
# Add a 3rd LSTM layer with Dropout regularization
model.add(LSTM(units=num_days_pred, # number of memory cells (neurons) in this layer
return_sequences=True))
model.add(Dropout(rate=dropout_rate))
# Add a 4th (and last) LSTM layer with Dropout regularization
# number of memory cells (neurons) in this layer
model.add(LSTM(units=num_days_pred))
model.add(Dropout(rate=dropout_rate))
# Add the output layer
model.add(Dense(units=1))
# Compile the Recurrent Neural Network (RNN)
model.compile(optimizer='adam',
loss='mean_squared_error')
# Fit the Recurrent Neural Network (RNN) to the Training Set
model.fit(x=X_train,
y=y_train,
batch_size=batch_size,
epochs=epochs,
verbose=0)
# Elapsed time in minutes
end = timer()
print('Elapsed time in minutes: ')
    print(round((end - start) / 60, 1))
# Add an end of work message
os.system('say "your {} model has finished processing"'.format(stock_symbol))
# Print summary of the neural network architecture
print(model.summary())
# Part 3: Make Prediction and Visualize the Results
    # Get the predicted Adjusted Close stock prices for the last n days
data_total = df_alpha['Adjusted Close']
    # the inputs start num_days_pred trading days before the first date in the test set
inputs = data_total[len(data_total) -
len(data_test) - num_days_pred:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs) # Scale the inputs
X_test = []
for i in range(num_days_pred, len(data_test) + num_days_pred):
# append the previous n days' stock prices
X_test.append(inputs[i-num_days_pred:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test,
(X_test.shape[0],
X_test.shape[1], 1))
predicted_stock_price = model.predict(X_test)
# Invert the feature scaling
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# Add calculation of differences between prediction and actual - ups and downs
df_date = | pd.DataFrame(data_test.iloc[:, 0:1].values) | pandas.DataFrame |
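    # Hedged sketch of the comparison step noted above (illustrative only; it
    # reuses the arrays already computed in this loop iteration).
    results = pd.DataFrame({'Date': data_test['Date'].values,
                            'Real': real_stock_price.ravel(),
                            'Predicted': predicted_stock_price.ravel()})
    # Root-mean-squared error of the predictions
    rmse = math.sqrt(mean_squared_error(results['Real'], results['Predicted']))
    print('RMSE for {}: {:.4f}'.format(stock_symbol, rmse))
    # Daily direction (up/down) agreement between the real and predicted series
    results['Real Up'] = results['Real'].diff() > 0
    results['Predicted Up'] = results['Predicted'].diff() > 0
    print('Direction accuracy: {:.2%}'.format(
        (results['Real Up'] == results['Predicted Up']).mean()))
    results.to_csv(save_results_to_file, index=False)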
"""
Read and manipulate data using pandas, then plot some of it.
The script reads the ICP-OES file 'Whorley_C2016major_Grn.csv' and computes the
average and standard deviation of each analyte (5 replicates per analyte).
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Local Imports, File Paths
import os, sys
sys.path.append(os.path.abspath('shared'))
import my_module as mymod
myplace = 'twho'
# Input / Output Directories
in_dir = '../' + myplace + '_data/'
out_dir = '../' + myplace + '_output/'
#Define Input / Output File Names
in_fn = in_dir + 'Whorley_C2016major_Grn.csv'
out_fn = out_dir + 'C2016maj.png'
# Read in the data file
df = | pd.read_csv(in_fn) | pandas.read_csv |
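# Hedged sketch of the stated goal: the per-analyte mean and standard deviation
# over the 5 replicates. The column names 'Analyte' and 'Value' are assumptions,
# since the layout of the ICP-OES CSV is not shown here.
summary = df.groupby('Analyte')['Value'].agg(['mean', 'std'])
print(summary)
# Bar plot of the means with the standard deviations as error bars
ax = summary['mean'].plot.bar(yerr=summary['std'], capsize=3)
ax.set_ylabel('Concentration')
plt.savefig(out_fn)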
"""Dynamic file checks."""
from dataclasses import dataclass
from datetime import date, timedelta
from typing import Dict, Set
import re
import pandas as pd
import numpy as np
from .errors import ValidationFailure, APIDataFetchError
from .datafetcher import get_geo_signal_combos, threaded_api_calls
from .utils import relative_difference_by_min, TimeWindow, lag_converter
class DynamicValidator:
"""Class for validation of static properties of individual datasets."""
@dataclass
class Parameters:
"""Configuration parameters."""
# data source name, one of
# https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html
data_source: str
# span of time over which to perform checks
time_window: TimeWindow
# date that this df_to_test was generated; typically 1 day after the last date in df_to_test
generation_date: date
# number of days back to perform sanity checks, starting from the last date appearing in
# df_to_test
max_check_lookbehind: timedelta
# names of signals that are smoothed (7-day avg, etc)
smoothed_signals: Set[str]
        # maximum number of days we expect each signal to lag behind
max_expected_lag: Dict[str, int]
        # minimum number of days we expect each signal to lag behind
min_expected_lag: Dict[str, int]
def __init__(self, params):
"""
Initialize object and set parameters.
Arguments:
- params: dictionary of user settings; if empty, defaults will be used
"""
common_params = params["common"]
dynamic_params = params.get("dynamic", dict())
self.test_mode = dynamic_params.get("test_mode", False)
self.params = self.Parameters(
data_source=common_params["data_source"],
time_window=TimeWindow.from_params(common_params["end_date"],
common_params["span_length"]),
generation_date=date.today(),
max_check_lookbehind=timedelta(
days=max(7, dynamic_params.get("ref_window_size", 14))),
smoothed_signals=set(dynamic_params.get("smoothed_signals", [])),
min_expected_lag=lag_converter(common_params.get(
"min_expected_lag", dict())),
max_expected_lag=lag_converter(common_params.get(
"max_expected_lag", dict()))
)
def validate(self, all_frames, report):
"""
Perform all checks over the combined data set from all files.
Parameters
----------
all_frames: pd.DataFrame
combined data from all input files
report: ValidationReport
report to which the results of these checks will be added
"""
# Get 14 days prior to the earliest list date
outlier_lookbehind = timedelta(days=14)
# Get all expected combinations of geo_type and signal.
geo_signal_combos = get_geo_signal_combos(self.params.data_source)
all_api_df = threaded_api_calls(self.params.data_source,
self.params.time_window.start_date - outlier_lookbehind,
self.params.time_window.end_date,
geo_signal_combos)
# Keeps script from checking all files in a test run.
kroc = 0
# Comparison checks
# Run checks for recent dates in each geo-sig combo vs semirecent (previous
# week) API data.
for geo_type, signal_type in geo_signal_combos:
geo_sig_df = all_frames.query(
"geo_type == @geo_type & signal == @signal_type")
# Drop unused columns.
geo_sig_df.drop(columns=["geo_type", "signal"])
report.increment_total_checks()
if geo_sig_df.empty:
report.add_raised_error(ValidationFailure(check_name="check_missing_geo_sig_combo",
geo_type=geo_type,
signal=signal_type,
message="file with geo_type-signal combo "
"does not exist"))
continue
max_date = geo_sig_df["time_value"].max()
self.check_min_allowed_max_date(
max_date, geo_type, signal_type, report)
self.check_max_allowed_max_date(
max_date, geo_type, signal_type, report)
# Get relevant reference data from API dictionary.
api_df_or_error = all_api_df[(geo_type, signal_type)]
report.increment_total_checks()
if isinstance(api_df_or_error, APIDataFetchError):
report.add_raised_error(api_df_or_error)
continue
# Only do outlier check for cases and deaths signals
if (signal_type in ["confirmed_7dav_cumulative_num", "confirmed_7dav_incidence_num",
"confirmed_cumulative_num", "confirmed_incidence_num",
"deaths_7dav_cumulative_num",
"deaths_cumulative_num"]):
# Outlier dataframe
earliest_available_date = geo_sig_df["time_value"].min()
source_df = geo_sig_df.query(
'time_value <= @self.params.time_window.end_date & '
'time_value >= @self.params.time_window.start_date'
)
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
outlier_start_date = earliest_available_date - outlier_lookbehind
outlier_end_date = earliest_available_date - timedelta(days=1)
outlier_api_df = api_df_or_error.query(
'time_value <= @outlier_end_date & time_value >= @outlier_start_date')
# pylint: enable=unused-variable
self.check_positive_negative_spikes(
source_df, outlier_api_df, geo_type, signal_type, report)
# Check data from a group of dates against recent (previous 7 days,
# by default) data from the API.
for checking_date in self.params.time_window.date_seq:
create_dfs_or_error = self.create_dfs(
geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report)
if not create_dfs_or_error:
continue
recent_df, reference_api_df = create_dfs_or_error
self.check_max_date_vs_reference(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
self.check_rapid_change_num_rows(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
if not re.search("cumulative", signal_type):
self.check_avg_val_vs_reference(
recent_df, reference_api_df, checking_date, geo_type,
signal_type, report)
# Keeps script from checking all files in a test run.
kroc += 1
if self.test_mode and kroc == 2:
break
def check_min_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too long ago.
        The most recent data should be at most max_expected_lag days before the generation date.
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
if max_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_min_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too long ago"))
report.increment_total_checks()
def check_max_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too recent.
        The most recent data should be at least min_expected_lag days before the generation date.
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
max_thres = timedelta(days = self.params.min_expected_lag.get(
signal_type, self.params.min_expected_lag.get('all', 1)))
if max_date > self.params.generation_date - max_thres:
report.add_raised_error(
ValidationFailure("check_max_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too recent"))
report.increment_total_checks()
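    # Worked example of the two lag checks above (values are illustrative only):
    # with generation_date = 2021-06-15, max_expected_lag = 10 and
    # min_expected_lag = 1, the most recent time_value must fall within
    # [2021-06-05, 2021-06-14]; an earlier max_date raises check_min_max_date
    # ("too long ago") and a later one raises check_max_max_date ("too recent").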
def create_dfs(self, geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report):
"""Create recent_df and reference_api_df from params.
Raises error if recent_df is empty.
Arguments:
- geo_sig_df: Pandas dataframe of test data
- api_df_or_error: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- False if recent_df is empty, else (recent_df, reference_api_df)
(after reference_api_df has been padded if necessary)
"""
# recent_lookbehind: start from the check date and working backward in time,
# how many days at a time do we want to check for anomalies?
# Choosing 1 day checks just the daily data.
recent_lookbehind = timedelta(days=1)
recent_cutoff_date = checking_date - \
recent_lookbehind + timedelta(days=1)
recent_df = geo_sig_df.query(
'time_value <= @checking_date & time_value >= @recent_cutoff_date')
report.increment_total_checks()
if recent_df.empty:
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
if checking_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_missing_geo_sig_date_combo",
checking_date,
geo_type,
signal_type,
"test data for a given checking date-geo type-signal type"
" combination is missing. Source data may be missing"
" for one or more dates"))
return False
# Reference dataframe runs backwards from the recent_cutoff_date
#
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
reference_start_date = recent_cutoff_date - self.params.max_check_lookbehind
if signal_type in self.params.smoothed_signals:
# Add an extra 7 days to the reference period.
reference_start_date = reference_start_date - \
timedelta(days=7)
reference_end_date = recent_cutoff_date - timedelta(days=1)
# pylint: enable=unused-variable
# Subset API data to relevant range of dates.
reference_api_df = api_df_or_error.query(
"time_value >= @reference_start_date & time_value <= @reference_end_date")
report.increment_total_checks()
if reference_api_df.empty:
report.add_raised_error(
ValidationFailure("empty_reference_data",
checking_date,
geo_type,
signal_type,
"reference data is empty; comparative checks could not "
"be performed"))
return False
reference_api_df = self.pad_reference_api_df(
reference_api_df, geo_sig_df, reference_end_date)
return (geo_sig_df, reference_api_df)
def pad_reference_api_df(self, reference_api_df, geo_sig_df, reference_end_date):
"""Check if API data is missing, and supplement from test data.
Arguments:
- reference_api_df: API data within lookbehind range
- geo_sig_df: Test data
- reference_end_date: Supposed end date of reference data
Returns:
- reference_api_df: Supplemented version of original
"""
reference_api_df_max_date = reference_api_df.time_value.max()
if reference_api_df_max_date < reference_end_date:
# Querying geo_sig_df, only taking relevant rows
geo_sig_df_supplement = geo_sig_df.query(
'time_value <= @reference_end_date & time_value > \
@reference_api_df_max_date')[[
"geo_id", "val", "se", "sample_size", "time_value"]]
# Matching time_value format
geo_sig_df_supplement["time_value"] = \
pd.to_datetime(geo_sig_df_supplement["time_value"],
format = "%Y-%m-%d %H:%M:%S")
reference_api_df = pd.concat(
[reference_api_df, geo_sig_df_supplement])
return reference_api_df
def check_max_date_vs_reference(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
Check if reference data is more recent than test data.
Arguments:
- df_to_test: pandas dataframe of a single CSV of source data
(one day-signal-geo_type combo)
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
if df_to_test["time_value"].max() < df_to_reference["time_value"].max():
report.add_raised_error(
ValidationFailure("check_max_date_vs_reference",
checking_date,
geo_type,
signal_type,
"reference df has days beyond the max date in the =df_to_test="))
report.increment_total_checks()
def check_rapid_change_num_rows(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
        Compare number of observations per day in test dataframe vs reference dataframe.
Arguments:
- df_to_test: pandas dataframe of CSV source data
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- checking_date: datetime date
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
test_rows_per_reporting_day = df_to_test[df_to_test['time_value']
== checking_date].shape[0]
reference_rows_per_reporting_day = df_to_reference.shape[0] / len(
set(df_to_reference["time_value"]))
try:
compare_rows = relative_difference_by_min(
test_rows_per_reporting_day,
reference_rows_per_reporting_day)
except ZeroDivisionError as e:
print(checking_date, geo_type, signal_type)
raise e
if abs(compare_rows) > 0.35:
report.add_raised_error(
ValidationFailure("check_rapid_change_num_rows",
checking_date,
geo_type,
signal_type,
"Number of rows per day seems to have changed rapidly (reference "
"vs test data)"))
report.increment_total_checks()
def check_positive_negative_spikes(self, source_df, api_frames, geo, sig, report):
"""
Adapt Dan's corrections package to Python (only consider spikes).
See https://github.com/cmu-delphi/covidcast-forecast/tree/dev/corrections/data_corrections
Statistics for a right shifted rolling window and a centered rolling window are used
to determine outliers for both positive and negative spikes.
As it is now, ststat will always be NaN for source frames.
Arguments:
- source_df: pandas dataframe of CSV source data
- api_frames: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo: str; geo type name (county, msa, hrr, state) as in the CSV name
- sig: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
"""
report.increment_total_checks()
# Combine all possible frames so that the rolling window calculations make sense.
source_frame_start = source_df["time_value"].min()
# This variable is interpolated into the call to `add_raised_error()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
source_frame_end = source_df["time_value"].max()
# pylint: enable=unused-variable
all_frames = | pd.concat([api_frames, source_df]) | pandas.concat |
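# Illustrative, stand-alone sketch of the rolling-window spike check described in
# the docstring above (the window length of 7 and the cutoff of 3.0 are assumptions,
# not the validator's actual tuning).
def flag_spikes(frame: pd.DataFrame, cut: float = 3.0) -> pd.DataFrame:
    df = frame.sort_values("time_value").copy()
    # Right-shifted window: mean/std of the 7 values strictly before each point
    shifted = df["val"].shift(1).rolling(7, min_periods=7)
    df["ststat"] = (df["val"] - shifted.mean()) / shifted.std()
    # Centered window: mean/std of the 7 values surrounding each point
    centered = df["val"].rolling(7, center=True, min_periods=7)
    df["ftstat"] = (df["val"] - centered.mean()) / centered.std()
    # Flag rows where either statistic is an extreme positive or negative deviation
    return df[(df["ststat"].abs() > cut) | (df["ftstat"].abs() > cut)]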
#!/usr/bin/env python
"""
Evaluation of conformal predictors.
"""
# Authors: <NAME>
# TODO: cross_val_score/run_experiment should possibly allow multiple to be evaluated on identical folding
from __future__ import division
from cqr.nonconformist_base import RegressorMixin, ClassifierMixin
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.base import clone, BaseEstimator
class BaseIcpCvHelper(BaseEstimator):
"""Base class for cross validation helpers.
"""
def __init__(self, icp, calibration_portion):
super(BaseIcpCvHelper, self).__init__()
self.icp = icp
self.calibration_portion = calibration_portion
def predict(self, x, significance=None):
return self.icp.predict(x, significance)
class ClassIcpCvHelper(BaseIcpCvHelper, ClassifierMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpClassifiers.
See also
--------
IcpRegCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from cqr.nonconformist import IcpClassifier
>>> from cqr.nonconformist import ClassifierNc, MarginErrFunc
>>> from cqr.nonconformist import ClassIcpCvHelper
>>> from cqr.nonconformist import class_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_iris()
    >>> nc = ClassifierNc(RandomForestClassifier(), MarginErrFunc())
>>> icp = IcpClassifier(nc)
>>> icp_cv = ClassIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[class_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
class_mean_errors fold iter significance
0 0.013333 0 0 0.1
1 0.080000 1 0 0.1
2 0.053333 0 1 0.1
3 0.080000 1 1 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(ClassIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
        # Hold out a stratified calibration split, then fit and calibrate the ICP
        splitter = StratifiedShuffleSplit(n_splits=1,
                                          test_size=self.calibration_portion)
        for train, cal in splitter.split(x, y):
            self.icp.fit(x[train, :], y[train])
            self.icp.calibrate(x[cal, :], y[cal])
class RegIcpCvHelper(BaseIcpCvHelper, RegressorMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpRegressors.
See also
--------
IcpClassCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.ensemble import RandomForestRegressor
>>> from cqr.nonconformist import IcpRegressor
>>> from cqr.nonconformist import RegressorNc, AbsErrorErrFunc
>>> from cqr.nonconformist import RegIcpCvHelper
>>> from cqr.nonconformist import reg_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_boston()
>>> nc = RegressorNc(RandomForestRegressor(), AbsErrorErrFunc())
>>> icp = IcpRegressor(nc)
>>> icp_cv = RegIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[reg_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
fold iter reg_mean_errors significance
0 0 0 0.185771 0.1
1 1 0 0.138340 0.1
2 0 1 0.071146 0.1
3 1 1 0.043478 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(RegIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
split = train_test_split(x, y, test_size=self.calibration_portion)
x_tr, x_cal, y_tr, y_cal = split[0], split[1], split[2], split[3]
self.icp.fit(x_tr, y_tr)
self.icp.calibrate(x_cal, y_cal)
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
def cross_val_score(model,x, y, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
verbose=False):
"""Evaluates a conformal predictor using cross-validation.
Parameters
----------
model : object
Conformal predictor to evaluate.
x : numpy array of shape [n_samples, n_features]
Inputs of data to use for evaluation.
y : numpy array of shape [n_samples]
Outputs of data to use for evaluation.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each iteration, fold and evaluation function.
"""
fit_params = fit_params if fit_params else {}
significance_levels = (significance_levels if significance_levels
is not None else np.arange(0.01, 1.0, 0.01))
df = pd.DataFrame()
columns = ['iter',
'fold',
'significance',
] + [f.__name__ for f in scoring_funcs]
for i in range(iterations):
idx = np.random.permutation(y.size)
x, y = x[idx, :], y[idx]
        cv = KFold(n_splits=folds)
        for j, (train, test) in enumerate(cv.split(x)):
if verbose:
sys.stdout.write('\riter {}/{} fold {}/{}'.format(
i + 1,
iterations,
j + 1,
folds
))
m = clone(model)
m.fit(x[train, :], y[train], **fit_params)
prediction = m.predict(x[test, :], significance=None)
for k, s in enumerate(significance_levels):
scores = [scoring_func(prediction, y[test], s)
for scoring_func in scoring_funcs]
df_score = pd.DataFrame([[i, j, s] + scores],
columns=columns)
df = df.append(df_score, ignore_index=True)
return df
def run_experiment(models, csv_files, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
normalize=False, verbose=False, header=0):
"""Performs a cross-validation evaluation of one or several conformal
predictors on a collection of data sets in csv format.
Parameters
----------
models : object or iterable
Conformal predictor(s) to evaluate.
csv_files : iterable
List of file names (with absolute paths) containing csv-data, used to
evaluate the conformal predictor.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each data set, iteration, fold and
evaluation function.
"""
df = pd.DataFrame()
if not hasattr(models, '__iter__'):
models = [models]
for model in models:
is_regression = model.get_problem_type() == 'regression'
n_data_sets = len(csv_files)
for i, csv_file in enumerate(csv_files):
if verbose:
print('\n{} ({} / {})'.format(csv_file, i + 1, n_data_sets))
data = pd.read_csv(csv_file, header=header)
x, y = data.values[:, :-1], data.values[:, -1]
x = np.array(x, dtype=np.float64)
if normalize:
if is_regression:
                    y = (y - y.min()) / (y.max() - y.min())
else:
for j, y_ in enumerate(np.unique(y)):
y[y == y_] = j
scores = cross_val_score(model, x, y, iterations, folds,
fit_params, scoring_funcs,
significance_levels, verbose)
ds_df = | pd.DataFrame(scores) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
        self.assertTrue(self._dict_almost_equal(exp_freq, result))
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
def test_to_csv_ambiguous(self):
# Given/When
self.af_amb.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES_AMB)
# Then
pdtest.assert_frame_equal(result, expected)
@staticmethod
def _dict_almost_equal(expected: dict, result: dict, acc=10**-8) -> bool:
"""Compare to dictionaries and ensure that all their values are the
same, accounting for some fluctuation up to the given accuracy value.
Args:
expected: expected dictionary
result: resulting dictionary
acc: accuracy to use [default: 10**-8]
"""
if expected.keys() == result.keys():
for key in expected.keys():
if abs(expected[key] - result[key]) < acc:
continue
return True
return False
# From Fasta
class TestFromFasta(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromFastaNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_NOREF_FASTA,
reference=SAMPLE_REF_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# From Csv
class TestFromCsv(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromCsvNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_NOREF_CSV,
reference=SAMPLE_REF_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# Real Datasets
class TestRealDatasetsX(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_X_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_X_DF, index_col=0)
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = pd.read_csv(REAL_X_FREQUENCIES)
# Then
| pdtest.assert_frame_equal(self.af.frequencies, exp_freqs) | pandas.testing.assert_frame_equal |
"""Unit tests for compaction."""
from io import StringIO
import numpy as np # type: ignore
import pandas # type: ignore
from pytest import approx, mark, raises # type: ignore
from compaction import compact
from compaction.cli import load_config, run_compaction
def test_to_analytical() -> None:
c = 3.68e-8
rho_s = 2650.0
rho_w = 1000.0
phi_0 = 0.6
g = 9.81
dz = np.full(2000, 10.0)
phi = np.full(len(dz), phi_0)
phi_numerical = compact(
dz,
phi,
porosity_min=0.0,
porosity_max=phi_0,
c=c,
gravity=g,
rho_grain=rho_s,
rho_void=rho_w,
)
z = np.cumsum(dz * (1 - phi) / (1 - phi_numerical))
phi_analytical = np.exp(-c * g * (rho_s - rho_w) * z) / (
np.exp(-c * g * (rho_s - rho_w) * z) + (1.0 - phi_0) / phi_0
)
sup_norm = np.max(np.abs(phi_numerical - phi_analytical) / phi_analytical)
assert sup_norm < 0.01
def test_spatially_distributed() -> None:
"""Test with spatially distributed inputs."""
dz = np.full((100, 10), 1.0)
phi = np.full((100, 10), 0.5)
phi_new = compact(dz, phi, porosity_max=0.5)
assert phi_new[0] == approx(phi[0])
assert np.all(phi_new[1:] < phi[1:])
assert np.all(np.diff(phi_new, axis=0) < 0.0)
@mark.parametrize("size", (10, 100, 1000, 10000))
@mark.benchmark(group="compaction")
def test_grid_size(benchmark, size) -> None:
dz = np.full((size, 100), 1.0)
phi = np.full((size, 100), 0.5)
phi_new = compact(dz, phi, porosity_max=0.5)
phi_new = benchmark(compact, dz, phi, porosity_max=0.5)
assert phi_new[0] == approx(phi[0])
assert np.all(phi_new[1:] < phi[1:])
assert np.all(np.diff(phi_new, axis=0) < 0.0)
@mark.parametrize("size", (10, 100, 1000, 10000))
@mark.benchmark(group="compaction-with-dz")
def test_grid_size_with_dz(benchmark, size) -> None:
dz = np.full((size, 100), 1.0)
phi = np.full((size, 100), 0.5)
phi_new = compact(dz, phi, porosity_max=0.5)
dz_new = np.empty_like(dz)
phi_new = benchmark(compact, dz, phi, porosity_max=0.5, return_dz=dz_new)
assert phi_new[0] == approx(phi[0])
assert np.all(phi_new[1:] < phi[1:])
assert np.all(np.diff(phi_new, axis=0) < 0.0)
assert dz_new[0] == approx(dz[0])
assert np.all(dz_new[1:] < dz[1:])
assert np.all(np.diff(dz_new, axis=0) < 0.0)
def test_bad_return_dz() -> None:
dz = np.full((10, 100), 1.0)
phi = np.full((10, 100), 0.5)
dz_new = np.empty((10, 100), dtype=int)
with raises(TypeError):
compact(dz, phi, porosity_max=0.5, return_dz=dz_new)
dz_new = np.empty((1, 100), dtype=dz.dtype)
with raises(TypeError):
compact(dz, phi, porosity_max=0.5, return_dz=dz_new)
def test_decreasing_porosity() -> None:
"""Test porosity decreases with depth."""
dz = np.full(100, 1.0)
phi = np.full(100, 0.5)
phi_new = compact(dz, phi, porosity_max=0.5)
assert phi_new[0] == approx(phi[0])
assert np.all(phi_new[1:] < phi[1:])
assert np.all(np.diff(phi_new) < 0.0)
def test_equilibrium_compaction() -> None:
"""Test steady-state compaction."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5)
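    # Rescale layer thicknesses so that the solid (grain) volume of each layer,
    # dz * (1 - phi), is conserved through the compaction step.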
dz_1 = dz_0 * (1 - phi_0) / (1 - phi_1)
phi_2 = compact(dz_1, phi_1, porosity_max=0.5)
assert np.all(phi_2 == approx(phi_1))
def test_no_decompaction() -> None:
"""Test removing sediment does not cause decompaction."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5)
dz_1 = dz_0 * (1 - phi_0) / (1 - phi_1)
dz_1[0] /= 2.0
phi_2 = compact(dz_1, phi_1, porosity_max=0.5)
assert np.all(phi_2 == approx(phi_1))
def test_increasing_load() -> None:
"""Test adding sediment increases compaction."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5)
dz_1 = dz_0 * (1 - phi_0) / (1 - phi_1)
dz_1[0] *= 2.0
phi_2 = compact(dz_1, phi_1, porosity_max=0.5)
assert np.all(phi_2[1:] < phi_1[1:])
def test_zero_compaction() -> None:
"""Test compaction coefficient of zero."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5, c=0.0)
assert np.all(phi_1 == approx(phi_0))
def test_increasing_compactability() -> None:
"""Test large compaction coefficient leads to more compaction."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5, c=1e-6)
phi_2 = compact(dz_0, phi_0, porosity_max=0.5, c=1e-3)
assert np.all(phi_2[1:] < phi_1[1:])
def test_void_is_air() -> None:
"""Test empty void space."""
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5, rho_void=0.0)
phi_2 = compact(dz_0, phi_0, porosity_max=0.5, rho_void=1000.0)
assert np.all(phi_1[1:] < phi_2[1:])
def test_all_void() -> None:
dz_0 = np.full(100, 1000.0)
phi_0 = np.full(100, 1.0)
dz_1 = np.empty_like(dz_0)
phi_1 = compact(dz_0, phi_0, return_dz=dz_1)
assert np.all(dz_1 == approx(0.0))
assert np.all(phi_1 == approx(1.0))
def test_load_config_defaults() -> None:
"""Test load_config without file name."""
config = load_config()
defaults = {
"constants": {
"c": 5e-8,
"porosity_min": 0.0,
"porosity_max": 0.5,
"rho_grain": 2650.0,
"rho_void": 1000.0,
}
}
assert config == defaults
config = load_config(StringIO(""))
assert config == defaults
config = load_config(StringIO("[another_group]"))
assert config == defaults
config = load_config(StringIO("[compaction]"))
assert config == defaults
config = load_config(StringIO("[compaction]"))
assert config == defaults
def test_load_config_from_file() -> None:
"""Test config vars from a file."""
file_like = StringIO(
"""[compaction.constants]
c = 3.14
"""
)
config = load_config(file_like)
expected = {
"constants": {
"c": 3.14,
"porosity_min": 0.0,
"porosity_max": 0.5,
"rho_grain": 2650.0,
"rho_void": 1000.0,
}
}
assert config == expected
def test_run(tmpdir) -> None:
dz_0 = np.full(100, 1.0)
phi_0 = np.full(100, 0.5)
phi_1 = compact(dz_0, phi_0, porosity_max=0.5)
with tmpdir.as_cwd():
df = | pandas.DataFrame.from_dict({"dz": dz_0, "porosity": phi_0}) | pandas.DataFrame.from_dict |
# import start
import ast
import asyncio
import calendar
import platform
import subprocess as sp
import time
import traceback
import xml.etree.ElementTree as Et
from collections import defaultdict
from datetime import datetime
import math
import numpy as np
import pandas as pd
from Utility.CDPConfigValues import CDPConfigValues
from Utility.Utilities import Utilities
from Utility.WebConstants import WebConstants
from WebConnection.WebConnection import WebConnection
# import end
## Function to reverse a string
#def reverse(string):
# string = string[::-1]
# return string
class Preprocessor:
""" Preprocessor class is used for preparing the extracted data to be fed to the training algorithm
for further processing.
"""
def __init__(self, project, previous_preprocessed_df=None, preprocessed=None):
"""
:param timestamp_column: Contains the committer timestamp
:type timestamp_column: str
        :param email_column: Contains the committer email
:type email_column: str
:param project: project key to be processed
:type project: str
:param project_name: project name to be processed
:type project_name: str
:param web_constants: Constants load from file
:type web_constants: class WebConstants
:param base_timestamp: Instantiating committer timestamp
:type base_timestamp: str
:param developer_stats_df: creating dataframe variable for developer stats
:type developer_stats_df: pandas dataframe
:param developer_sub_module_stats_df: creating dataframe variable for developer sub module stats
:type developer_sub_module_stats_df: pandas dataframe
"""
self.timestamp_column = "COMMITTER_TIMESTAMP"
self.email_column = "COMMITTER_EMAIL"
self.project = project
self.project_name = CDPConfigValues.configFetcher.get('name', project)
self.web_constants = WebConstants(project)
self.base_timestamp = ""
self.developer_stats_df = ""
self.developer_sub_module_stats_df = ""
if preprocessed is None:
if previous_preprocessed_df is None:
self.file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
self.github_data_dump_df = pd.read_csv(
f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.commit_details_file_name}")
self.pre_processed_file_path = f"{CDPConfigValues.preprocessed_file_path}/{self.project_name}"
CDPConfigValues.create_directory(self.pre_processed_file_path)
self.stats_dataframe = pd.DataFrame()
self.sub_module_list = list()
else:
self.file_path = f"{CDPConfigValues.schedule_file_path}/{self.project_name}"
self.github_data_dump_df = pd.DataFrame(previous_preprocessed_df)
self.github_data_dump_df = self.github_data_dump_df.apply(
lambda x: x.str.strip() if x.dtype == "object" else x)
self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
lambda x: pd.Timestamp(x, tz="UTC"))
self.github_data_dump_df["COMMITTER_TIMESTAMP"] = self.github_data_dump_df["COMMITTER_TIMESTAMP"].apply(
lambda x: pd.Timestamp(x))
self.github_data_dump_df['COMMITTER_TIMESTAMP'] = self.github_data_dump_df['COMMITTER_TIMESTAMP'].astype(
str)
self.github_data_dump_df['COMMITTER_TIMESTAMP'] = self.github_data_dump_df['COMMITTER_TIMESTAMP'].apply(
lambda x: x[:-6])
self.filter_data_frame(self.github_data_dump_df)
if self.github_data_dump_df.shape[0] != 0:
self.github_data_dump_df["COMMITTER_EMAIL"] = \
self.github_data_dump_df[["COMMITTER_EMAIL", "COMMITTER_NAME"]].apply(self.replace_blank_email, axis=1)
else:
self.github_data_dump_df = previous_preprocessed_df
@staticmethod
def replace_blank_email(row):
if row["COMMITTER_EMAIL"] is None or row["COMMITTER_EMAIL"] == "":
return str(row["COMMITTER_NAME"]).replace(" ", "") + "@noemail"
else:
return row["COMMITTER_EMAIL"]
def filter_data_frame(self, data_frame):
if self.project_name == "spring-boot":
data_frame = data_frame[data_frame["FILE_NAME"].str.endswith(".java")]
elif self.project_name == "opencv":
data_frame = data_frame[
(data_frame["FILE_NAME"].str.endswith(".hpp") |
data_frame["FILE_NAME"].str.endswith(".cpp") |
data_frame["FILE_NAME"].str.endswith(".h") |
data_frame["FILE_NAME"].str.endswith(".cc") |
data_frame["FILE_NAME"].str.endswith(".c") |
data_frame["FILE_NAME"].str.endswith(".py") |
data_frame["FILE_NAME"].str.endswith(".java") |
data_frame["FILE_NAME"].str.endswith(".cl")
)]
# data_frame["FILE_NAME"].str.endswith(".cs")
elif self.project_name == "corefx":
data_frame = data_frame[
(data_frame["FILE_NAME"].str.endswith(".cs") |
data_frame["FILE_NAME"].str.endswith(".h") |
data_frame["FILE_NAME"].str.endswith(".c") |
data_frame["FILE_NAME"].str.endswith(".vb"))]
self.github_data_dump_df = data_frame
def convert_month_day_date_hour_to_categorical(self, ):
"""
        This method extracts the month, date, and hour from the commit timestamp and manually one-hot encodes the day of the week
"""
convert_date_to_categorical_start_time = time.time()
timestamp_column_in_df = self.github_data_dump_df['COMMITTER_TIMESTAMP']
dayList = list()
monthList = list()
dateList = list()
hourList = list()
mondayList = list()
tuesdayList = list()
wednesdayList = list()
thursdayList = list()
fridayList = list()
saturdayList = list()
sundayList = list()
for timestamp_value in timestamp_column_in_df:
new_date_format = datetime.strptime(timestamp_value, '%Y-%m-%d %H:%M:%S')
weekdayStr = calendar.day_name[new_date_format.weekday()]
dayList.append(weekdayStr)
if weekdayStr == 'Sunday':
sundayList.append('1')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Monday':
sundayList.append('0')
mondayList.append('1')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Tuesday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('1')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Wednesday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('1')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Thursday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('1')
fridayList.append('0')
saturdayList.append('0')
elif weekdayStr == 'Friday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('1')
saturdayList.append('0')
elif weekdayStr == 'Saturday':
sundayList.append('0')
mondayList.append('0')
tuesdayList.append('0')
wednesdayList.append('0')
thursdayList.append('0')
fridayList.append('0')
saturdayList.append('1')
monthList.append(new_date_format.month)
dateList.append(new_date_format.day)
hourList.append(new_date_format.hour)
self.github_data_dump_df['DAY'] = dayList
self.github_data_dump_df['MONTH'] = monthList
self.github_data_dump_df['DATE'] = dateList
self.github_data_dump_df['HOUR'] = hourList
self.github_data_dump_df['SUNDAY'] = sundayList
self.github_data_dump_df['MONDAY'] = mondayList
self.github_data_dump_df['TUESDAY'] = tuesdayList
self.github_data_dump_df['WEDNESDAY'] = wednesdayList
self.github_data_dump_df['THURSDAY'] = thursdayList
self.github_data_dump_df['FRIDAY'] = fridayList
self.github_data_dump_df['SATURDAY'] = saturdayList
convert_date_to_categorical_end_time = time.time()
print(f"Time taken to convert datetime to Categorical is "
f"{convert_date_to_categorical_end_time - convert_date_to_categorical_start_time}")
@staticmethod
def file_status_to_cat(value):
"""
        Helper method for mapping a file status string to a single-character code
"""
if value == 'modified':
return 'M'
elif value == 'added':
return 'A'
elif value == 'renamed':
return 'R'
else:
return 'D'
def file_status_to_categorical(self, ):
"""
        This method converts the file status string values to categorical single-character codes
"""
file_status_start_time = time.time()
self.github_data_dump_df['FILE_STATUS'] = self.github_data_dump_df['FILE_STATUS'].apply(self.file_status_to_cat)
file_status_end_time = time.time()
print(f"Time Taken to convert file status to categorical {file_status_end_time - file_status_start_time}")
def determine_commit_is_fix(self, closed_events_df=None):
"""
        This method labels each commit in the dataframe as a fix (IsFix) when it corresponds to a closed issue
:param closed_events_df: dataframe containing the closed events list
:type closed_events_df: pandas dataframe
"""
commit_isFix_start_time = time.time()
if closed_events_df is None:
closed_issue_df = pd.read_csv(
f"{CDPConfigValues.cdp_dump_path}/{self.project_name}/{CDPConfigValues.closed_events_list_file_name}")
else:
closed_issue_df = closed_events_df
commits_closed_df = pd.DataFrame(
closed_issue_df.loc[closed_issue_df["commitid"] != ""]["commitid"].drop_duplicates())
commits_closed_df = commits_closed_df.dropna()
commits_closed_df.columns = ["COMMIT_ID"]
search_pattern = "|".join(commits_closed_df["COMMIT_ID"].to_list())
isFix = self.github_data_dump_df["COMMIT_ID"].str.contains(search_pattern)
self.github_data_dump_df["IsFix"] = isFix.replace((True, False), (1, 0))
commit_isFix_end_time = time.time()
print(f"Time Taken for determining for Commit is for Fix {commit_isFix_end_time - commit_isFix_start_time}")
def get_commit_type(self):
"""
        This method labels each record as a merge or non-merge commit, based on whether
        the commit message contains merge pull request text.
"""
commit_type_start_time = time.time()
search_pattern = "|".join(["Merge pull request"])
isFix = self.github_data_dump_df["COMMIT_MESSAGE"].str.contains(search_pattern)
self.github_data_dump_df["COMMIT_TYPE"] = isFix.replace((True, False), (1, 0))
commit_type_end_time = time.time()
print(f"Time Taken for getting commit type is {commit_type_end_time - commit_type_start_time}")
def get_file_size(self, ):
"""
        This method extracts the file size for each commit and its corresponding
        files using the GitHub REST API.
"""
file_age_start_time = time.time()
self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMITTER_TIMESTAMP"],
ascending=[True])
commit_id_list = self.github_data_dump_df["COMMIT_ID"].drop_duplicates().to_list()
print(f"Total Content Urls to be requested {len(commit_id_list)}")
file_size_url_list = Utilities.format_url(self.web_constants.file_size_url, commit_id_list)
batch_size = int(CDPConfigValues.git_api_batch_size)
web_connection = WebConnection()
results = web_connection.get_async_file_size(file_size_url_list, self.github_data_dump_df, self.web_constants,
batch_size)
file_size = results[0]
failed_urls = results[1]
loop_counter = 1
while len(failed_urls) > 0 and loop_counter < 200:
loop_counter = loop_counter + 1
print(f"Sleeping for {60 * loop_counter} Seconds in get_file_size ...")
time.sleep(60 * loop_counter)
print(f"Total Failed URL's re-trying {len(failed_urls)}")
results = web_connection.get_async_file_size(failed_urls, self.github_data_dump_df, self.web_constants,
batch_size=batch_size)
failed_urls = results[1]
file_size = file_size + results[0]
file_size_df = pd.DataFrame(file_size, columns=["COMMIT_ID", "FILE_NAME", "FILE_SIZE"])
file_size_df = file_size_df.drop_duplicates()
file_size_df = file_size_df.sort_values(by=["COMMIT_ID"])
self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["COMMIT_ID"])
self.github_data_dump_df = pd.merge(self.github_data_dump_df, file_size_df, how="left",
left_on=["COMMIT_ID", "FILE_NAME"], right_on=["COMMIT_ID", "FILE_NAME"])
file_age_end_time = time.time()
print(f"Fetched all file sizes in {file_age_end_time - file_age_start_time}")
@staticmethod
async def calculate_commit_file_age_and_number_of_developer_mp(file_df, file_name):
"""
        Helper method that calculates the file age and number of developers for a single file
:param file_df: dataframe containing the file details
:type file_df: pandas dataframe
:param file_name: Name of the file
:type file_name: str
"""
number_of_developers, file_age = list(), list()
counter = 0
df_len = len(file_df)
result = defaultdict()
# file_age_normal = list()
while counter < df_len:
# Changed as part of review comment
# if counter == 0 or file_df["FILE_STATUS"].iloc[counter] == "A":
if counter == 0:
file_age.append(0)
# file_age_normal.append(0)
elif counter > 0:
# file_age_normal.append(
# (file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
# counter - 1]))
age = (file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
counter - 1]).days * 24 * 3600 + \
(file_df["COMMITTER_TIMESTAMP"].iloc[counter] - file_df["COMMITTER_TIMESTAMP"].iloc[
counter - 1]).seconds
file_age.append(age)
current_timestamp = file_df["COMMITTER_TIMESTAMP"].iloc[counter]
# if file_df["FILE_STATUS"].iloc[counter] == "A":
# Changed as part of review comment
if counter == 0:
number_of_developers.append(1)
else:
number_of_developers.append(
len(set(file_df.loc[file_df["COMMITTER_TIMESTAMP"] <= current_timestamp]["COMMITTER_NAME"])))
counter = counter + 1
await asyncio.sleep(0)
result[file_name] = (file_age, number_of_developers)
return result
async def execute_calculate_commit_file_age_and_number_of_developer_mp(self, batch):
result = await asyncio.gather(
*[self.calculate_commit_file_age_and_number_of_developer_mp(
self.github_data_dump_df.loc[self.github_data_dump_df["FILE_NAME"] == file][
["COMMIT_ID", "COMMITTER_NAME", "FILE_STATUS", "COMMITTER_TIMESTAMP"]], file) for file in batch]
)
return result
def get_commit_file_age_and_number_of_developer_mp(self, ):
"""
        This method calculates the file age, i.e. the time difference between the current change and
        the previous change to that file, and the number of developers who have worked on that file.
"""
commit_age_no_of_dev_start_time = time.time()
self.github_data_dump_df["COMMITTER_TIMESTAMP"] = pd.to_datetime(
self.github_data_dump_df["COMMITTER_TIMESTAMP"])
self.github_data_dump_df = self.github_data_dump_df.sort_values(by=["FILE_NAME", "COMMITTER_TIMESTAMP"],
ascending=[True, True])
file_names = self.github_data_dump_df["FILE_NAME"]
file_names = file_names.drop_duplicates().to_list()
commit_file_age, number_of_developers, failed_batches = list(), list(), list()
results = defaultdict()
batch_size = 100
file_batches = list(Utilities.create_batches(file_names, batch_size=batch_size))
print(f"For Getting Commit File Age and Numbre of Developers, Batch size {batch_size}")
total_batches = len(file_batches)
batch_counter, percent = 0, 0
print(f"Total Batches to be executed for getting commit file age and number of developer is {total_batches}")
for batch in file_batches:
try:
loop = asyncio.get_event_loop()
asyncio.set_event_loop(asyncio.new_event_loop())
if (total_batches * percent) // 100 == batch_counter:
print(
f"Total Batches completed is {batch_counter} and Failed batches Count is {len(failed_batches)}")
percent = percent + 10
results_list = loop.run_until_complete(
self.execute_calculate_commit_file_age_and_number_of_developer_mp(batch))
for result in results_list:
for result_key in result.keys():
results[result_key] = result[result_key]
except Exception as e:
print(f"Exception Occurred!!!\n{traceback.print_tb(e.__traceback__)}")
for file_name in batch:
failed_batches.append(file_name)
batch_counter = batch_counter + 1
"""Retrieving the result of the dictionary on sorted order of the keys (author is the result_key)"""
for result_key in sorted(results.keys()):
commit_file_age = commit_file_age + results[result_key][0]
number_of_developers = number_of_developers + results[result_key][1]
self.github_data_dump_df["FILE_AGE"] = commit_file_age
self.github_data_dump_df["NO_OF_DEV"] = number_of_developers
commit_age_no_of_dev_end_time = time.time()
print(f"Time Taken FILE_AGE and NO_OF_DEV {commit_age_no_of_dev_end_time - commit_age_no_of_dev_start_time}")
async def calculate_developer_experience(self, file_df, author_name):
"""
        Helper method that computes a developer experience score for a single author
"""
file_df["Year"] = (pd.to_datetime(self.base_timestamp) - pd.to_datetime(file_df["COMMITTER_TIMESTAMP"])) / (
np.timedelta64(1, 'D') * 365)
file_df["Year"] = file_df["Year"].apply(lambda x: math.ceil(x) + 1)
unique_file_df = file_df
unique_file_df = unique_file_df.drop_duplicates()
exp = list()
dev_exp = defaultdict()
counter = 0
while counter < (len(unique_file_df)):
current_timestamp = unique_file_df["COMMITTER_TIMESTAMP"].iloc[counter]
commit_id = unique_file_df["COMMIT_ID"].iloc[counter]
# if counter == 0:
# exp.append((commit_id, current_timestamp, 0))
# else:
# year_count = unique_file_df.loc[unique_file_df["COMMITTER_TIMESTAMP"] < current_timestamp][
# "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
# year_count["Exp"] = year_count["Counts"] / (year_count["Year"])
#
# exp.append((commit_id, current_timestamp, year_count["Exp"].sum()))
# year_count = unique_file_df.iloc[counter]
# Changed as part of review comment
year_count = unique_file_df.iloc[0:counter + 1][
"Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
# year_count = unique_file_df.loc[unique_file_df["COMMITTER_TIMESTAMP"] <= current_timestamp][
# "Year"].value_counts().rename_axis('Year').reset_index(name='Counts')
year_count["Exp"] = year_count["Counts"] / (year_count["Year"])
exp.append((commit_id, current_timestamp, year_count["Exp"].sum()))
counter = counter + 1
exp_df = | pd.DataFrame(exp, columns=["COMMIT_ID", "COMMITTER_TIMESTAMP", "EXP"]) | pandas.DataFrame |
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
)
tm.assert_frame_equal(frame, expected)
def test_frame_from_records_utc(self):
rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index="begin_time")
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index="f1")
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert "index" not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
assert np.isnan(df["c"][0])
def test_from_records_iterator(self):
arr = np.array(
[(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
dtype=[
("x", np.float64),
("u", np.float32),
("y", np.int64),
("z", np.int32),
],
)
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame(
{
"x": np.array([1.0, 3.0], dtype=np.float64),
"u": np.array([1.0, 3.0], dtype=np.float32),
"y": np.array([2, 4], dtype=np.int64),
"z": np.array([2, 4], dtype=np.int32),
}
)
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield (i, letters[i % len(letters)], i / length)
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = | DataFrame(data, columns=columns_names) | pandas.DataFrame |
'''
Implementation of SimpleImputer that returns Pandas DataFrame
'''
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
class DataFrameSimpleImputer(BaseEstimator, TransformerMixin):
def __init__(self, fit_params):
self.simple_imputer = None
self.fit_params = fit_params
    def fit(self, X, y=None):
        self.simple_imputer = SimpleImputer(**self.fit_params)
        # fit here so that transform() reuses the learned statistics instead of re-fitting
        self.simple_imputer.fit(X)
        return self
def transform(self, X, y=None):
        data = self.simple_imputer.transform(X)
return | DataFrame(data, columns=X.columns.values) | pandas.DataFrame |
import pandas as pd
import random
import numpy as np
from multiprocessing import Pool
import time
import math
import itertools
from scipy.stats import binom as binom_stat, rankdata
from scipy.special import binom
from src.core.regression.accuracy_scores import \
hazard_ratio, \
dynamic_auc, \
logrank
from src.core.utils import seconds_to_hours
from .feature_pre_selector import FeaturePreSelector
from .feature_selector import FeatureSelector
from .preprocessor import Preprocessor
from .model import Model
class ExhaustiveBase(
FeaturePreSelector,
FeatureSelector,
Preprocessor,
Model,
):
y_features = None
def __init__(
self,
df, ann, n_k, output_dir,
feature_pre_selector, feature_pre_selector_kwargs,
feature_selector, feature_selector_kwargs,
preprocessor, preprocessor_kwargs,
model, model_kwargs,
model_cv_ranges, model_cv_folds,
scoring_functions, main_scoring_function, main_scoring_threshold,
limit_feature_subsets=False, n_feature_subsets=None,
shuffle_feature_subsets=True,
n_processes=1, random_state=None, verbose=True,
):
"""Class constructor
Parameters
----------
df : pandas.DataFrame
A pandas DataFrame whose rows represent samples
and columns represent features.
ann : pandas.DataFrame
DataFrame with annotation of samples. Three columns are mandatory:
Class (binary labels), Dataset (dataset identifiers) and
Dataset type (Training, Filtration, Validation).
n_k : pandas.DataFrame
DataFrame with columns n and k defining a grid
for exhaustive feature selection: n is a number
of selected features, k is a length of each
features subset.
output_dir : str
Path to dir for output files
feature_pre_selector : callable
Function for feature pre-selection. For examples, see
feature_pre_selectors.py.
feature_pre_selector_kwargs : dict
Dict of keyword arguments for feature pre-selector.
feature_selector : callable
Function for feature selection. For examples, see
feature_selectors.py.
feature_selector_kwargs : dict
Dict of keyword arguments for feature selector.
preprocessor : sklearn.preprocessing-like
Class for data preprocessing, should have fit and
transform methods. Any method from sklearn.preprocessing
will be suitable.
preprocessor_kwargs : dict
Dict of keyword arguments for preprocessor initialization.
model : sklearn-like
Model class, should have fit and
predict methods. Most of the sklearn models will be suitable.
model_kwargs : dict
Dict of keyword arguments for model initialization.
model_cv_ranges : dict
Dict defining model parameters which should be
cross-validated. Keys are parameter names, values are
iterables for grid search.
model_cv_folds : int
Number of fold for K-Folds cross-validation.
limit_feature_subsets : bool
If true, limit the number of processed feature subsets.
n_feature_subsets : int
Number of processed feature subsets.
shuffle_feature_subsets : bool
If true, processed feature subsets are selected randomly.
max_n : int
Maximal number of selected features.
max_estimated_time : float
Maximal estimated time of pipeline running
scoring_functions : dict
Dict with scoring functions which will be calculated
for each model. Keys are names (arbitrary strings),
values are sklearn.metrics-like callables (should accept
y_true, y_pred arguments and return a score).
main_scoring_function : str
Key from scoring_functions dict defining the "main" scoring
function which will be optimized during cross-validation
and will be used for model filtering.
main_scoring_threshold : float
A number defining threshold for model filtering:
models with score below this threshold on
training/filtration sets will not be further evaluated.
n_processes : int
Number of processes.
random_state : int
Random seed (set to an arbitrary integer for reproducibility).
verbose : bool
If True, print running time for each pair of n, k.
"""
FeaturePreSelector.__init__(
self,
df, ann,
preselector_function=feature_pre_selector, kwargs=feature_pre_selector_kwargs,
)
Preprocessor.__init__(
self,
preprocessor_model=preprocessor, kwargs=preprocessor_kwargs,
)
FeatureSelector.__init__(
self,
self.df, self.ann,
output_dir,
selector_function=feature_selector, kwargs=feature_selector_kwargs,
)
Model.__init__(
self,
model=model, kwargs=model_kwargs, random_state=random_state,
)
self.model_cv_ranges = model_cv_ranges
self.model_cv_folds = model_cv_folds
self.n_k = n_k
self.output_dir = output_dir
self.n_processes = n_processes
self.random_state = random_state
self.verbose = verbose
self.limit_feature_subsets = limit_feature_subsets
self.n_feature_subsets = n_feature_subsets
self.shuffle_feature_subsets = shuffle_feature_subsets
self.scoring_functions = scoring_functions
self.main_scoring_function = main_scoring_function
self.main_scoring_threshold = main_scoring_threshold
self.datasets_ids = self.ann[['Dataset', 'Dataset type']].drop_duplicates().to_numpy()
def exhaustive_run(self):
"""Run the pipeline for classifier construction
using exhaustive feature selection.
Returns
-------
pandas.DataFrame
DataFrame with constructed classifiers and their
quality scores.
"""
# Iterate over n, k pairs
all_result_dfs = []
all_counts = []
summary_n_k = pd.DataFrame(columns=[
'n', 'k',
'num_training_reliable',
'num_validation_reliable',
'percentage_reliable',
])
for n, k in zip(self.n_k['n'], self.n_k['k']):
df_n_k_results, _ = self.exhaustive_run_n_k(n, k)
df_n_k_results['n'] = n
df_n_k_results['k'] = k
df_n_k_results.sort_values(
by=[column for column in df_n_k_results.columns if 'Training' in column],
ascending=False,
inplace=True,
)
all_result_dfs.append(df_n_k_results)
# Save models
res = | pd.concat(all_result_dfs, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This module holds functions for processing the geometry for setting up
the geometry of a ThermalNetwork based on a street geometry and a table of
buildings.
This file is part of project dhnx (). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location: https://github.com/oemof/DHNx
This module is not fully tested yet, so use it with care.
SPDX-License-Identifier: MIT
"""
try:
import geopandas as gpd
except ImportError:
print("Need to install geopandas to process geometry data.")
try:
from shapely.geometry import LineString
from shapely.geometry import MultiPoint
from shapely.geometry import Point
from shapely.geometry import shape
from shapely.ops import cascaded_union
from shapely.ops import nearest_points
except ImportError:
print("Need to install shapely to process geometry.")
import logging
import numpy as np
import pandas as pd
from . import geometry_operations as go
def line_of_point(point, gdf_lines):
"""Gets index of geometry of a GeoDataFrame, a point is located next to,
with a distance lower than 1e-8.
Parameters
----------
point : shapely.geometry.Point
gdf_lines : geopandas.GeoDataFrame
Returns
-------
int, float or str : Index of GeoDataFrame or Warning, if no geometry found.
"""
ind = None
for k, l in gdf_lines.iterrows():
if l['geometry'].distance(point) < 1e-8:
ind = k
if ind is None:
return Warning('No line found which has point on it!')
return ind
def point_to_array(point):
"""Returns the coordinates of a point as numpy.array
Parameters
----------
point : shapely.geometry.Point
Returns
-------
numpy.array()
"""
return np.array([point.x, point.y])
def calc_lot_foot(line, point):
"""
Calculates the lot foot point.
Parameters
----------
line : shapely.geometry.LineString
point : shapely.geometry.Point
Returns
-------
shapely.geometry.Point
"""
s_1 = shape(line).boundary[0]
s_2 = shape(line).boundary[1]
g_1 = point_to_array(s_1) # end point 1 of line
g_2 = point_to_array(s_2) # end point 2 of line
x_1 = point_to_array(point)
# calculate lotfusspunkt
u = g_2 - g_1 # vector of direction
n = np.array([u[1], -u[0]]) # normal vector of line
x_0 = g_1 # point on line
y = x_1 - (np.dot((x_1 - x_0), n) / np.dot(n, n)) * n
lot_foot_point = Point(y[0], y[1])
# # alternative generation via intersection
# # (=> intersections point is not exaclty on lines as well)
# y = x_1 - 2*(np.dot((x_1 - x_0), n)/np.dot(n, n)) * n
# lot_line = LineString([(y[0], y[1]), (x_1[0], x_1[1])])
# lot_foot_point = lot_line.intersection(line)
return lot_foot_point
def create_object_connections(points, lines, tol_distance=1):
"""Connects points to a line network.
Generally, the nearest point of the next line is used as connection the point.
Depending on the geometry, there are 3 options, the connection is created:
- nearest point is line ending => the connection line starts from this line ending
- nearest point is on the next line:
a) line endings are outside the tolerance => line is split and the nearest point
is used as connection point
b) line endings are within the tolerance distance => the next line ending is
used as connection point
The tolerance distance avoids the generation of short line elements.
This is for example the case if two buildings are directly opposite of the street.
Using simply the nearest point method could result in very short lines.
Parameters
----------
points : geopandas.GeoDataFrame
Points which should be connected to the line. GeoDataFrame with Points as geometry.
lines : geopandas.GeoDataFrame
The line-network to which the Points should be connected. The line geometry needs to
consists of simple lines based on one starting and one ending point. LineStrings
which contain more than 2 points are not allowed.
tol_distance : float
Tolerance distance for choosing the end of the line instead of the nearest point.
Returns
-------
geopandas.GeoDataFrame : The newly created connection lines
    geopandas.GeoDataFrame : The updated lines (some lines are split).
        All lines should only touch at the line endings.
"""
# check linestrings
for _, c in lines.iterrows():
if len(c['geometry'].coords) > 2:
raise ValueError("The Linestrings must consists of simple lines,"
" with only two coordinates!")
# empty geopandas dataframe for house connections
conn_lines = gpd.GeoDataFrame()
# iterate over all houses
for index, row in points.iterrows():
house_geo = row['geometry']
# the same with the original lines
all_lines = lines['geometry']
mergedlines = cascaded_union(all_lines)
# new nearest point method ############ #########
n_p = nearest_points(mergedlines, house_geo)[0]
# get index of line which is closest to the house
line_index = line_of_point(n_p, lines)
# get geometry of supply line
supply_line = lines.loc[line_index, 'geometry']
# get end points of line
supply_line_p0 = Point(list(supply_line.coords)[0])
supply_line_p1 = Point(list(supply_line.coords)[1])
supply_line_points = [supply_line_p0, supply_line_p1]
supply_line_mulitpoints = MultiPoint(supply_line_points)
if n_p in supply_line_points:
# case that nearest point is a line ending
logging.info(
'Connect buildings... id {}: '
'Connected to supply line ending (nearest point)'.format(index)
)
con_line = LineString([n_p, house_geo])
conn_lines = conn_lines.append({'geometry': con_line}, ignore_index=True)
else:
dist_to_endings = [x.distance(n_p) for x in supply_line_points]
if min(dist_to_endings) >= tol_distance:
# line is split, no line ending is close to the nearest point
# this also means the original supply line needs to be deleted
logging.info(
'Connect buildings... id {}: Supply line split'.format(index))
con_line = LineString([n_p, house_geo])
conn_lines = conn_lines.append({'geometry': con_line}, ignore_index=True)
lines.drop([line_index], inplace=True)
lines = lines.append(
{'geometry': LineString([supply_line_p0, n_p])},
ignore_index=True
)
lines = lines.append(
{'geometry': LineString([n_p, supply_line_p1])},
ignore_index=True
)
else:
# case that one or both line endings are closer than tolerance
# thus, the next line ending is chosen
logging.info(
'Connect buildings... id {}: Connected to Supply line ending '
'due to tolerance'.format(index))
conn_point = nearest_points(supply_line_mulitpoints, n_p)[0]
con_line = LineString([conn_point, house_geo])
conn_lines = conn_lines.append({'geometry': con_line}, ignore_index=True)
logging.info('Connection of buildings completed.')
connection_lines = gpd.GeoDataFrame(conn_lines, crs=lines.crs)
return connection_lines, lines
def check_geometry_type(gdf, types):
"""
Checks, if a geodataframe has only the given geometry types in its GeoSeries.
Parameters
----------
gdf : geopandas.GeoDataFrame
DataFrame to be checked.
types : list
List of types allowed for GeoDataFrame.
"""
actual_types = set(gdf['geometry'].type)
for type in actual_types:
if type not in types:
raise TypeError(
"Your input geometry has the wrong type. "
"Expected: {}. Got: {}".format(types, type)
)
def create_points_from_polygons(gdf, method='midpoint'):
"""
Converts the geometry of a polygon layer to a point layer.
Parameters
----------
gdf : geopandas.GeoDataFrame
method : str
Method to create a point from a polygon.
Returns
-------
geopandas.GeoDataFrame : GeoDataFrame with a point geometry.
"""
if gdf['geometry'].values[0].type == 'Point':
return gdf
if method == 'midpoint':
gdf['geometry'] = gdf['geometry'].centroid
return gdf
raise ValueError(
'No other method than >midpoint< implemented!'
)
def process_geometry(lines, consumers, producers,
method='midpoint', projected_crs=4647,
tol_distance=2):
"""
This function connects the consumers and producers to the line network, and prepares the
attributes of the geopandas.GeoDataFrames for importing as dhnx.ThermalNetwork.
The ids of the lines are overwritten.
Parameters
----------
lines : geopandas.GeoDataFrame
Potential routes for the DHS. Expected geometry Linestrings or MultilineStrings.
The graph of this line network should be connected.
consumers : geopandas.GeoDataFrame
Location of demand/consumers. Expected geometry: Polygons or Points.
producers : geopandas.GeoDataFrame
Location of supply sites. Expected geometry: Polygons or Points.
method : str
Method for creating the point if polygons are given for the consumers and producers.
projected_crs : EPSG integer code
EPSG Coordinate reference system number (eg 4647),
which is used for the geometry operations.
A projected crs must be used!
tol_distance : float
Tolerance distance at connection the points to the line network
for choosing the end of the line instead of the lot.
Returns
-------
dict : Dictionary with 4 geopandas.GeoDataFrames: The keys of the Dict are
equal to the components of the dhnx.ThermalNetwork: 'forks', 'consumers',
'producers', 'pipes'.
"""
# check whether the expected geometry is used for geo dataframes
check_geometry_type(lines, types=['LineString', 'MultiLineString'])
for gdf in [producers, consumers]:
check_geometry_type(gdf, types=['Polygon', 'Point', 'MultiPolygon'])
# # split multilinestrings to single lines with only 1 starting and 1 ending point
lines = go.split_multilinestr_to_linestr(lines)
# check and convert crs if it is not already the `projected_crs`
lines = go.check_crs(lines, crs=projected_crs)
for layer in [producers, consumers]:
layer = go.check_crs(layer, crs=projected_crs)
layer = create_points_from_polygons(layer, method=method)
layer.reset_index(inplace=True, drop=True)
layer.index.name = 'id'
if 'id' in layer.columns:
layer.drop(['id'], axis=1, inplace=True)
layer['lat'] = layer['geometry'].apply(lambda x: x.y)
layer['lon'] = layer['geometry'].apply(lambda x: x.x)
producers['id_full'] = 'producers-' + producers.index.astype('str')
producers['type'] = 'G'
consumers['id_full'] = 'consumers-' + consumers.index.astype('str')
consumers['type'] = 'H'
# Add lines to consumers and producers
lines_consumers, lines = create_object_connections(consumers, lines, tol_distance=tol_distance)
lines_producers, lines = create_object_connections(producers, lines, tol_distance=tol_distance)
# Weld continuous line segments together and cut loose ends
lines = go.weld_segments(
lines, lines_producers, lines_consumers,
# debug_plotting=True,
)
# add additional line identifier
lines_producers['type'] = 'GL' # GL for generation line
lines['type'] = 'DL' # DL for distribution line
lines_consumers['type'] = 'HL' # HL for house line
# generate forks point layer
forks = go.create_forks(lines)
# concat lines
lines_all = | pd.concat([lines, lines_consumers, lines_producers], sort=False) | pandas.concat |
import multiprocessing as mp
import os
import string
import warnings
import numpy as np
import pandas as pd
import uncertainties as un
from nptdms import TdmsFile
from numpy import NaN, sqrt
from scipy.stats import t
from tables import NoSuchNodeError
from uncertainties import unumpy as unp
from . import diodes
from ..images import schlieren
from ... import uncertainty
from ...dir import d_drive
from ...simulation import thermo
_DIR = os.path.split(__file__)[0]
_STRUCTURE_END_DATES = (
pd.Timestamp("2019-11-01"),
pd.Timestamp("2020-05-05")
)
_SPATIAL_VARIATIONS = pd.read_csv(
os.path.join(
_DIR,
"../../data",
"spatial_variations.csv"
)
)
def _collect_schlieren_dirs(
base_dir,
test_date
):
"""
When reading in camera data from these tests, we will ignore the spatial
directory since it contains no schlieren information. It will still be
used, but not in this step. Directories containing a `.old` file have a
different structure than newer directories, which must be accounted for.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
Returns
-------
list
        ordered list of directories containing schlieren images for each shot
"""
raw_dir = os.path.join(
base_dir,
test_date
)
if not os.path.isdir(raw_dir):
return []
contents = os.listdir(raw_dir)
if ".old" in contents:
raw_dir = os.path.join(
base_dir,
test_date,
"Camera"
)
contents = os.listdir(raw_dir)
return sorted([
os.path.join(raw_dir, item)
for item in contents
if os.path.isdir(os.path.join(raw_dir, item))
and "shot" in item.lower()
and os.path.exists(os.path.join(raw_dir, item, "frames"))
and os.path.exists(os.path.join(raw_dir, item, "bg"))
])
class _ProcessStructure0:
@classmethod
def _collect_test_dirs(
cls,
base_dir,
test_date
):
"""
The first step of reading in an old test directory is to determine
which directories contain valid tests. Under the old DAQ system, the
.vi would generate a new folder each time it was run. Only tests which
successfully generate a `diodes.tdms` file can be considered completed
tests. Some of these may still be failed detonations; this issue will
be dealt with on joining with schlieren data, which contains
information about whether or not a detonation attempt succeeded.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
Returns
-------
list
ordered list of directories containing diode output
"""
raw_dir = os.path.join(
base_dir,
test_date,
"Sensors"
)
return sorted([
root
for root, _, files in os.walk(raw_dir, topdown=True)
if "diodes.tdms" in files
])
@classmethod
def _get_cutoff_pressure(
cls,
df_tdms_pressure,
kind="fuel",
):
"""
This function accepts a dataframe imported from a `pressure.tdms` file.
Old test data was output in amps; this was good and I should probably
have kept it that way. Old tests also logged each fill event
separately. Extract the desired data, build a confidence interval,
apply the calibration, and output the resulting value including
uncertainty.
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
kind : str
Kind of cutoff pressure to get, e.g. fuel, oxidizer
Returns
-------
un.ufloat
Float with applied uncertainty
"""
kind = kind.title()
if kind not in {"Fuel", "Oxidizer", "Vacuum", "Diluent"}:
raise ValueError("bad kind")
# in these tests there is no vacuum logging. These are undiluted tests,
# which means the diluent pressure is identical to the vacuum pressure.
if kind == "Vacuum":
kind = "Diluent"
pressure = df_tdms_pressure[
"/'%s Fill'/'Manifold'" % kind
].dropna() * uncertainty.PRESSURE_CAL["slope"] + \
uncertainty.PRESSURE_CAL["intercept"]
# TODO: update calculation to be like new pressure calc
return unp.uarray(
pressure,
uncertainty.u_pressure(pressure, daq_err=False)
).mean()
@classmethod
def _get_partial_pressure(
cls,
df_tdms_pressure,
kind="fuel"
):
"""
Fill order: vacuum -> (diluent) -> oxidizer -> fuel
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
kind : str
Kind of cutoff pressure to get, e.g. fuel, oxidizer
Returns
-------
un.ufloat
Float with applied uncertainty
"""
p_ox = cls._get_cutoff_pressure(df_tdms_pressure, "oxidizer")
if kind.lower() == "fuel":
return cls._get_cutoff_pressure(df_tdms_pressure, "fuel") - p_ox
elif kind.lower() == "oxidizer":
return p_ox
else:
raise ValueError("only fuels and oxidizers in this analysis")
@classmethod
def _get_initial_pressure(
cls,
df_tdms_pressure
):
"""
In old data, the initial mixture pressure is the fuel cutoff pressure
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
Returns
-------
un.ufloat
Float with applied uncertainty
"""
return cls._get_cutoff_pressure(df_tdms_pressure, kind="fuel")
@classmethod
def _get_initial_temperature(
cls,
df_tdms_temperature
):
"""
Old temperatures need to come from the tube thermocouple, which is
type K, because the manifold thermocouple was jacked up at the time.
Parameters
----------
df_tdms_temperature : pd.DataFrame
Dataframe containing test-specific temperature trace
Returns
-------
un.ufloat
Test-averaged initial temperature with applied uncertainty
"""
# TODO: update calculation to be like new pressure calc
return un.ufloat(
df_tdms_temperature["/'Test Readings'/'Tube'"].mean(),
uncertainty.u_temperature(
df_tdms_temperature["/'Test Readings'/'Tube'"],
tc_type="K",
collapse=True
)
)
@classmethod
def __call__(
cls,
base_dir,
test_date,
f_a_st=0.04201680672268907,
multiprocess=False
):
"""
Process data from an old-style data set.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
f_a_st : float
Stoichiometric fuel/air ratio for the test mixture. Default value
is for propane/air.
multiprocess : bool
Set to True to parallelize processing of a single day's tests
Returns
-------
List[pd.DataFrame, dict]
A list in which the first item is a dataframe of the processed
tube data and the second is a dictionary containing
background-subtracted schlieren images
"""
df = pd.DataFrame(
columns=["date", "shot", "sensors", "diodes", "schlieren"],
)
df["sensors"] = cls._collect_test_dirs(base_dir, test_date)
df["schlieren"] = _collect_schlieren_dirs(base_dir, test_date)
df = df[df["schlieren"].apply(lambda x: "failed" not in x)]
df["date"] = test_date
df["shot"] = [
int(os.path.split(d)[1].lower().replace("shot", "").strip())
for d in df["schlieren"].values
]
images = dict()
if multiprocess:
pool = mp.Pool()
results = pool.starmap(
cls._process_single_test,
[[idx, row, f_a_st] for idx, row in df.iterrows()]
)
pool.close()
for idx, row_results in results:
df.at[idx, "phi"] = row_results["phi"]
df.at[idx, "u_phi"] = row_results["u_phi"]
df.at[idx, "p_0"] = row_results["p_0"]
df.at[idx, "u_p_0"] = row_results["u_p_0"]
df.at[idx, "t_0"] = row_results["t_0"]
df.at[idx, "u_t_0"] = row_results["u_t_0"]
df.at[idx, "p_fuel"] = row_results["p_fuel"]
df.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df.at[idx, "wave_speed"] = row_results["wave_speed"]
df.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df.at[idx, "diodes"] = row_results["diodes"]
images.update(row_results["schlieren"])
else:
for idx, row in df.iterrows():
_, row_results = cls._process_single_test(idx, row, f_a_st)
# output results
df.at[idx, "phi"] = row_results["phi"]
df.at[idx, "u_phi"] = row_results["u_phi"]
df.at[idx, "p_0"] = row_results["p_0"]
df.at[idx, "u_p_0"] = row_results["u_p_0"]
df.at[idx, "t_0"] = row_results["t_0"]
df.at[idx, "u_t_0"] = row_results["u_t_0"]
df.at[idx, "p_fuel"] = row_results["p_fuel"]
df.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df.at[idx, "wave_speed"] = row_results["wave_speed"]
df.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df.at[idx, "diodes"] = row_results["diodes"]
images.update(row_results["schlieren"])
return df, images
@classmethod
def _process_single_test(
cls,
idx,
row,
f_a_st
):
"""
Process a single row of test data. This has been separated into its
own function to facilitate the use of multiprocessing.
Parameters
----------
row : pd.Series
Current row of test data
f_a_st : float
Stoichiometric fuel/air ratio for the test mixture.
Returns
-------
        Tuple[Int, Dict]
Calculated test data and associated uncertainty values for the
current row
"""
# background subtraction
image = {
"{:s}_shot{:02d}".format(
row["date"],
row["shot"]
): schlieren.bg_subtract_all_frames(row["schlieren"])
}
# gather pressure data
df_tdms_pressure = TdmsFile(
os.path.join(
row["sensors"],
"pressure.tdms"
)
).as_dataframe()
p_init = cls._get_initial_pressure(df_tdms_pressure)
p_fuel = cls._get_partial_pressure(
df_tdms_pressure,
kind="fuel"
)
p_oxidizer = cls._get_partial_pressure(
df_tdms_pressure,
kind="oxidizer"
)
phi = thermo.get_equivalence_ratio(p_fuel, p_oxidizer, f_a_st)
# gather temperature data
loc_temp_tdms = os.path.join(
row["sensors"],
"temperature.tdms"
)
if os.path.exists(loc_temp_tdms):
df_tdms_temperature = TdmsFile(
os.path.join(
row["sensors"],
"temperature.tdms"
)
).as_dataframe()
t_init = cls._get_initial_temperature(df_tdms_temperature)
else:
t_init = un.ufloat(NaN, NaN)
# wave speed measurement
diode_loc = os.path.join(row["sensors"], "diodes.tdms")
wave_speed = diodes.calculate_velocity(diode_loc)[0]
# output results
out = dict()
out["diodes"] = diode_loc
out["schlieren"] = image
out["phi"] = phi.nominal_value
out["u_phi"] = phi.std_dev
out["p_0"] = p_init.nominal_value
out["u_p_0"] = p_init.std_dev
out["t_0"] = t_init.nominal_value
out["u_t_0"] = t_init.std_dev
out["p_fuel"] = p_fuel.nominal_value
out["u_p_fuel"] = p_fuel.std_dev
out["p_oxidizer"] = p_oxidizer.nominal_value
out["u_p_oxidizer"] = p_oxidizer.std_dev
out["wave_speed"] = wave_speed.nominal_value
out["u_wave_speed"] = wave_speed.std_dev
return idx, out
class _ProcessStructure1:
@classmethod
def __call__(
cls,
base_dir,
test_date,
sample_time=pd.Timedelta(seconds=70),
mech="gri30.cti",
diode_spacing=1.0668,
multiprocess=False
):
"""
Process data from a day of testing using the newer directory structure
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
sample_time : None or pd.Timedelta
Length of hold period at the end of a fill state. If None is passed,
value will be read from nominal test conditions.
mech : str
Mechanism for cantera calculations
diode_spacing : float
Diode spacing, in meters
multiprocess : bool
Set true to parallelize data analysis
Returns
-------
Tuple[pd.DataFrame, Dict]
Tuple containing a dataframe of test results and a dictionary of
background subtracted schlieren images
"""
dir_data = os.path.join(base_dir, test_date)
df_tests = cls._find_test_times(base_dir, test_date)
n_found_tests = len(df_tests)
n_shot_dirs = len([d for d in os.listdir(dir_data) if "Shot" in d])
if n_found_tests == 0:
raise ValueError("No tests detected in sensor log.tdms")
elif n_found_tests != n_shot_dirs:
raise ValueError("Number of tests does not match number of shots")
df_nominal = cls._load_nominal_conditions(dir_data)
df_sensor = TdmsFile(os.path.join(
dir_data, "sensor log.tdms"
)).as_dataframe()
df_pressure = cls._extract_sensor_data(df_sensor, "pressure")
df_temperature = cls._extract_sensor_data(df_sensor, "temperature")
del df_sensor
df_schlieren = pd.DataFrame(columns=["shot", "schlieren"])
df_schlieren["schlieren"] = _collect_schlieren_dirs(
base_dir, test_date
)
df_schlieren["shot"] = df_schlieren["schlieren"]
df_schlieren["shot"] = [
int(os.path.split(d)[1].lower().replace("shot", "").strip())
for d in df_schlieren["schlieren"]
if "failed" not in d
]
df_tests = df_tests.merge(df_schlieren, on="shot", how="left")
df_diode_locs = pd.DataFrame(columns=["shot", "diodes"])
df_diode_locs["diodes"] = diodes.find_diode_data(dir_data)
df_diode_locs["shot"] = [
int(
os.path.split(
os.path.dirname(d))[1].lower().replace(
"shot", ""
).strip()
)
for d in df_diode_locs["diodes"]
]
df_tests = df_tests.merge(df_diode_locs, on="shot", how="left")
df_state = TdmsFile(os.path.join(
base_dir, test_date, "tube state.tdms"
)).as_dataframe()
df_state.columns = ["time", "state", "mode"]
images = dict()
if multiprocess:
pool = mp.Pool()
results = pool.starmap(
cls._process_single_test,
[[
idx,
df_nominal,
df_pressure,
df_temperature,
df_state,
sample_time,
test_time_row,
mech,
diode_spacing
] for idx, test_time_row in df_tests.iterrows()]
)
pool.close()
for idx, row_results in results:
if row_results["schlieren"] is not None:
images.update(row_results["schlieren"])
df_tests.at[idx, "t_0"] = row_results["t_0"]
df_tests.at[idx, "u_t_0"] = row_results["u_t_0"]
df_tests.at[idx, "p_0_nom"] = row_results["p_0_nom"]
df_tests.at[idx, "p_0"] = row_results["p_0"]
df_tests.at[idx, "u_p_0"] = row_results["u_p_0"]
df_tests.at[idx, "phi_nom"] = row_results["phi_nom"]
df_tests.at[idx, "phi"] = row_results["phi"]
df_tests.at[idx, "u_phi"] = row_results["u_phi"]
df_tests.at[idx, "fuel"] = row_results["fuel"]
df_tests.at[idx, "p_fuel"] = row_results["p_fuel"]
df_tests.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df_tests.at[idx, "oxidizer"] = row_results["oxidizer"]
df_tests.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df_tests.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df_tests.at[idx, "diluent"] = row_results["diluent"]
df_tests.at[idx, "p_diluent"] = row_results["p_diluent"]
df_tests.at[idx, "u_p_diluent"] = row_results["u_p_diluent"]
df_tests.at[idx, "dil_mf_nom"] = row_results["dil_mf_nom"]
df_tests.at[idx, "dil_mf"] = row_results["dil_mf"]
df_tests.at[idx, "u_dil_mf"] = row_results["u_dil_mf"]
df_tests.at[idx, "wave_speed"] = row_results["wave_speed"]
df_tests.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df_tests.at[idx, "cutoff_fuel"] = row_results["cutoff_fuel"]
df_tests.at[idx, "cutoff_vacuum"] = row_results["cutoff_vacuum"]
df_tests.at[idx, "cutoff_diluent"] = \
row_results["cutoff_diluent"]
df_tests.at[idx, "cutoff_oxidizer"] = \
row_results["cutoff_oxidizer"]
df_tests.at[idx, "u_cutoff_fuel"] = \
row_results["u_cutoff_fuel"]
df_tests.at[idx, "u_cutoff_vacuum"] = \
row_results["u_cutoff_vacuum"]
df_tests.at[idx, "u_cutoff_diluent"] = \
row_results["u_cutoff_diluent"]
df_tests.at[idx, "u_cutoff_oxidizer"] = \
row_results["u_cutoff_oxidizer"]
else:
for idx, test_time_row in df_tests.iterrows():
# noinspection PyTypeChecker
_, row_results = cls._process_single_test(
idx,
df_nominal,
df_pressure,
df_temperature,
df_state,
sample_time,
test_time_row,
mech,
diode_spacing
)
# output results
if row_results["schlieren"] is not None:
images.update(row_results["schlieren"])
df_tests.at[idx, "t_0"] = row_results["t_0"]
df_tests.at[idx, "u_t_0"] = row_results["u_t_0"]
df_tests.at[idx, "p_0_nom"] = row_results["p_0_nom"]
df_tests.at[idx, "p_0"] = row_results["p_0"]
df_tests.at[idx, "u_p_0"] = row_results["u_p_0"]
df_tests.at[idx, "phi_nom"] = row_results["phi_nom"]
df_tests.at[idx, "phi"] = row_results["phi"]
df_tests.at[idx, "u_phi"] = row_results["u_phi"]
df_tests.at[idx, "fuel"] = row_results["fuel"]
df_tests.at[idx, "p_fuel"] = row_results["p_fuel"]
df_tests.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df_tests.at[idx, "oxidizer"] = row_results["oxidizer"]
df_tests.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df_tests.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df_tests.at[idx, "diluent"] = row_results["diluent"]
df_tests.at[idx, "p_diluent"] = row_results["p_diluent"]
df_tests.at[idx, "u_p_diluent"] = row_results["u_p_diluent"]
df_tests.at[idx, "dil_mf_nom"] = row_results["dil_mf_nom"]
df_tests.at[idx, "dil_mf"] = row_results["dil_mf"]
df_tests.at[idx, "u_dil_mf"] = row_results["u_dil_mf"]
df_tests.at[idx, "wave_speed"] = row_results["wave_speed"]
df_tests.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df_tests.at[idx, "cutoff_fuel"] = row_results["cutoff_fuel"]
df_tests.at[idx, "cutoff_vacuum"] = row_results["cutoff_vacuum"]
df_tests.at[idx, "cutoff_diluent"] = \
row_results["cutoff_diluent"]
df_tests.at[idx, "cutoff_oxidizer"] = \
row_results["cutoff_oxidizer"]
df_tests.at[idx, "u_cutoff_fuel"] = \
row_results["u_cutoff_fuel"]
df_tests.at[idx, "u_cutoff_vacuum"] = \
row_results["u_cutoff_vacuum"]
df_tests.at[idx, "u_cutoff_diluent"] = \
row_results["u_cutoff_diluent"]
df_tests.at[idx, "u_cutoff_oxidizer"] = \
row_results["u_cutoff_oxidizer"]
df_tests["date"] = test_date
        # keep diluent dtype consistent as object. pd.Series.where keeps values
        # where the condition is True (the opposite sense of np.where), so the
        # notna mask keeps real diluent entries and replaces NaN with "None".
df_tests["diluent"].where(
df_tests["diluent"].notna(),
"None",
inplace=True
)
return df_tests, images
@classmethod
def _process_single_test(
cls,
idx,
df_nominal,
df_pressure,
df_temperature,
df_state,
sample_time,
test_time_row,
mech="gri30.cti",
diode_spacing=1.0668
):
"""
Parameters
----------
idx : int
Index of the test to be analyzed
df_nominal : pd.DataFrame
Dataframe of nominal test conditions, untrimmed
df_pressure : pd.DataFrame
Dataframe of full-day test pressures
df_temperature : pd.DataFrame
Dataframe of full-day test temperatures
df_state : pd.DataFrame
Dataframe of tube state changes, untrimmed
sample_time : None or pd.Timedelta
Length of hold period at the end of a fill state. If None is passed,
value will be read from nominal test conditions.
test_time_row : pd.Series
Row of current test in the main test dataframe
mech : str
Mechanism for cantera calculations
diode_spacing : float
Diode spacing, in meters
Returns
-------
Tuple[Int, Dict]
A tuple containing the index of the analyzed test and a dictionary
of the test results
"""
out = dict()
# collect nominal test conditions
df_test_nominal = cls._get_test_nominal(df_nominal, test_time_row)
fuel = df_test_nominal["fuel"]
oxidizer = df_test_nominal["oxidizer"]
if oxidizer.lower() == "air":
oxidizer_species = "O2:1 N2:3.76"
else:
oxidizer_species = oxidizer
diluent = df_test_nominal["diluent"]
dil_mf_nom = df_test_nominal["diluent_mol_frac_nominal"]
phi_nom = df_test_nominal["phi_nominal"]
p_0_nom = df_test_nominal["p_0_nominal"]
if sample_time is None:
if hasattr(df_test_nominal, "sample_time"):
sample_time = pd.Timedelta(
seconds=df_test_nominal["sample_time"]
)
else:
sample_time = pd.Timedelta(seconds=70)
# collect current test temperature with uncertainty
# TODO: move to separate function and update calculation to be like
# new pressure calc
temps = cls._collect_current_test_df(
df_temperature,
test_time_row
)["temperature"].values
temps = unp.uarray(
temps,
uncertainty.u_temperature(temps)
)
t_0 = temps.mean()
# collect current test pressures
df_current_test_pressure = cls._collect_current_test_df(
df_pressure,
test_time_row
)
df_state_cutoff_times = cls._get_pressure_cutoff_times(
df_state,
test_time_row,
sample_time
)
# extract cutoff pressures
p_cutoff_vac = cls._get_cutoff_pressure(
"vacuum",
df_current_test_pressure,
df_state_cutoff_times
)
p_cutoff_fuel = cls._get_cutoff_pressure(
"fuel",
df_current_test_pressure,
df_state_cutoff_times
)
p_cutoff_oxidizer = cls._get_cutoff_pressure(
"oxidizer",
df_current_test_pressure,
df_state_cutoff_times
)
p_cutoff_diluent = cls._get_cutoff_pressure(
"diluent",
df_current_test_pressure,
df_state_cutoff_times
)
# calculate partial pressures
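        # each cutoff is the absolute tube pressure at the end of its fill
        # state, so component partial pressures are successive differences
        # (fill order: vacuum -> fuel -> diluent -> oxidizer)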
p_fuel = p_cutoff_fuel - p_cutoff_vac
p_diluent = p_cutoff_diluent - p_cutoff_fuel
# TODO: since current detonations use air as an oxidizer, add vacuum
# cutoff pressure to oxidizer partial pressure. Change this if non-air
# oxidizers are used
# using minimum in case of fill order change
p_oxidizer = p_cutoff_oxidizer - np.nanmin((
p_cutoff_diluent,
p_cutoff_fuel
)) + p_cutoff_vac
        # oxidizer is the last fill state, so its cutoff pressure is the total
        # initial pressure p_0
        p_0 = p_cutoff_oxidizer
# calculate equivalence ratio and diluent mole fraction
phi = thermo.get_equivalence_ratio(
p_fuel,
p_oxidizer,
thermo.get_f_a_st(
fuel,
oxidizer_species,
mech
)
)
dil_mf = thermo.get_dil_mol_frac(p_fuel, p_oxidizer, p_diluent)
# get wave speed
wave_speed = diodes.calculate_velocity(
test_time_row["diodes"],
diode_spacing=diode_spacing
)[0]
# background subtract schlieren
if not pd.isnull(test_time_row["schlieren"]):
# do bg subtraction
date = os.path.split(
os.path.dirname(
test_time_row["schlieren"]
)
)[1]
out["schlieren"] = {
"{:s}_shot{:02d}".format(
date,
int(test_time_row["shot"])
): schlieren.bg_subtract_all_frames(
test_time_row["schlieren"])
}
else:
out["schlieren"] = None
out["t_0"] = t_0.nominal_value
out["u_t_0"] = t_0.std_dev
out["p_0_nom"] = p_0_nom
out["p_0"] = p_0.nominal_value
out["u_p_0"] = p_0.std_dev
out["phi_nom"] = phi_nom
out["phi"] = phi.nominal_value
out["u_phi"] = phi.std_dev
out["fuel"] = fuel
out["p_fuel"] = p_fuel.nominal_value
out["u_p_fuel"] = p_fuel.std_dev
out["oxidizer"] = oxidizer
out["p_oxidizer"] = p_oxidizer.nominal_value
out["u_p_oxidizer"] = p_oxidizer.std_dev
out["diluent"] = diluent
out["p_diluent"] = p_diluent.nominal_value
out["u_p_diluent"] = p_diluent.std_dev
out["dil_mf_nom"] = dil_mf_nom
out["dil_mf"] = dil_mf.nominal_value
out["u_dil_mf"] = dil_mf.std_dev
out["wave_speed"] = wave_speed.nominal_value
out["u_wave_speed"] = wave_speed.std_dev
out["cutoff_fuel"] = p_cutoff_fuel.nominal_value
out["cutoff_vacuum"] = p_cutoff_vac.nominal_value
out["cutoff_diluent"] = p_cutoff_oxidizer.nominal_value
out["cutoff_oxidizer"] = p_cutoff_diluent.nominal_value
out["u_cutoff_fuel"] = p_cutoff_fuel.std_dev
out["u_cutoff_vacuum"] = p_cutoff_vac.std_dev
out["u_cutoff_diluent"] = p_cutoff_oxidizer.std_dev
out["u_cutoff_oxidizer"] = p_cutoff_diluent.std_dev
return idx, out
@staticmethod
def _load_nominal_conditions(dir_data):
"""
Loads nominal test conditions from disk
Parameters
----------
dir_data : str
Directory containing a test_conditions.csv file
Returns
-------
pd.DataFrame
Dataframe of nominal test conditions
"""
df_conditions = pd.read_csv(
os.path.join(
dir_data,
"test_conditions.csv"
)
)
df_conditions["datetime"] = pd.to_datetime(
df_conditions["datetime"],
utc=False
)
# drop unnecessary information
df_conditions = df_conditions[
[k for k in df_conditions.keys()
if k not in {"p_dil", "p_ox", "p_f"}]
]
old_cols = [
"datetime",
"diluent_mol_frac",
"equivalence",
"init_pressure"
]
new_cols = [
"time",
"diluent_mol_frac_nominal",
"phi_nominal",
"p_0_nominal"
]
df_conditions.rename(
columns={o: n for o, n in zip(old_cols, new_cols)},
inplace=True
)
df_conditions["p_0_nominal"] *= 101325 # p_0 recorded in atm cus im dum
return df_conditions
@staticmethod
def _get_test_nominal(
df_nominal,
test_time_row
):
"""
Collects nominal test conditions for a given test from a dataframe of
nominal test conditions
Parameters
----------
df_nominal : pd.DataFrame
Nominal test condition dataframe -- see _load_nominal_conditions
test_time_row : pd.Series
Row of current test in the main test dataframe
Returns
-------
pd.Series
Nominal test conditions for a particular test
"""
        # subtract one because the cumulative count runs over (1, len) while
        # iloc indexing runs over (0, len-1)
        # noinspection PyUnresolvedReferences
        # (cumsum is called on a pd.Series, which the inspection fails to see)
best_idx = (df_nominal["time"] < test_time_row[
"end"]).cumsum().max() - 1
return df_nominal.iloc[best_idx]
@staticmethod
def _extract_sensor_data(
df_sensor,
which="pressure",
dropna=True
):
"""
Extracts pressure or temperature data from full sensor dataframe.
Dropna option is included due to NaNs populated by pandas/nptdms,
which are caused by temperature and pressure traces having different
lengths.
Parameters
----------
df_sensor : pd.DataFrame
Dataframe of full tube tdms output
which : str
`pressure` or `temperature`
dropna : bool
Whether to drop NaN values from the output dataframe
Returns
-------
pd.DataFrame
Desired trace chosen by `which`
"""
if not {"temperature", "pressure"}.intersection({which}):
raise ValueError("which must be temperature or pressure")
        df_sens_out = df_sensor[[
            "/'%s'/'time'" % which,
            "/'%s'/'manifold'" % which
        ]]
        df_sens_out.columns = ["time", which]
        if dropna:
            df_sens_out = df_sens_out.dropna()
        return df_sens_out
@staticmethod
def _find_test_times(
base_dir,
test_date
):
"""
Locates start and end times of tests in a larger dataframe containing
all tube data
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
Returns
-------
pd.DataFrame
Dataframe containing start and end times of each test
"""
# end times
# The tube will only automatically go `Closed -> Vent` at the end of
# a completed test cycle
loc_state = os.path.join(base_dir, test_date, "tube state.tdms")
df_state = TdmsFile(loc_state).as_dataframe()
df_state.columns = ["time", "state", "mode"]
df_test_times = pd.DataFrame(columns=["shot", "start", "end"])
df_test_times["end"] = df_state[
(df_state["state"].shift(1) == "Tube Closed") &
(df_state["state"] == "Tube Vent") &
(df_state["mode"] == "auto")
]["time"]
df_test_times.reset_index(drop=True, inplace=True)
df_test_times["shot"] = df_test_times.index.values
# start times
# A test will be considered to have started at the last automatic mix
# section purge preceding its end time.
for i, time in enumerate(df_test_times["end"].values):
df_test_times.at[i, "start"] = df_state[
(df_state["time"].values < time) &
(df_state["state"] == "Mix Section Purge") &
(df_state["mode"] == "auto")
].iloc[-1].time
df_test_times["start"] = pd.to_datetime(df_test_times["start"])
return df_test_times
@staticmethod
def _mask_df_by_row_time(
df_in,
test_row,
include_ends=True
):
"""
Creates a mask of a dataframe with a `time` column from a series
object containing `start` and `end` time stamps.
Parameters
----------
df_in : pd.DataFrame
Dataframe containing a `time` column
test_row : pd.Series
Series containing `start` and `end` time stamp entries
include_ends : bool
True to include end points, false to exclude them
Returns
-------
pd.Series
Boolean series; True where time is within the desired range and
False where it isn't.
"""
start_time = test_row["start"]
end_time = test_row["end"]
if isinstance(start_time, pd.Series):
start_time = start_time.values[0]
end_time = end_time.values[0]
if include_ends:
return (
(df_in["time"] >= start_time) &
(df_in["time"] <= end_time)
)
else:
return (
(df_in["time"] > start_time) &
(df_in["time"] < end_time)
)
@classmethod
def _get_pressure_cutoff_times(
cls,
df_state,
test_time_row,
sample_time
):
"""
Locates start and end times of tube fill events
Parameters
----------
df_state : pd.DataFrame
Dataframe containing tube state changes
test_time_row : pd.Series
Row of current test in the main test dataframe
sample_time : pd.Timedelta
Length of hold period at the end of a fill state
Returns
-------
pd.DataFrame
Dataframe containing start and end times of each portion of the
tube fill sequence
"""
        # noinspection PyUnresolvedReferences
        # (the mask is a pd.Series of booleans, which the inspection fails to see)
state_mask = cls._mask_df_by_row_time(
df_state,
test_time_row,
include_ends=False
).values
df_state_row = pd.DataFrame(
data={
"state": [
"vacuum",
"fuel",
"diluent",
"oxidizer"
],
"end": [
df_state[
state_mask &
(df_state["state"] == "Fuel Fill")
]["time"].min(),
df_state[
state_mask &
(df_state["state"] == "Diluent Fill")
]["time"].min(),
df_state[
state_mask &
(df_state["state"] == "Oxidizer Fill")
]["time"].min(),
df_state[
state_mask &
(df_state["state"] == "Mixing")
]["time"].min(),
]
}
)
df_state_row["start"] = df_state_row["end"] - sample_time
return df_state_row
@classmethod
def _get_cutoff_pressure(
cls,
state,
df_current_test_pressure,
df_state_cutoff_times
):
"""
Gets the cutoff pressure for a particular tube fill state
Parameters
----------
state : str
One of the main tube fill states: vacuum, fuel, diluent, oxidizer
df_current_test_pressure : pd.DataFrame
Dataframe containing current test pressure trace
df_state_cutoff_times : pd.DataFrame
Dataframe of state cutoff times -- see _get_pressure_cutoff_times
Returns
-------
un.ufloat
Mean pressure value with uncertainty estimate
"""
press = cls._collect_current_test_df(
df_current_test_pressure,
df_state_cutoff_times[df_state_cutoff_times["state"] == state]
)["pressure"].values
# calculate sample uncertainty
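        # (standard error of the mean scaled by the two-sided 95% t statistic;
        # this sampling term is added to the instrument uncertainty below)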
num_samples = len(press)
sem = press.std() / sqrt(num_samples)
u_sample = un.ufloat(
0,
sem * t.ppf(0.975, num_samples - 1),
tag="sample"
)
press = unp.uarray(
press,
uncertainty.u_pressure(press, daq_err=False)
)
press = press.mean() + u_sample
return press
@classmethod
def _collect_current_test_df(
cls,
df_to_slice,
test_time_row
):
"""
Slices a temperature or pressure dataframe using _mask_by_row_time
Parameters
----------
df_to_slice : pd.DataFrame
Dataframe with a `time` column
test_time_row : pd.Series
Series containing `start` and `end` timestamps
Returns
-------
pd.DataFrame
Sliced portion of input dataframe
"""
return df_to_slice[cls._mask_df_by_row_time(df_to_slice, test_time_row)]
class _ProcessStructure2:
# TODO: add docstrings
@staticmethod
def _collect_shot_directories(
dir_raw,
date
):
"""
Find all `Shot XX` directories within a single day's raw data directory
Parameters
----------
dir_raw : str
directory for daily raw data
date : str
ISO-8601 formatted date string (YYYY-MM-DD)
Returns
-------
list
"""
dir_search = os.path.join(dir_raw, date)
return sorted(
os.path.join(dir_search, d)
for d in os.listdir(dir_search)
if "shot" in d.lower()
)
@staticmethod
def _get_shot_no_from_dir(
dir_shot
):
"""
Extract shot number from shot directory
Parameters
----------
dir_shot : str
directory containing shot data
Returns
-------
int
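        Examples
        --------
        A minimal sketch (the path is hypothetical):
            shot_no = _ProcessStructure2._get_shot_no_from_dir(
                "/d/Data/Raw/2020-01-01/Shot 03"
            )  # shot_no == 3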
"""
return int("".join(
(i for i in os.path.split(dir_shot)[1]
if i in string.digits)
))
@staticmethod
def _population_uncertainty(
data
):
"""
Calculate a 95% confidence interval on measured data. Returns as a
ufloat with a nominal value of 0 for easy addition with instrumentation
uncertainty.
Parameters
----------
data : np.array
array of data points
Returns
-------
un.ufloat
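        Examples
        --------
        A minimal sketch with made-up readings (units are arbitrary):
            u_sample = _ProcessStructure2._population_uncertainty(
                np.array([100.0, 101.0, 99.0])
            )
            # u_sample.nominal_value == 0 and u_sample.std_dev is the
            # half-width of the 95% confidence interval on the mean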
"""
num_samples = len(data)
if num_samples > 0:
sem = np.std(unp.nominal_values(data)) / sqrt(num_samples)
return un.ufloat(
0,
sem * t.ppf(0.975, num_samples - 1),
tag="sample"
)
else:
return un.ufloat(np.NaN, np.NaN)
@classmethod
def _get_fill_cutoffs(
cls,
fill_tdms
):
"""
Extracts fill cutoff pressures from fill.tdms
Parameters
----------
fill_tdms : TdmsFile
TDMS file of shot fill data
Returns
-------
dict
dictionary with keys:
* vacuum
* fuel
* diluent
* oxidizer
"""
cutoffs = dict(
vacuum=un.ufloat(np.NaN, np.NaN),
fuel=un.ufloat(np.NaN, np.NaN),
diluent=un.ufloat(np.NaN, np.NaN),
oxidizer=un.ufloat(np.NaN, np.NaN)
)
for cutoff in cutoffs.keys():
press = fill_tdms.channel_data(cutoff, "pressure")
if len(press) == 0:
if cutoff == "diluent":
pass
else:
raise ValueError("Empty cutoff pressure for %s" % cutoff)
else:
press = unp.uarray(
press,
uncertainty.u_pressure(press, daq_err=False)
)
cutoffs[cutoff] = press.mean() + \
cls._population_uncertainty(press)
return cutoffs
@staticmethod
def _get_diluent_mol_frac(
partials
):
"""
Calculate diluent mole fraction from component partial pressures
Parameters
----------
partials : dict
dictionary of partial pressures (fuel, oxidizer, diluent)
Returns
-------
un.ufloat
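        Examples
        --------
        A minimal sketch with made-up partial pressures (Pa):
            partials = {
                "fuel": un.ufloat(20000, 100),
                "oxidizer": un.ufloat(70000, 100),
                "diluent": un.ufloat(10000, 100),
            }
            dil_mf = _ProcessStructure2._get_diluent_mol_frac(partials)
            # dil_mf.nominal_value == 0.1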
"""
if np.isnan(partials["diluent"].nominal_value):
# undiluted mixture
return un.ufloat(0, 0)
else:
return partials["diluent"] / sum(partials.values())
@classmethod
def _read_fill_tdms(
cls,
dir_shot,
oxidizer_is_air
):
"""
Read in partial pressures, fill cutoff pressures, initial conditions,
and diluent mass fraction from fill.tdms
Parameters
----------
dir_shot : str
shot data directory
oxidizer_is_air : bool
If oxidizer is air, vacuum is lumped in with oxidizer
Returns
-------
dict
dictionary with keys:
* partials
* cutoffs
* initial
* dil_mf
"""
fill_tdms = TdmsFile(os.path.join(dir_shot, "fill.tdms"))
initial = cls._get_initial_conditions(fill_tdms)
cutoffs = cls._get_fill_cutoffs(fill_tdms)
partials = cls._get_partials_from_cutoffs(cutoffs, oxidizer_is_air)
dil_mf = cls._get_diluent_mol_frac(partials)
return dict(
partials=partials,
cutoffs=cutoffs,
initial=initial,
dil_mf=dil_mf
)
@classmethod
def _get_initial_conditions(
cls,
fill_tdms
):
"""
Collects initial conditions from fill.tdms
Parameters
----------
fill_tdms : TdmsFile
TDMS file with fill data
Returns
-------
dict
dictionary with the keys:
* pressure
* temperature
"""
pressure = fill_tdms.channel_data("oxidizer", "pressure")
u_pop_pressure = cls._population_uncertainty(pressure)
pressure = unp.uarray(
pressure,
uncertainty.u_pressure(
pressure,
daq_err=False
)
)
temperature = fill_tdms.channel_data("oxidizer", "temperature")
u_pop_temperature = cls._population_uncertainty(temperature)
        temperature = unp.uarray(
            temperature,
            uncertainty.u_temperature(temperature)
        )
initial = dict(
pressure=np.mean(pressure) + u_pop_pressure,
temperature=np.mean(temperature) + u_pop_temperature,
)
return initial
@staticmethod
def _get_partials_from_cutoffs(
cutoffs,
oxidizer_is_air
):
"""
Calculates component partial pressures from fill cutoff pressures
Parameters
----------
cutoffs : dict
dictionary of cutoff pressures (output of _get_fill_cutoffs)
oxidizer_is_air : bool
If oxidizer is air, vacuum is lumped in with oxidizer
Returns
-------
dict
dictionary with keys:
* fuel
* oxidizer
* diluent
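        Examples
        --------
        A minimal sketch with made-up cutoff pressures (Pa) and an air
        oxidizer, so the vacuum base pressure is folded into the oxidizer:
            cutoffs = {
                "vacuum": un.ufloat(1000, 10),
                "fuel": un.ufloat(21000, 10),
                "diluent": un.ufloat(31000, 10),
                "oxidizer": un.ufloat(101000, 10),
            }
            partials = _ProcessStructure2._get_partials_from_cutoffs(
                cutoffs,
                oxidizer_is_air=True
            )
            # partials["fuel"] ~ 20000, partials["diluent"] ~ 10000,
            # partials["oxidizer"] ~ 71000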
"""
partials = dict()
partials["fuel"] = cutoffs["fuel"] - cutoffs["vacuum"]
# propagate nan +/- nan for undiluted mixtures
if np.isnan(cutoffs["diluent"].std_dev):
partials["diluent"] = cutoffs["diluent"]
else:
partials["diluent"] = cutoffs["diluent"] - cutoffs["fuel"]
# using nanmax in case fill order changes again in the future
partials["oxidizer"] = cutoffs["oxidizer"] - np.nanmax((
cutoffs["fuel"],
cutoffs["diluent"]
))
if oxidizer_is_air:
partials["oxidizer"] += cutoffs["vacuum"]
return partials
@staticmethod
def _check_for_schlieren(
dir_shot
):
"""
Returns shot directory if schlieren data was collected, and np.NaN
if not.
Parameters
----------
dir_shot : str
shot data directory
Returns
-------
str or np.NaN
"""
pth_frames = os.path.join(dir_shot, "frames")
pth_bg = os.path.join(dir_shot, "bg")
if os.path.exists(pth_frames) and os.path.exists(pth_bg):
# directories exist. make sure they have files in them.
num_frames = len([f for f in os.listdir(pth_frames)
if f.lower()[-4:] == ".tif"])
num_bg = len([f for f in os.listdir(pth_bg)
if f.lower()[-4:] == ".tif"])
if num_frames > 0 and num_bg == 101:
# everything is awesome
return dir_shot
return np.NaN
@staticmethod
def _check_for_diodes(
dir_shot
):
"""
Returns path to diode file if it exists and is not empty, otherwise
returns np.NaN
Parameters
----------
dir_shot : str
shot data directory
Returns
-------
str or np.NaN
"""
# TODO: update this with the proper diode file size once known
diode_path = os.path.join(dir_shot, "diodes.tdms")
if os.path.exists(diode_path):
if os.path.getsize(diode_path) > 4096:
# diode tdms exists and is at least larger than empty
# (empty is 4096 bytes)
return diode_path
return np.NaN
@staticmethod
def _get_nominal_conditions(
dir_shot
):
"""
Reads in nominal conditions from conditions.csv, which should end up
as a dataframe with only one row.
Parameters
----------
dir_shot : str
shot data directory
Returns
-------
pd.DataFrame
"""
pth_nominal = os.path.join(dir_shot, "conditions.csv")
if os.path.exists(pth_nominal):
return pd.read_csv(pth_nominal).iloc[0]
else:
            raise FileNotFoundError("%s not found" % pth_nominal)
@staticmethod
def _process_schlieren(
dir_shot,
shot_no,
date
):
"""
Background subtract all schlieren frames for a given shot
Parameters
----------
dir_shot : str
shot data directory
shot_no : int
shot number
date : str
shot date
Returns
-------
dict
dictionary with keys of the form:
"/schlieren/dYYYY-MM-DD/shotXX/frame_XX
"""
processed = schlieren.bg_subtract_all_frames(dir_shot)
return {
"/schlieren/d{:s}/shot{:02d}/frame_{:02d}".format(
date.replace("-", "_"),
shot_no,
i
): pd.DataFrame(frame) for i, frame in enumerate(processed)
}
@classmethod
def process_single_test(
cls,
date,
dir_shot,
shot_no,
mech,
diode_spacing,
):
"""
Process data from a single shot
Parameters
----------
date : str
shot date
dir_shot : str
shot data directory
shot_no : int
shot number
mech : str
mechanism for cantera calculations
diode_spacing : float
distance between photodiodes, in meters
Returns
-------
tuple
(shot no., results dataframe, schlieren dictionary)
"""
results = pd.Series(
index=(
"shot",
"start",
"end",
"schlieren",
"diodes",
"t_0",
"u_t_0",
"p_0_nom",
"p_0",
"u_p_0",
"phi_nom",
"phi",
"u_phi",
"fuel",
"p_fuel",
"u_p_fuel",
"oxidizer",
"p_oxidizer",
"u_p_oxidizer",
"diluent",
"p_diluent",
"u_p_diluent",
"dil_mf_nom",
"dil_mf",
"u_dil_mf",
"wave_speed",
"u_wave_speed",
"cutoff_fuel",
"cutoff_vacuum",
"cutoff_diluent",
"cutoff_oxidizer",
"u_cutoff_fuel",
"u_cutoff_vacuum",
"u_cutoff_diluent",
"u_cutoff_oxidizer",
"date",
),
dtype="object"
)
results["date"] = date
results["shot"] = shot_no
# check for schlieren and diode files
# these are paths if they exist and NaN if they don't in order to
# conform to convention that arose during early data management.
# I don't like it, but that's how it is now.
results["schlieren"] = cls._check_for_schlieren(dir_shot)
results["diodes"] = cls._check_for_diodes(dir_shot)
# background subtract schlieren
if not pd.isnull(results["schlieren"]):
schlieren_out = cls._process_schlieren(
results["schlieren"],
results["shot"],
date
)
else:
schlieren_out = dict()
# nominal conditions
# from conditions.csv
nominal = cls._get_nominal_conditions(dir_shot)
for key in ("start", "end"):
nominal[key] = pd.to_datetime(nominal[key])
for key in ("p_0_nom", "phi_nom", "dil_mf_nom"):
# using float() instead of astype(float) because this should be a
# series, therefore nominal[key] returns a value rather than an
# array of values
nominal[key] = float(nominal[key])
nominal["end"] = pd.to_datetime(nominal["end"])
for key in ("start", "end", "p_0_nom", "phi_nom", "fuel",
"oxidizer", "dil_mf_nom"):
results[key] = nominal[key]
if pd.isna(nominal["diluent"]):
results["diluent"] = "None"
else:
results["diluent"] = nominal["diluent"]
# wave speed
# from diodes.tdms
if pd.isna(results["diodes"]):
# this would happen anyway, but explicit is better than implicit
results["wave_speed"] = np.NaN
results["u_wave_speed"] = np.NaN
else:
wave_speed = diodes.calculate_velocity(
results["diodes"],
diode_spacing
)[0]
results["wave_speed"] = wave_speed.nominal_value
results["u_wave_speed"] = wave_speed.std_dev
# measured initial conditions
# from fill.tdms
oxidizer_is_air = results["oxidizer"].lower() == "air"
fill_info = cls._read_fill_tdms(dir_shot, oxidizer_is_air)
initial = fill_info["initial"]
results["p_0"] = initial["pressure"].nominal_value
results["u_p_0"] = initial["pressure"].std_dev
results["t_0"] = initial["temperature"].nominal_value
results["u_t_0"] = initial["temperature"].std_dev
# measured cutoff pressures
# from fill.tdms
cutoffs = fill_info["cutoffs"]
results["cutoff_fuel"] = cutoffs["fuel"].nominal_value
results["u_cutoff_fuel"] = cutoffs["fuel"].std_dev
results["cutoff_diluent"] = cutoffs["diluent"].nominal_value
results["u_cutoff_diluent"] = cutoffs["diluent"].std_dev
results["cutoff_oxidizer"] = cutoffs["oxidizer"].nominal_value
results["u_cutoff_oxidizer"] = cutoffs["oxidizer"].std_dev
results["cutoff_vacuum"] = cutoffs["vacuum"].nominal_value
results["u_cutoff_vacuum"] = cutoffs["vacuum"].std_dev
# measured partial pressures and dilution
# from fill.tdms
partials = fill_info["partials"]
results["p_fuel"] = partials["fuel"].nominal_value
results["u_p_fuel"] = partials["fuel"].std_dev
results["p_diluent"] = partials["diluent"].nominal_value
results["u_p_diluent"] = partials["diluent"].std_dev
results["p_oxidizer"] = partials["oxidizer"].nominal_value
results["u_p_oxidizer"] = partials["oxidizer"].std_dev
results["dil_mf"] = fill_info["dil_mf"].nominal_value
results["u_dil_mf"] = fill_info["dil_mf"].std_dev
# equivalence ratio
# from partials (fill.tdms) and nominal f/ox (conditions.csv)
phi = thermo.get_equivalence_ratio(
partials["fuel"],
partials["oxidizer"],
thermo.get_f_a_st(
nominal["fuel"],
nominal["oxidizer"],
mech
)
)
results["phi"] = phi.nominal_value
results["u_phi"] = phi.std_dev
return shot_no, results.to_frame().T, schlieren_out
@classmethod
def process_all_tests(
cls,
date,
dir_raw,
mech,
diode_spacing,
multiprocessing
):
"""
Process all tests on a given day
Parameters
----------
date : str
test date
dir_raw : str
raw data directory for given day
mech : str
mechanism for cantera calculations
diode_spacing : float
diode spacing in meters
multiprocessing : bool
whether or not to use multiprocessing to speed up calculations
Returns
-------
tuple
(processed dataframe, schlieren dictionary)
"""
shot_dirs = cls._collect_shot_directories(dir_raw, date)
shot_nums = [cls._get_shot_no_from_dir(d) for d in shot_dirs]
df_out = pd.DataFrame()
schlieren_out = dict()
if multiprocessing:
pool = mp.Pool()
results = sorted(pool.starmap(
cls.process_single_test,
[(date, d, sn, mech, diode_spacing)
for d, sn in zip(shot_dirs, shot_nums)]
))
pool.close()
for (_, df_shot, shot_schlieren) in results:
df_out = pd.concat((df_out, df_shot), ignore_index=True)
schlieren_out.update(shot_schlieren)
else:
for sn, d in zip(shot_nums, shot_dirs):
_, df_shot, shot_schlieren = cls.process_single_test(
date,
d,
sn,
mech,
diode_spacing
)
df_out = pd.concat((df_out, df_shot), ignore_index=True)
schlieren_out.update(shot_schlieren)
df_out = to_df_dtyped(df_out)
return df_out, schlieren_out
def store_processed_schlieren(
day,
img,
img_key,
frame_no,
overwrite,
store
):
current_frame_loc = "schlieren/{:s}/{:s}/frame_{:02d}".format(
day,
img_key[-6:],
frame_no
)
if not overwrite and "/" + current_frame_loc in store.keys():
w_str = "Ignoring {:s}.".format(
current_frame_loc
)
warnings.warn(w_str)
pass
else:
store.put(
current_frame_loc,
pd.DataFrame(img)
)
def row_dateshot_string(df_row):
return "_".join(df_row[["date", "shot"]].astype(str))
def to_df_dtyped(df_or_series):
"""
Enforces column dtypes so the HDFStore is happy. Everything happens
in place, but the dataframe is returned anyway.
Parameters
----------
df_or_series : pd.DataFrame or pd.Series
dataframe or series of processed tube data
Returns
-------
pd.DataFrame
"""
out = df_or_series.copy()
if isinstance(out, pd.Series):
out = out.to_frame().T
elif not isinstance(out, pd.DataFrame):
raise TypeError("argument must be a dataframe or series")
int_keys = (
"shot",
)
time_keys = (
"start",
"end",
)
float_keys = (
"t_0",
"u_t_0",
"p_0_nom",
"p_0",
"u_p_0",
"phi_nom",
"phi",
"u_phi",
"p_fuel",
"u_p_fuel",
"p_oxidizer",
"u_p_oxidizer",
"p_diluent",
"u_p_diluent",
"dil_mf_nom",
"dil_mf",
"u_dil_mf",
"wave_speed",
"u_wave_speed",
"cutoff_fuel",
"cutoff_vacuum",
"cutoff_diluent",
"cutoff_oxidizer",
"u_cutoff_fuel",
"u_cutoff_vacuum",
"u_cutoff_diluent",
"u_cutoff_oxidizer",
)
for k in int_keys:
out[k] = out[k].astype(int)
for k in time_keys:
out[k] = pd.to_datetime(out[k])
for k in float_keys:
out[k] = out[k].astype(float)
return out
def store_processed_test(
test_row,
existing_tests,
overwrite,
store
):
current_test = row_dateshot_string(test_row)
if current_test in existing_tests:
        if overwrite:
            # store["data"] returns a copy, so write the modified table back
            df_existing = store["data"]
            df_existing.loc[existing_tests == current_test] = test_row.values
            store.put("data", df_existing, format="table",
                      min_itemsize={"schlieren": 50, "diodes": 50})
else:
w_str = "Ignoring {:s} shot {:d}.".format(
test_row["date"],
test_row["shot"]
)
warnings.warn(w_str)
pass
else:
store.append(
"data",
to_df_dtyped(test_row),
min_itemsize={
"schlieren": 50,
"diodes": 50
}
)
def get_existing_tests(
store
):
try:
existing_tests = store["data"].apply(
row_dateshot_string,
axis=1
).values
    except (KeyError, NoSuchNodeError):
existing_tests = ""
return existing_tests
def process_multiple_days(
dates_to_process,
loc_processed_h5=os.path.join(
d_drive,
"Data",
"Processed",
"tube_data.h5"
),
raw_base_dir=os.path.join(
d_drive,
"Data",
"Raw"
),
multiprocess=True,
overwrite=False
):
"""
Process multiple days worth of tube data
Parameters
----------
dates_to_process : List[Str] or str
List of dates to post process as YYYY-MM-DD
loc_processed_h5 : str
Location of processed data HDF5
raw_base_dir : str
Base directory where raw data directories are located. You shouldn't
need to change this.
multiprocess : bool
Whether to use multiprocessing for each day to speed things up
overwrite : bool
Whether or not to overwrite existing processed data in the processed
data HDF5 database
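    Examples
    --------
    A minimal sketch (dates are illustrative; the default data locations are
    assumed to exist):
        process_multiple_days(["2020-01-01", "2020-01-02"], multiprocess=False)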
"""
if hasattr(dates_to_process, "lower"):
# it's a string. make it a list.
dates_to_process = [dates_to_process]
with pd.HDFStore(
os.path.join(
_DIR,
"../../data",
"tube_data_template.h5"
),
"r"
) as store:
df_out = store["data"]
for day in dates_to_process:
df_day, day_schlieren = process_by_date(
day,
raw_base_dir,
multiprocess
)
# force df_day to match desired structure
df_day = pd.concat((df_out, df_day), sort=False, ignore_index=True)
day = "d" + day.replace("-", "_")
        with pd.HDFStore(loc_processed_h5, "a") as store:
import textwrap
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .utils import maybe_save_fig, shifted_color_map
def product_heatmap(*a, **kws):
return feature_loading_heatmap(
*a, original_features_axis_label='product name', **kws)
def feature_loading_heatmap(
loadings, feature_labels, font_size=16, tick_font_size=4,
figsize=(50, 10), major_tick_label_size=6, minor_tick_label_size=0,
cmap=mpl.cm.PuOr_r, shift_color_map=True, save_fig=None,
original_features_axis_label='original features'):
"""Plot the loadings on features as a heatmap.
Parameters
----------
loadings : array of shape
[number of loadings on features, number of features]
        The vectors of loadings on features, stacked as rows. For example,
        loadings could be PCA().components_.
    feature_labels : list of strings of length equal to the number of features
The names of the features
    font_size, tick_font_size : int, optional, default: 16, 4
        `font_size` is the font size of the axes and color bar labels;
        `tick_font_size` is the font size of the x tick labels.
    major_tick_label_size, minor_tick_label_size : int, optional, default: 6, 0
        Sizes of the major and minor tick labels
cmap : matplotlib color map
shift_color_map : bool, default: True
Whether to shift the colormap `cmap` to center it at zero.
save_fig : string or None, optional, default: None
If not None, then save the figure to the path specified by `save_fig`
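    Examples
    --------
    A minimal sketch; `pca` (a fitted sklearn PCA) and `feature_names` (the
    original column names) are assumed to be defined elsewhere:
        fig, ax = feature_loading_heatmap(
            pca.components_, feature_names, figsize=(30, 6))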
"""
fig, ax = plt.subplots(figsize=figsize)
vmin = np.min(loadings)
vmax = np.max(loadings)
if shift_color_map:
midpoint = 1 - vmax / (vmax + np.abs(vmin))
cmap = shifted_color_map(cmap, midpoint=midpoint)
axim = ax.matshow(loadings, cmap=cmap, aspect=10,
vmin=vmin, vmax=vmax)
ax.set_xlabel(original_features_axis_label, size=font_size)
ax.set_ylabel('component', size=font_size)
ax.tick_params(axis='both', which='major', labelsize=major_tick_label_size)
ax.tick_params(axis='both', which='minor', labelsize=minor_tick_label_size)
ax.set_xticks(list(range(loadings.shape[1])))
ax.set_xticklabels(feature_labels, rotation=90, fontsize=tick_font_size)
cax, kwargs = mpl.colorbar.make_axes(
[ax], location='bottom', fraction=0.05,
aspect=40, pad=.04, label='loading')
cb = fig.colorbar(axim, cax=cax, **kwargs)
text = cb.ax.xaxis.label
font = mpl.font_manager.FontProperties(size=font_size)
text.set_font_properties(font)
maybe_save_fig(fig, save_fig)
return fig, ax
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
"""Return a colormap with its center shifted.
Useful for data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Adapted from: http://stackoverflow.com/a/20528097/6301373
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
        Defaults to 0.0 (no lower offset). Should be between
        0.0 and `midpoint`.
    midpoint : The new center of the colormap. Defaults to
        0.5 (no shift). Should be between 0.0 and 1.0. In
        general, this should be 1 - vmax/(vmax + abs(vmin)).
        For example, if your data range from -15.0 to +5.0 and
        you want the center of the colormap at 0.0, `midpoint`
        should be set to 1 - 5/(5 + 15), i.e. 0.75.
    stop : Offset from highest point in the colormap's range.
        Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
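    Examples
    --------
    A minimal sketch, assuming data spanning -15.0 to +5.0:
        cmap = shiftedColorMap(mpl.cm.PuOr_r, midpoint=1 - 5 / (5 + 15),
                               name='example_shifted')
        # pass cmap to e.g. plt.imshow(data, cmap=cmap)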
"""
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap
def biplot(scores, loadings, axis=None,
labels=None, figsize=(8, 4),
xlabel='Score on the first principal component',
ylabel='Score on the second principal component',
scatter_size=20,
label_offset=0.1, fontsize=14, label_font_size=10, scale_loadings=1,
arrow_color='r', label_color='r', arrow_width=.001, arrow_alpha=0.5,
scatter_alpha=0.5, score_color='b', tick_color='b',
save_fig=None):
"""Create a biplot of the principal component analysis of a dataset.
Parameters
----------
scores : array of shape [number of samples, number of principal components]
The scores of the data on the principal components. The number of
principal components should be at least two.
loadings : numpy array of shape
[number of features, number of principal components]
The loadings of features on the principal components
axis : matplotlib axis, optional, default: None
The axis on which to attach the biplot. If None, create a new figure.
labels : list of strings, optional, default: None
The labels of the features to be plotted next to the arrowheads
figsize : tuple (width, height), optional, default: (8, 4)
The size of the figure
xlabel, ylabel : str or None, optional
The labels of the axes. If None, then no label is shown.
scatter_size : float, default 20
Size of the scatter points in units of points^2
label_offset : scalar, optional, default: 0.1
The amount by which to scale the position of the label compared to the
location of the arrowhead, relative to the 2-norm of the loading.
fontsize : int, optional, default: 14
The font size of the axes' labels
    label_font_size : int, optional, default: 10
The font size of the labels of the arrows (the loading vectors)
scale_loadings : float, optional, default: 1
The amount by which to scale the loading vectors to make them easier to
see.
arrow_color : string, optional, default: 'r'
The color of the loading vectors (arrows)
score_color : string, optional, default: 'b'
The color of the scores (scatter plot)
arrow_width : float, optional, default: 0.001
The width of the loading vectors' arrows
arrow_alpha, scatter_alpha : float, optional, default: 0.5
The opacity of the arrows and of the scatter plot
label_color : string, optional, default: 'r'
The color of the labels of the loading vectors (arrows)
tick_color : string, optional, default: 'b'
The color of the ticks and axis labels
save_fig : None or a path and file name, optional, default: None
If save_fig is not False or None, then the figure is saved to this
file name.
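    Examples
    --------
    A minimal sketch; `pca` (a fitted sklearn PCA), `X_scaled` (the
    standardized data it was fit on), and `feature_names` are assumed to be
    defined elsewhere:
        scores = pca.transform(X_scaled)
        biplot(scores, pca.components_.T,
               labels=feature_names, scale_loadings=3)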
"""
n = loadings.shape[0] # number of features to be plotted as arrows
if scores.shape[1] < 2:
raise ValueError("The number of principal component scores" +
" must be at least 2.")
if axis is None:
fig, axis = plt.subplots(figsize=figsize)
else:
fig = axis.figure
axis.scatter(
*scores[:, :2].T, alpha=scatter_alpha, color=score_color,
s=scatter_size)
if xlabel is not None:
axis.set_xlabel(xlabel, color=tick_color, fontsize=fontsize)
if ylabel is not None:
axis.set_ylabel(ylabel, color=tick_color, fontsize=fontsize)
for tl in axis.get_xticklabels() + axis.get_yticklabels():
tl.set_color(tick_color)
for i in range(n):
axis.arrow(0, 0,
loadings[i, 0] * scale_loadings,
loadings[i, 1] * scale_loadings,
color=arrow_color, alpha=arrow_alpha, width=arrow_width)
if labels is not None:
label_position = np.array([loadings[i, 0],
loadings[i, 1]]) * scale_loadings
label_position += (label_offset * label_position /
np.linalg.norm(label_position))
axis.text(*label_position, labels[i],
color=label_color, ha='center', va='center',
wrap=True, fontsize=label_font_size, alpha=1)
maybe_save_fig(fig, save_fig)
return
def plot_variance_explained(
explained_variance_ratio,
labels_var_explained=[0, 1],
labels_cumulative_var_explained=[0, 1, 9, 99, 199],
fig_title='Variance explained by the principal components',
save_fig=None):
"""Plot the ratio of variance explained by the principal components. Returns
a row of two plots of the variance explained by each component and of the
cumulative variance explained up to a certain component.
Parameters
----------
explained_variance_ratio : array of shape [n_components]
        This is PCA().explained_variance_ratio_
labels_var_explained : list of int's, optional, default: [0, 1]
Which principal components to label in the plot of variance explained
by each component.
labels_cumulative_var_explained : list of int's, optional,
default: [0, 1, 9, 99, 199]
Which principal components to label in the plot of cumulative variance
explained by each component. Including integer `i` in this list means
label the fraction of variance explained by all components
0, 1, ..., i, so the label is "i + 1 components".
fig_title : str, optional, default: 'Variance explained by the principal
components'
The figure's title (i.e., its `suptitle`).
save_fig : None or file path
If None, do not save the file to disk. Otherwise, save the file to the
path `save_fig`.
Returns
-------
    fig, axes : matplotlib Figure and array of AxesSubplot objects
The row of two figures showing (1) fraction of variance explained by
each principal component and (2) the cumulative fraction of variance
explained by each principal component.
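    Examples
    --------
    A minimal sketch, assuming `pca` is a fitted sklearn PCA with at least
    ten components:
        fig, axes = plot_variance_explained(
            pca.explained_variance_ratio_,
            labels_cumulative_var_explained=[0, 1, 9])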
"""
n_components = len(explained_variance_ratio)
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
options = {'s': 5, 'marker': 'o'}
axes[0].scatter(range(n_components), explained_variance_ratio, **options)
axes[0].set_ylabel('explained variance')
axes[0].set_xlabel('principal component')
# Label some of the points
    for i, var in zip(labels_var_explained,
                      explained_variance_ratio[labels_var_explained]):
axes[0].text(
i + .03, var + .03,
('{var:.1%} (component {n_comp})'.format(var=var, n_comp=i)))
axes[1].scatter(np.array(range(n_components)),
np.cumsum(explained_variance_ratio), **options)
axes[1].set_ylabel('cumulative explained variance')
for i, var in enumerate(np.cumsum(explained_variance_ratio)):
if i in labels_cumulative_var_explained:
axes[1].text(
i + 5, var - .06,
('{var:.1%} ({n_comp} component'.format(
var=var, n_comp=i + 1) +
('s)' if i + 1 != 1 else ')'))
)
axes[1].set_xlabel('number of principal components')
axes[0].set_xlim(-5, n_components + .5)
axes[0].set_ylim(0, 1)
fig.suptitle(fig_title, size=14)
maybe_save_fig(fig, save_fig)
return fig, axes
def loadings_histograms(
pca, feature_labels, n_components='all', bins=50,
n_features_to_label=10, max_text_len=35,
text_kws={'color': 'white',
'bbox': {'facecolor': 'k', 'alpha': 0.7, 'pad': 1}},
save_fig=None):
"""Plot histograms of the loadings for each principal component.
    Parameters
    ----------
pca : fitted sklearn.decomposition.pca.PCA object
feature_labels : list of strings of length equal to
`pca.components_.shape[1]`
The labels of the features
bins : int
The number of bins to use in the histograms
n_components : 'all' or int
The number of principal components to show. The components shown are
0, 1, ..., n_components - 1. If n_components is 'all', then all
components are shown.
n_features_to_label : int, default: 10
The number of most highly and least weighted features to label on the
histogram. If 0, then do not show any such labels.
max_text_len : int, default: 35
The maximum number of characters to use in the labels of the most/least
weighted features.
save_fig : None or file path, default: None
If not None, then save the figure to this path
text_kws : dict
        The keyword arguments for the text labels of the features.
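    Examples
    --------
    A minimal sketch; `pca` (a fitted sklearn PCA) and `feature_names` (the
    original column names) are assumed to be defined elsewhere:
        loadings_histograms(pca, feature_names, n_components=3)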
"""
if n_components == 'all':
pca_weights = pd.DataFrame(pca.components_.T)
else:
        pca_weights = pd.DataFrame(pca.components_[:n_components].T)
import importlib
import inspect
import os
import warnings
from unittest.mock import patch
import cloudpickle
import numpy as np
import pandas as pd
import pytest
from skopt.space import Categorical
from evalml.exceptions import (
ComponentNotYetFittedError,
EnsembleMissingPipelinesError,
MethodPropertyNotFoundError,
)
from evalml.model_family import ModelFamily
from evalml.pipelines import BinaryClassificationPipeline
from evalml.pipelines.components import (
LSA,
PCA,
ARIMARegressor,
BaselineClassifier,
BaselineRegressor,
CatBoostClassifier,
CatBoostRegressor,
ComponentBase,
DateTimeFeaturizer,
DFSTransformer,
DropColumns,
DropNullColumns,
DropRowsTransformer,
ElasticNetClassifier,
ElasticNetRegressor,
Estimator,
ExtraTreesClassifier,
ExtraTreesRegressor,
Imputer,
LightGBMClassifier,
LightGBMRegressor,
LinearDiscriminantAnalysis,
LinearRegressor,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
Oversampler,
PerColumnImputer,
PolynomialDetrender,
ProphetRegressor,
RandomForestClassifier,
RandomForestRegressor,
RFClassifierSelectFromModel,
RFRegressorSelectFromModel,
SelectByType,
SelectColumns,
SimpleImputer,
StandardScaler,
SVMClassifier,
SVMRegressor,
TargetImputer,
TimeSeriesBaselineEstimator,
TimeSeriesFeaturizer,
Transformer,
Undersampler,
XGBoostClassifier,
XGBoostRegressor,
)
from evalml.pipelines.components.ensemble import (
StackedEnsembleBase,
StackedEnsembleClassifier,
StackedEnsembleRegressor,
)
from evalml.pipelines.components.estimators.classifiers.vowpal_wabbit_classifiers import (
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
)
from evalml.pipelines.components.estimators.regressors.vowpal_wabbit_regressor import (
VowpalWabbitRegressor,
)
from evalml.pipelines.components.transformers.encoders.label_encoder import (
LabelEncoder,
)
from evalml.pipelines.components.transformers.preprocessing.log_transformer import (
LogTransformer,
)
from evalml.pipelines.components.transformers.samplers.base_sampler import (
BaseSampler,
)
from evalml.pipelines.components.utils import (
_all_estimators,
_all_transformers,
all_components,
generate_component_code,
)
from evalml.problem_types import ProblemTypes
@pytest.fixture(scope="module")
def test_classes():
class MockComponent(ComponentBase):
name = "Mock Component"
modifies_features = True
modifies_target = False
training_only = False
class MockEstimator(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
class MockTransformer(Transformer):
name = "Mock Transformer"
def transform(self, X, y=None):
return X
return MockComponent, MockEstimator, MockTransformer
@pytest.fixture(scope="module")
def test_estimator_needs_fitting_false():
class MockEstimatorNeedsFittingFalse(Estimator):
name = "Mock Estimator Needs Fitting False"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
needs_fitting = False
def predict(self, X):
pass
return MockEstimatorNeedsFittingFalse
class MockFitComponent(ComponentBase):
name = "Mock Fit Component"
modifies_features = True
modifies_target = False
training_only = False
def __init__(self, param_a=2, param_b=10, random_seed=0):
parameters = {"param_a": param_a, "param_b": param_b}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
def fit(self, X, y=None):
pass
def predict(self, X):
return np.array(
[self.parameters["param_a"] * 2, self.parameters["param_b"] * 10]
)
def test_init(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
assert MockComponent().name == "Mock Component"
assert MockEstimator().name == "Mock Estimator"
assert MockTransformer().name == "Mock Transformer"
def test_describe(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
params = {"param_a": "value_a", "param_b": 123}
component = MockComponent(parameters=params)
assert component.describe(return_dict=True) == {
"name": "Mock Component",
"parameters": params,
}
estimator = MockEstimator(parameters=params)
assert estimator.describe(return_dict=True) == {
"name": "Mock Estimator",
"parameters": params,
}
transformer = MockTransformer(parameters=params)
assert transformer.describe(return_dict=True) == {
"name": "Mock Transformer",
"parameters": params,
}
def test_describe_component():
enc = OneHotEncoder()
imputer = Imputer()
simple_imputer = SimpleImputer("mean")
column_imputer = PerColumnImputer({"a": "mean", "b": ("constant", 100)})
scaler = StandardScaler()
feature_selection_clf = RFClassifierSelectFromModel(
n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf
)
feature_selection_reg = RFRegressorSelectFromModel(
n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf
)
drop_col_transformer = DropColumns(columns=["col_one", "col_two"])
drop_null_transformer = DropNullColumns()
datetime = DateTimeFeaturizer()
natural_language_featurizer = NaturalLanguageFeaturizer()
lsa = LSA()
pca = PCA()
lda = LinearDiscriminantAnalysis()
ft = DFSTransformer()
us = Undersampler()
assert enc.describe(return_dict=True) == {
"name": "One Hot Encoder",
"parameters": {
"top_n": 10,
"features_to_encode": None,
"categories": None,
"drop": "if_binary",
"handle_unknown": "ignore",
"handle_missing": "error",
},
}
assert imputer.describe(return_dict=True) == {
"name": "Imputer",
"parameters": {
"categorical_impute_strategy": "most_frequent",
"categorical_fill_value": None,
"numeric_impute_strategy": "mean",
"numeric_fill_value": None,
},
}
assert simple_imputer.describe(return_dict=True) == {
"name": "Simple Imputer",
"parameters": {"impute_strategy": "mean", "fill_value": None},
}
assert column_imputer.describe(return_dict=True) == {
"name": "Per Column Imputer",
"parameters": {
"impute_strategies": {"a": "mean", "b": ("constant", 100)},
"default_impute_strategy": "most_frequent",
},
}
assert scaler.describe(return_dict=True) == {
"name": "Standard Scaler",
"parameters": {},
}
assert feature_selection_clf.describe(return_dict=True) == {
"name": "RF Classifier Select From Model",
"parameters": {
"number_features": 5,
"n_estimators": 10,
"max_depth": None,
"percent_features": 0.3,
"threshold": -np.inf,
"n_jobs": -1,
},
}
assert feature_selection_reg.describe(return_dict=True) == {
"name": "RF Regressor Select From Model",
"parameters": {
"number_features": 5,
"n_estimators": 10,
"max_depth": None,
"percent_features": 0.3,
"threshold": -np.inf,
"n_jobs": -1,
},
}
assert drop_col_transformer.describe(return_dict=True) == {
"name": "Drop Columns Transformer",
"parameters": {"columns": ["col_one", "col_two"]},
}
assert drop_null_transformer.describe(return_dict=True) == {
"name": "Drop Null Columns Transformer",
"parameters": {"pct_null_threshold": 1.0},
}
assert datetime.describe(return_dict=True) == {
"name": "DateTime Featurization Component",
"parameters": {
"features_to_extract": ["year", "month", "day_of_week", "hour"],
"encode_as_categories": False,
"date_index": None,
},
}
assert natural_language_featurizer.describe(return_dict=True) == {
"name": "Natural Language Featurization Component",
"parameters": {},
}
assert lsa.describe(return_dict=True) == {
"name": "LSA Transformer",
"parameters": {},
}
assert pca.describe(return_dict=True) == {
"name": "PCA Transformer",
"parameters": {"n_components": None, "variance": 0.95},
}
assert lda.describe(return_dict=True) == {
"name": "Linear Discriminant Analysis Transformer",
"parameters": {"n_components": None},
}
assert ft.describe(return_dict=True) == {
"name": "DFS Transformer",
"parameters": {"index": "index"},
}
assert us.describe(return_dict=True) == {
"name": "Undersampler",
"parameters": {
"sampling_ratio": 0.25,
"sampling_ratio_dict": None,
"min_samples": 100,
"min_percentage": 0.1,
},
}
try:
oversampler = Oversampler()
assert oversampler.describe(return_dict=True) == {
"name": "Oversampler",
"parameters": {
"sampling_ratio": 0.25,
"sampling_ratio_dict": None,
"k_neighbors_default": 5,
"n_jobs": -1,
},
}
except ImportError:
pass
# testing estimators
base_classifier = BaselineClassifier()
base_regressor = BaselineRegressor()
lr_classifier = LogisticRegressionClassifier()
en_classifier = ElasticNetClassifier()
en_regressor = ElasticNetRegressor()
et_classifier = ExtraTreesClassifier(n_estimators=10, max_features="auto")
et_regressor = ExtraTreesRegressor(n_estimators=10, max_features="auto")
rf_classifier = RandomForestClassifier(n_estimators=10, max_depth=3)
rf_regressor = RandomForestRegressor(n_estimators=10, max_depth=3)
linear_regressor = LinearRegressor()
svm_classifier = SVMClassifier()
svm_regressor = SVMRegressor()
assert base_classifier.describe(return_dict=True) == {
"name": "Baseline Classifier",
"parameters": {"strategy": "mode"},
}
assert base_regressor.describe(return_dict=True) == {
"name": "Baseline Regressor",
"parameters": {"strategy": "mean"},
}
assert lr_classifier.describe(return_dict=True) == {
"name": "Logistic Regression Classifier",
"parameters": {
"penalty": "l2",
"C": 1.0,
"n_jobs": -1,
"multi_class": "auto",
"solver": "lbfgs",
},
}
assert en_classifier.describe(return_dict=True) == {
"name": "Elastic Net Classifier",
"parameters": {
"C": 1.0,
"l1_ratio": 0.15,
"n_jobs": -1,
"multi_class": "auto",
"solver": "saga",
"penalty": "elasticnet",
},
}
assert en_regressor.describe(return_dict=True) == {
"name": "Elastic Net Regressor",
"parameters": {
"alpha": 0.0001,
"l1_ratio": 0.15,
"max_iter": 1000,
"normalize": False,
},
}
assert et_classifier.describe(return_dict=True) == {
"name": "Extra Trees Classifier",
"parameters": {
"n_estimators": 10,
"max_features": "auto",
"max_depth": 6,
"min_samples_split": 2,
"min_weight_fraction_leaf": 0.0,
"n_jobs": -1,
},
}
assert et_regressor.describe(return_dict=True) == {
"name": "Extra Trees Regressor",
"parameters": {
"n_estimators": 10,
"max_features": "auto",
"max_depth": 6,
"min_samples_split": 2,
"min_weight_fraction_leaf": 0.0,
"n_jobs": -1,
},
}
assert rf_classifier.describe(return_dict=True) == {
"name": "Random Forest Classifier",
"parameters": {"n_estimators": 10, "max_depth": 3, "n_jobs": -1},
}
assert rf_regressor.describe(return_dict=True) == {
"name": "Random Forest Regressor",
"parameters": {"n_estimators": 10, "max_depth": 3, "n_jobs": -1},
}
assert linear_regressor.describe(return_dict=True) == {
"name": "Linear Regressor",
"parameters": {"fit_intercept": True, "normalize": False, "n_jobs": -1},
}
assert svm_classifier.describe(return_dict=True) == {
"name": "SVM Classifier",
"parameters": {
"C": 1.0,
"kernel": "rbf",
"gamma": "auto",
"probability": True,
},
}
assert svm_regressor.describe(return_dict=True) == {
"name": "SVM Regressor",
"parameters": {"C": 1.0, "kernel": "rbf", "gamma": "auto"},
}
try:
xgb_classifier = XGBoostClassifier(
eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75
)
xgb_regressor = XGBoostRegressor(
eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75
)
assert xgb_classifier.describe(return_dict=True) == {
"name": "XGBoost Classifier",
"parameters": {
"eta": 0.1,
"max_depth": 3,
"min_child_weight": 1,
"n_estimators": 75,
"n_jobs": 12,
"eval_metric": "logloss",
},
}
assert xgb_regressor.describe(return_dict=True) == {
"name": "XGBoost Regressor",
"parameters": {
"eta": 0.1,
"max_depth": 3,
"min_child_weight": 1,
"n_estimators": 75,
"n_jobs": 12,
},
}
except ImportError:
pass
try:
cb_classifier = CatBoostClassifier()
cb_regressor = CatBoostRegressor()
assert cb_classifier.describe(return_dict=True) == {
"name": "CatBoost Classifier",
"parameters": {
"allow_writing_files": False,
"n_estimators": 10,
"eta": 0.03,
"max_depth": 6,
"bootstrap_type": None,
"silent": True,
"n_jobs": -1,
},
}
assert cb_regressor.describe(return_dict=True) == {
"name": "CatBoost Regressor",
"parameters": {
"allow_writing_files": False,
"n_estimators": 10,
"eta": 0.03,
"max_depth": 6,
"bootstrap_type": None,
"silent": False,
"n_jobs": -1,
},
}
except ImportError:
pass
try:
lg_classifier = LightGBMClassifier()
lg_regressor = LightGBMRegressor()
assert lg_classifier.describe(return_dict=True) == {
"name": "LightGBM Classifier",
"parameters": {
"boosting_type": "gbdt",
"learning_rate": 0.1,
"n_estimators": 100,
"max_depth": 0,
"num_leaves": 31,
"min_child_samples": 20,
"n_jobs": -1,
"bagging_fraction": 0.9,
"bagging_freq": 0,
},
}
assert lg_regressor.describe(return_dict=True) == {
"name": "LightGBM Regressor",
"parameters": {
"boosting_type": "gbdt",
"learning_rate": 0.1,
"n_estimators": 20,
"max_depth": 0,
"num_leaves": 31,
"min_child_samples": 20,
"n_jobs": -1,
"bagging_fraction": 0.9,
"bagging_freq": 0,
},
}
except ImportError:
pass
try:
prophet_regressor = ProphetRegressor()
assert prophet_regressor.describe(return_dict=True) == {
"name": "Prophet Regressor",
"parameters": {
"changepoint_prior_scale": 0.05,
"date_index": None,
"holidays_prior_scale": 10,
"seasonality_mode": "additive",
"seasonality_prior_scale": 10,
"stan_backend": "CMDSTANPY",
},
}
except ImportError:
pass
try:
vw_binary_classifier = VowpalWabbitBinaryClassifier(
loss_function="classic",
learning_rate=0.1,
decay_learning_rate=1.0,
power_t=0.1,
passes=1,
)
vw_multi_classifier = VowpalWabbitMulticlassClassifier(
loss_function="classic",
learning_rate=0.1,
decay_learning_rate=1.0,
power_t=0.1,
passes=1,
)
vw_regressor = VowpalWabbitRegressor(
learning_rate=0.1, decay_learning_rate=1.0, power_t=0.1, passes=1
)
assert vw_binary_classifier.describe(return_dict=True) == {
"name": "Vowpal Wabbit Binary Classifier",
"parameters": {
"loss_function": "classic",
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
assert vw_multi_classifier.describe(return_dict=True) == {
"name": "Vowpal Wabbit Multiclass Classifier",
"parameters": {
"loss_function": "classic",
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
assert vw_regressor.describe(return_dict=True) == {
"name": "Vowpal Wabbit Regressor",
"parameters": {
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
except ImportError:
pass
def test_missing_attributes(X_y_binary):
class MockComponentName(ComponentBase):
pass
with pytest.raises(TypeError):
MockComponentName()
class MockComponentModelFamily(ComponentBase):
name = "Mock Component"
with pytest.raises(TypeError):
MockComponentModelFamily()
class MockEstimatorWithoutAttribute(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
with pytest.raises(TypeError):
MockEstimatorWithoutAttribute()
def test_missing_methods_on_components(X_y_binary, test_classes):
X, y = X_y_binary
MockComponent, MockEstimator, MockTransformer = test_classes
component = MockComponent()
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
component.fit(X)
estimator = MockEstimator()
estimator._is_fitted = True
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a predict method or a component_obj that implements predict",
):
estimator.predict(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a predict_proba method or a component_obj that implements predict_proba",
):
estimator.predict_proba(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a feature_importance property or a component_obj that implements feature_importances_",
):
estimator.feature_importance
transformer = MockTransformer()
transformer._is_fitted = True
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
transformer.fit(X, y)
transformer.transform(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
transformer.fit_transform(X)
def test_component_fit(X_y_binary):
X, y = X_y_binary
class MockEstimator:
def fit(self, X, y):
pass
class MockComponent(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
hyperparameter_ranges = {}
def __init__(self):
parameters = {}
est = MockEstimator()
super().__init__(parameters=parameters, component_obj=est, random_seed=0)
est = MockComponent()
assert isinstance(est.fit(X, y), ComponentBase)
def test_component_fit_transform(X_y_binary):
X, y = X_y_binary
class MockTransformerWithFitTransform(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit_transform(self, X, y=None):
return X
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
class MockTransformerWithFitTransformButError(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit_transform(self, X, y=None):
raise RuntimeError
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
class MockTransformerWithFitAndTransform(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
# convert data to pd DataFrame, because the component classes don't
# standardize to pd DataFrame
X = pd.DataFrame(X)
y = pd.Series(y)
component = MockTransformerWithFitTransform()
assert isinstance(component.fit_transform(X, y), pd.DataFrame)
component = MockTransformerWithFitTransformButError()
with pytest.raises(RuntimeError):
component.fit_transform(X, y)
component = MockTransformerWithFitAndTransform()
assert isinstance(component.fit_transform(X, y), pd.DataFrame)
def test_model_family_components(test_classes):
_, MockEstimator, _ = test_classes
assert MockEstimator.model_family == ModelFamily.LINEAR_MODEL
def test_regressor_call_predict_proba(test_classes):
X = np.array([])
_, MockEstimator, _ = test_classes
component = MockEstimator()
component._is_fitted = True
with pytest.raises(MethodPropertyNotFoundError):
component.predict_proba(X)
def test_component_describe(test_classes, caplog):
MockComponent, _, _ = test_classes
component = MockComponent()
component.describe(print_name=True)
out = caplog.text
assert "Mock Component" in out
def test_component_parameters_getter(test_classes):
MockComponent, _, _ = test_classes
component = MockComponent({"test": "parameter"})
assert component.parameters == {"test": "parameter"}
component.parameters["test"] = "new"
assert component.parameters == {"test": "parameter"}
def test_component_parameters_init(
logistic_regression_binary_pipeline_class, linear_regression_pipeline_class
):
for component_class in all_components():
print("Testing component {}".format(component_class.name))
component = component_class()
parameters = component.parameters
component2 = component_class(**parameters)
parameters2 = component2.parameters
assert parameters == parameters2
def test_clone_init():
params = {"param_a": 2, "param_b": 11}
clf = MockFitComponent(**params)
clf_clone = clf.clone()
assert clf.parameters == clf_clone.parameters
assert clf_clone.random_seed == clf.random_seed
def test_clone_fitted(X_y_binary):
X, y = X_y_binary
params = {"param_a": 3, "param_b": 7}
clf = MockFitComponent(**params)
clf.fit(X, y)
predicted = clf.predict(X)
clf_clone = clf.clone()
assert clf_clone.random_seed == clf.random_seed
assert clf.parameters == clf_clone.parameters
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
clf_clone.predict(X)
clf_clone.fit(X, y)
predicted_clone = clf_clone.predict(X)
np.testing.assert_almost_equal(predicted, predicted_clone)
def test_components_init_kwargs():
for component_class in all_components():
try:
component = component_class()
except EnsembleMissingPipelinesError:
continue
if component._component_obj is None:
continue
if isinstance(component, StackedEnsembleBase):
continue
obj_class = component._component_obj.__class__.__name__
module = component._component_obj.__module__
importlib.import_module(module, obj_class)
patched = module + "." + obj_class + ".__init__"
if component_class == LabelEncoder:
            # scikit-learn's LabelEncoder lives in a different module than the one we import from
patched = module[: module.rindex(".")] + "." + obj_class + ".__init__"
def all_init(self, *args, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
with patch(patched, new=all_init) as _:
component = component_class(test_arg="test")
component_with_different_kwargs = component_class(diff_test_arg="test")
assert component.parameters["test_arg"] == "test"
if not isinstance(component, (PolynomialDetrender, LabelEncoder)):
assert component._component_obj.test_arg == "test"
# Test equality of different components with same or different kwargs
assert component == component_class(test_arg="test")
assert component != component_with_different_kwargs
def test_component_has_random_seed():
for component_class in all_components():
params = inspect.signature(component_class.__init__).parameters
assert "random_seed" in params
def test_transformer_transform_output_type(X_y_binary):
X_np, y_np = X_y_binary
assert isinstance(X_np, np.ndarray)
assert isinstance(y_np, np.ndarray)
y_list = list(y_np)
X_df_no_col_names = pd.DataFrame(X_np)
range_index = pd.RangeIndex(start=0, stop=X_np.shape[1], step=1)
X_df_with_col_names = pd.DataFrame(
X_np, columns=["x" + str(i) for i in range(X_np.shape[1])]
)
y_series_no_name = pd.Series(y_np)
y_series_with_name = pd.Series(y_np, name="target")
datatype_combos = [
(X_np, y_np, range_index),
(X_np, y_list, range_index),
(X_df_no_col_names, y_series_no_name, range_index),
(X_df_with_col_names, y_series_with_name, X_df_with_col_names.columns),
]
for component_class in _all_transformers():
if component_class in [PolynomialDetrender, LogTransformer, LabelEncoder]:
# Skipping because these tests are handled in their respective test files
continue
print("Testing transformer {}".format(component_class.name))
for X, y, X_cols_expected in datatype_combos:
print(
'Checking output of transform for transformer "{}" on X type {} cols {}, y type {} name {}'.format(
component_class.name,
type(X),
X.columns if isinstance(X, pd.DataFrame) else None,
type(y),
y.name if isinstance(y, pd.Series) else None,
)
)
component = component_class()
# SMOTE will throw an error if we pass a ratio lower than the current class balance
if "Oversampler" == component_class.name:
# we cover this case in test_oversamplers
continue
elif component_class == TimeSeriesFeaturizer:
# covered in test_delayed_feature_transformer.py
continue
component.fit(X, y=y)
transform_output = component.transform(X, y=y)
if component.modifies_target:
assert isinstance(transform_output[0], pd.DataFrame)
assert isinstance(transform_output[1], pd.Series)
else:
assert isinstance(transform_output, pd.DataFrame)
if isinstance(component, SelectColumns) or isinstance(
component, SelectByType
):
assert transform_output.shape == (X.shape[0], 0)
elif isinstance(component, PCA) or isinstance(
component, LinearDiscriminantAnalysis
):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] <= X.shape[1]
elif isinstance(component, DFSTransformer):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] >= X.shape[1]
elif component.modifies_target:
assert transform_output[0].shape == X.shape
assert transform_output[1].shape[0] == X.shape[0]
assert len(transform_output[1].shape) == 1
else:
assert transform_output.shape == X.shape
assert list(transform_output.columns) == list(X_cols_expected)
transform_output = component.fit_transform(X, y=y)
if component.modifies_target:
assert isinstance(transform_output[0], pd.DataFrame)
assert isinstance(transform_output[1], pd.Series)
else:
assert isinstance(transform_output, pd.DataFrame)
if isinstance(component, SelectColumns) or isinstance(
component, SelectByType
):
assert transform_output.shape == (X.shape[0], 0)
elif isinstance(component, PCA) or isinstance(
component, LinearDiscriminantAnalysis
):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] <= X.shape[1]
elif isinstance(component, DFSTransformer):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] >= X.shape[1]
elif component.modifies_target:
assert transform_output[0].shape == X.shape
assert transform_output[1].shape[0] == X.shape[0]
assert len(transform_output[1].shape) == 1
else:
assert transform_output.shape == X.shape
assert list(transform_output.columns) == list(X_cols_expected)
@pytest.mark.parametrize(
"cls",
[
cls
for cls in all_components()
if cls
not in [
StackedEnsembleClassifier,
StackedEnsembleRegressor,
]
],
)
def test_default_parameters(cls):
assert (
cls.default_parameters == cls().parameters
), f"{cls.__name__}'s default parameters don't match __init__."
@pytest.mark.parametrize("cls", [cls for cls in all_components()])
def test_default_parameters_raise_no_warnings(cls):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cls()
assert len(w) == 0
def test_estimator_check_for_fit(X_y_binary):
class MockEstimatorObj:
def __init__(self):
pass
def fit(self, X, y):
return self
def predict(self, X):
series = pd.Series([0] * len(X))
series.ww.init()
return series
def predict_proba(self, X):
df = pd.DataFrame({0: [0] * len(X)})
df.ww.init()
return df
class MockEstimator(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
def __init__(self, parameters=None, component_obj=None, random_seed=0):
est = MockEstimatorObj()
super().__init__(
parameters=parameters, component_obj=est, random_seed=random_seed
)
X, y = X_y_binary
est = MockEstimator()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
est.predict(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
est.predict_proba(X)
est.fit(X, y)
est.predict(X)
est.predict_proba(X)
def test_transformer_check_for_fit(X_y_binary):
class MockTransformerObj:
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
def fit_transform(self, X, y=None):
return X
class MockTransformer(Transformer):
name = "Mock Transformer"
def __init__(self, parameters=None, component_obj=None, random_seed=0):
transformer = MockTransformerObj()
super().__init__(
parameters=parameters,
component_obj=transformer,
random_seed=random_seed,
)
def transform(self, X, y=None):
return X
def inverse_transform(self, X, y=None):
return X, y
X, y = X_y_binary
trans = MockTransformer()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
trans.transform(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
trans.inverse_transform(X, y)
trans.fit(X, y)
trans.transform(X)
trans.fit_transform(X, y)
trans.inverse_transform(X, y)
def test_transformer_check_for_fit_with_overrides(X_y_binary):
class MockTransformerWithOverride(Transformer):
name = "Mock Transformer"
def fit(self, X, y):
return self
def transform(self, X, y=None):
df = pd.DataFrame()
df.ww.init()
return df
class MockTransformerWithOverrideSubclass(Transformer):
name = "Mock Transformer Subclass"
def fit(self, X, y):
return self
def transform(self, X, y=None):
df = pd.DataFrame()
df.ww.init()
return df
X, y = X_y_binary
transformer = MockTransformerWithOverride()
transformer_subclass = MockTransformerWithOverrideSubclass()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
transformer.transform(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
transformer_subclass.transform(X)
transformer.fit(X, y)
transformer.transform(X)
transformer_subclass.fit(X, y)
transformer_subclass.transform(X)
def test_all_transformers_needs_fitting():
for component_class in _all_transformers() + _all_estimators():
if component_class.__name__ in [
"DropColumns",
"SelectColumns",
"SelectByType",
]:
assert not component_class.needs_fitting
else:
assert component_class.needs_fitting
def test_all_transformers_check_fit(X_y_binary, ts_data_binary):
for component_class in _all_transformers():
X, y = X_y_binary
if not component_class.needs_fitting:
continue
component = component_class()
# SMOTE will throw errors if we call it but cannot oversample
if "Oversampler" == component_class.name:
component = component_class(sampling_ratio=1)
elif component_class == TimeSeriesFeaturizer:
X, y = ts_data_binary
component = component_class(date_index="date")
with pytest.raises(
ComponentNotYetFittedError, match=f"You must fit {component_class.__name__}"
):
component.transform(X, y)
component.fit(X, y)
component.transform(X, y)
component = component_class()
if "Oversampler" == component_class.name:
component = component_class(sampling_ratio=1)
elif component_class == TimeSeriesFeaturizer:
component = component_class(date_index="date")
component.fit_transform(X, y)
component.transform(X, y)
def test_all_estimators_check_fit(
X_y_binary, ts_data, test_estimator_needs_fitting_false, helper_functions
):
estimators_to_check = [
estimator
for estimator in _all_estimators()
if estimator
not in [
StackedEnsembleClassifier,
StackedEnsembleRegressor,
TimeSeriesBaselineEstimator,
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
VowpalWabbitRegressor,
]
] + [test_estimator_needs_fitting_false]
for component_class in estimators_to_check:
if not component_class.needs_fitting:
continue
if (
ProblemTypes.TIME_SERIES_REGRESSION
in component_class.supported_problem_types
):
X, y = ts_data
else:
X, y = X_y_binary
component = helper_functions.safe_init_component_with_njobs_1(component_class)
with patch.object(component, "_component_obj") as mock_component_obj:
with patch.object(
mock_component_obj, "predict"
) as mock_component_obj_predict:
mock_component_obj_predict.return_value = pd.Series([0] * len(y))
if "Prophet" in component.name:
mock_component_obj_predict.return_value = {
"yhat": pd.Series([0] * len(y)),
"ds": pd.Series([0] * len(y)),
}
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.predict(X)
if (
ProblemTypes.BINARY in component.supported_problem_types
or ProblemTypes.MULTICLASS in component.supported_problem_types
):
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.predict_proba(X)
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.feature_importance
component.fit(X, y)
if (
ProblemTypes.BINARY in component.supported_problem_types
or ProblemTypes.MULTICLASS in component.supported_problem_types
):
component.predict_proba(X)
component.predict(X)
component.feature_importance
@pytest.mark.parametrize("data_type", ["li", "np", "pd", "ww"])
def test_all_transformers_check_fit_input_type(
data_type, X_y_binary, make_data_type, ts_data_binary
):
for component_class in _all_transformers():
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
kwargs = {}
if not component_class.needs_fitting or "Oversampler" in component_class.name:
# since SMOTE determines categorical columns through the logical type, it can only accept ww data
continue
if component_class == TimeSeriesFeaturizer:
X, y = ts_data_binary
kwargs = {"date_index": "date"}
component = component_class(**kwargs)
component.fit(X, y)
def test_no_fitting_required_components(
X_y_binary, test_estimator_needs_fitting_false, helper_functions
):
X, y = X_y_binary
for component_class in all_components() + [test_estimator_needs_fitting_false]:
if not component_class.needs_fitting:
component = helper_functions.safe_init_component_with_njobs_1(
component_class
)
if issubclass(component_class, Estimator):
component.predict(X)
else:
component.transform(X, y)
def test_serialization(X_y_binary, ts_data, tmpdir, helper_functions):
path = os.path.join(str(tmpdir), "component.pkl")
requires_date_index = [ARIMARegressor, ProphetRegressor, TimeSeriesFeaturizer]
for component_class in all_components():
print("Testing serialization of component {}".format(component_class.name))
component = helper_functions.safe_init_component_with_njobs_1(component_class)
if component_class in requires_date_index:
component = component_class(date_index="date")
X, y = ts_data
else:
X, y = X_y_binary
component.fit(X, y)
for pickle_protocol in range(cloudpickle.DEFAULT_PROTOCOL + 1):
component.save(path, pickle_protocol=pickle_protocol)
loaded_component = ComponentBase.load(path)
assert component.parameters == loaded_component.parameters
assert component.describe(return_dict=True) == loaded_component.describe(
return_dict=True
)
if issubclass(component_class, Estimator) and not (
isinstance(
component,
(
StackedEnsembleClassifier,
StackedEnsembleRegressor,
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
VowpalWabbitRegressor,
),
)
):
assert (
component.feature_importance == loaded_component.feature_importance
).all()
@patch("cloudpickle.dump")
def test_serialization_protocol(mock_cloudpickle_dump, tmpdir):
path = os.path.join(str(tmpdir), "pipe.pkl")
component = LogisticRegressionClassifier()
component.save(path)
assert len(mock_cloudpickle_dump.call_args_list) == 1
assert (
mock_cloudpickle_dump.call_args_list[0][1]["protocol"]
== cloudpickle.DEFAULT_PROTOCOL
)
mock_cloudpickle_dump.reset_mock()
component.save(path, pickle_protocol=42)
assert len(mock_cloudpickle_dump.call_args_list) == 1
assert mock_cloudpickle_dump.call_args_list[0][1]["protocol"] == 42
@pytest.mark.parametrize("estimator_class", _all_estimators())
def test_estimators_accept_all_kwargs(
estimator_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
estimator = estimator_class()
if estimator._component_obj is None:
pytest.skip(
f"Skipping {estimator_class} because does not have component object."
)
if estimator_class.model_family == ModelFamily.ENSEMBLE:
params = estimator.parameters
elif estimator_class.model_family == ModelFamily.PROPHET:
params = estimator.get_params()
else:
params = estimator._component_obj.get_params()
if "random_state" in params:
del params["random_state"]
estimator_class(**params)
def test_component_equality_different_classes():
# Tests that two classes which are equivalent are not equal
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
class MockComponentWithADifferentName(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
assert MockComponent() != MockComponentWithADifferentName()
def test_component_equality_subclasses():
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
class MockEstimatorSubclass(MockComponent):
pass
assert MockComponent() != MockEstimatorSubclass()
def test_component_equality():
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
def __init__(self, param_1=0, param_2=0, random_seed=0, **kwargs):
parameters = {"param_1": param_1, "param_2": param_2}
parameters.update(kwargs)
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y=None):
return self
# Test self-equality
mock_component = MockComponent()
assert mock_component == mock_component
# Test defaults
assert MockComponent() == MockComponent()
# Test random_state and random_seed
assert MockComponent(random_seed=10) == MockComponent(random_seed=10)
assert MockComponent(random_seed=10) != MockComponent(random_seed=0)
# Test parameters
assert MockComponent(1, 2) == MockComponent(1, 2)
assert MockComponent(1, 2) != MockComponent(1, 0)
assert MockComponent(0, 2) != MockComponent(1, 2)
# Test fitted equality
mock_component.fit(pd.DataFrame({}))
assert mock_component != MockComponent()
@pytest.mark.parametrize("component_class", all_components())
def test_component_equality_all_components(
component_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
component = component_class()
parameters = component.parameters
equal_component = component_class(**parameters)
assert component == equal_component
def test_component_equality_with_subclasses(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
mock_component = MockComponent()
mock_estimator = MockEstimator()
mock_transformer = MockTransformer()
assert mock_component != mock_estimator
assert mock_component != mock_transformer
assert mock_estimator != mock_component
assert mock_estimator != mock_transformer
assert mock_transformer != mock_component
assert mock_transformer != mock_estimator
def test_mock_component_str(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
assert str(MockComponent()) == "Mock Component"
assert str(MockEstimator()) == "Mock Estimator"
assert str(MockTransformer()) == "Mock Transformer"
def test_mock_component_repr():
component = MockFitComponent()
assert repr(component) == "MockFitComponent(param_a=2, param_b=10)"
component_with_params = MockFitComponent(param_a=29, param_b=None, random_seed=42)
assert repr(component_with_params) == "MockFitComponent(param_a=29, param_b=None)"
component_with_nan = MockFitComponent(param_a=np.nan, param_b=float("nan"))
assert (
repr(component_with_nan) == "MockFitComponent(param_a=np.nan, param_b=np.nan)"
)
component_with_inf = MockFitComponent(param_a=np.inf, param_b=float("-inf"))
assert (
repr(component_with_inf)
== "MockFitComponent(param_a=float('inf'), param_b=float('-inf'))"
)
@pytest.mark.parametrize("component_class", all_components())
def test_component_str(
component_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
component = component_class()
assert str(component) == component.name
@pytest.mark.parametrize(
"categorical",
[
{
"type": Categorical(["mean", "median", "mode"]),
"categories": Categorical(["blue", "green"]),
},
{"type": ["mean", "median", "mode"], "categories": ["blue", "green"]},
],
)
def test_categorical_hyperparameters(X_y_binary, categorical):
X, y = X_y_binary
class MockEstimator:
def fit(self, X, y):
pass
class MockComponent(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
hyperparameter_ranges = categorical
def __init__(self, agg_type, category="green"):
parameters = {"type": agg_type, "categories": category}
est = MockEstimator()
super().__init__(parameters=parameters, component_obj=est, random_seed=0)
assert MockComponent(agg_type="mean").fit(X, y)
assert MockComponent(agg_type="moat", category="blue").fit(X, y)
def test_generate_code_errors():
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(BinaryClassificationPipeline([RandomForestClassifier]))
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(LinearRegressor)
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(Imputer)
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(ComponentBase)
def test_generate_code():
expected_code = (
"from evalml.pipelines.components.estimators.classifiers.logistic_regression_classifier import LogisticRegressionClassifier"
"\n\nlogisticRegressionClassifier = LogisticRegressionClassifier(**{'penalty': 'l2', 'C': 1.0, 'n_jobs': -1, 'multi_class': 'auto', 'solver': 'lbfgs'})"
)
component_code = generate_component_code(LogisticRegressionClassifier())
assert component_code == expected_code
expected_code = (
"from evalml.pipelines.components.estimators.regressors.et_regressor import ExtraTreesRegressor"
"\n\nextraTreesRegressor = ExtraTreesRegressor(**{'n_estimators': 50, 'max_features': 'auto', 'max_depth': 6, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_jobs': -1})"
)
component_code = generate_component_code(ExtraTreesRegressor(n_estimators=50))
assert component_code == expected_code
expected_code = (
"from evalml.pipelines.components.transformers.imputers.imputer import Imputer"
"\n\nimputer = Imputer(**{'categorical_impute_strategy': 'most_frequent', 'numeric_impute_strategy': 'mean', 'categorical_fill_value': None, 'numeric_fill_value': None})"
)
component_code = generate_component_code(Imputer())
assert component_code == expected_code
def test_generate_code_custom(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
expected_code = "mockComponent = MockComponent(**{})"
component_code = generate_component_code(MockComponent())
assert component_code == expected_code
expected_code = "mockEstimator = MockEstimator(**{})"
component_code = generate_component_code(MockEstimator())
assert component_code == expected_code
expected_code = "mockTransformer = MockTransformer(**{})"
component_code = generate_component_code(MockTransformer())
assert component_code == expected_code
@pytest.mark.parametrize("transformer_class", _all_transformers())
@pytest.mark.parametrize("use_custom_index", [True, False])
def test_transformer_fit_and_transform_respect_custom_indices(
use_custom_index, transformer_class, X_y_binary, ts_data_binary
):
check_names = True
if transformer_class == DFSTransformer:
check_names = False
if use_custom_index:
pytest.skip("The DFSTransformer changes the index so we skip it.")
if transformer_class == PolynomialDetrender:
pytest.skip(
"Skipping PolynomialDetrender because we test that it respects custom indices in "
"test_polynomial_detrender.py"
)
X, y = X_y_binary
kwargs = {}
if transformer_class == TimeSeriesFeaturizer:
kwargs.update({"date_index": "date"})
X, y = ts_data_binary
X = pd.DataFrame(X)
y = pd.Series(y)
if use_custom_index:
custom_index = range(100, 100 + X.shape[0])
X.index = custom_index
y.index = custom_index
X_original_index = X.index.copy()
y_original_index = y.index.copy()
transformer = transformer_class(**kwargs)
transformer.fit(X, y)
    pd.testing.assert_index_equal(X.index, X_original_index, check_names=check_names)
    pd.testing.assert_index_equal(y.index, y_original_index, check_names=check_names)
"""
The static functions for various calculations and required parameters
"""
# external packages
from astrodbkit2.astrodb import Database, REFERENCE_TABLES # used for pulling out database and querying
from astropy.coordinates import SkyCoord
from flask_wtf import FlaskForm # web forms
from markdown2 import markdown # using markdown formatting
import numpy as np # numerical python
import pandas as pd # running dataframes
from tqdm import tqdm
from wtforms import StringField, SubmitField # web forms
# internal packages
import argparse # system arguments
from typing import Union, List # type hinting
def sysargs():
"""
These are the system arguments given after calling this python script
Returns
-------
_args
The different argument parameters, can be grabbed via their long names (e.g. _args.host)
"""
_args = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
_args.add_argument('-i', '--host', default='127.0.0.1',
help='Local IP Address to host server, default 127.0.0.1')
_args.add_argument('-p', '--port', default=8000,
help='Local port number to host server through, default 8000', type=int)
_args.add_argument('-d', '--debug', help='Run Flask in debug mode?', default=False, action='store_true')
_args.add_argument('-f', '--file', default='SIMPLE.db',
help='Database file path relative to current directory, default SIMPLE.db')
_args = _args.parse_args()
return _args
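# Illustrative invocation (a sketch; the script filename below is an assumption, not taken
# from this module):
#
#     python simple_app.py -i 0.0.0.0 -p 8080 -d -f SIMPLE.db
#
# This parses into a Namespace with _args.host == '0.0.0.0', _args.port == 8080 (as an int),
# _args.debug True and _args.file == 'SIMPLE.db'; omitting the flags falls back to the
# defaults declared above.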
class SimpleDB(Database): # this keeps pycharm happy about unresolved references
"""
Wrapper class for astrodbkit2.Database specific to SIMPLE
"""
Sources = None # initialise class attribute
Photometry = None
Parallaxes = None
Spectra = None
PhotometryFilters = None
class Inventory:
"""
    For use in the solo result page where the inventory of an object is queried; also grabs the RA & Dec
"""
ra: float = 0
dec: float = 0
def __init__(self, resultdict: dict, args):
"""
Constructor method for Inventory
Parameters
----------
resultdict: dict
The dictionary of all the key: values in a given object inventory
"""
self.results: dict = resultdict # given inventory for a target
for key in self.results: # over every key in inventory
if args.debug:
print(key)
if key in REFERENCE_TABLES: # ignore the reference table ones
continue
lowkey: str = key.lower() # lower case of the key
mkdown_output: str = self.listconcat(key) # get in markdown the dataframe value for given key
setattr(self, lowkey, mkdown_output) # set the key attribute with the dataframe for given key
try:
srcs: pd.DataFrame = self.listconcat('Sources', rtnmk=False) # open the Sources result
self.ra, self.dec = srcs.ra[0], srcs.dec[0]
except (KeyError, AttributeError):
pass
return
def listconcat(self, key: str, rtnmk: bool = True) -> Union[pd.DataFrame, str]:
"""
Concatenates the list for a given key
Parameters
----------
key: str
The key corresponding to the inventory
rtnmk: bool
Switch for whether to return either a markdown string or a dataframe
Returns
-------
df: Union[pd.DataFrame, str]
Either the dataframe for a given key or the markdown parsed string
"""
obj: List[dict] = self.results[key] # the value for the given key
df: pd.DataFrame = pd.concat([pd.DataFrame(objrow, index=[i]) # create dataframe from found dict
for i, objrow in enumerate(obj)], ignore_index=True) # every dict in the list
urlinks = []
if rtnmk and key == 'Spectra':
for src in df.spectrum.values: # over every source in table
srclnk = f'<a href="{src}" target="_blank">Link</a>' # construct hyperlink
urlinks.append(srclnk) # add that to list
df.drop(columns=[col for col in df.columns if any([substr in col for substr in ('wave', 'flux')])],
inplace=True)
df = df.loc[:, 'telescope':].copy()
df['download'] = urlinks
df['observation_date'] = df['observation_date'].dt.date
if rtnmk: # return markdown boolean
df.rename(columns={s: s.replace('_', ' ') for s in df.columns}, inplace=True) # renaming columns
return markdown(df.to_html(index=False, escape=False,
classes='table table-dark table-bordered table-striped')) # html then markdown
return df # otherwise return dataframe as is
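# Illustrative usage of Inventory (a sketch under assumptions -- the target name is made up and
# the db.inventory() call follows the usual astrodbkit2 API rather than anything in this module):
#
#     db = SimpleDB('SIMPLE.db')
#     resultdict = db.inventory('2MASS J0000+0000')   # hypothetical object
#     inv = Inventory(resultdict, args)
#     inv.ra, inv.dec                                  # coordinates pulled from the Sources table
#     inv.photometry                                   # markdown/HTML table for that key, if present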
class SearchForm(FlaskForm):
"""
Searchbar class
"""
search = StringField('Search for an object:', id='autocomplete') # searchbar
submit = SubmitField('Query', id='querybutton') # clicker button to send request
def all_sources(db_file: str):
"""
Queries the full table to get all the sources
Parameters
----------
db_file: str
The connection string to the database
Returns
-------
allresults
Just the main IDs
fullresults
The full dataframe of all the sources
"""
db = SimpleDB(db_file, connection_arguments={'check_same_thread': False}) # open database
fullresults: pd.DataFrame = db.query(db.Sources).pandas()
allresults: list = fullresults['source'].tolist() # gets all the main IDs in the database
return allresults, fullresults
def find_colours(photodf: pd.DataFrame, allbands: np.ndarray, photfilters: pd.DataFrame):
"""
Find all the colours using available photometry
Parameters
----------
photodf: pd.DataFrame
The dataframe with all photometry in
allbands: np.ndarray
All the photometric bands
photfilters: pd.DataFrame
The filters
Returns
-------
photodf: pd.DataFrame
The dataframe with all photometry and colours in
"""
for band in allbands: # loop over all bands
bandtrue = band
if '(' in band: # duplicate bands
bandtrue = band[:band.find('(')]
if bandtrue not in photfilters.columns: # check if we have this in the dictionary
raise KeyError(f'{bandtrue} not yet a supported filter')
for nextband in allbands: # over all bands
if band == nextband: # don't make a colour of same band (0)
continue
nextbandtrue = nextband
if '(' in nextband: # duplicate bands
nextbandtrue = nextband[:nextband.find('(')]
if nextbandtrue not in photfilters.columns: # check if we have this in dictionary
raise KeyError(f'{nextbandtrue} not yet a supported filter')
if photfilters.at['effective_wavelength', bandtrue] >= \
photfilters.at['effective_wavelength', nextbandtrue]: # if not blue-red
continue
try:
photodf[f'{band}-{nextband}'] = photodf[band] - photodf[nextband] # colour
except KeyError:
photodf[f'{band}-{nextband}'] = photodf[bandtrue] - photodf[nextband] # colour for full sample
return photodf
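# Worked illustration of find_colours (hypothetical band names and wavelengths, used only for
# this example): if photfilters carries an 'effective_wavelength' row with J < H < K and photodf
# has magnitude columns 'J', 'H' and 'K', the loops above append the blue-minus-red colours
# 'J-H', 'J-K' and 'H-K'. Red-minus-blue pairs are skipped by the effective_wavelength check,
# and a duplicate band such as 'J(2)' falls back to its base filter 'J' via the KeyError branch.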
def parse_photometry(photodf: pd.DataFrame, allbands: np.ndarray, multisource: bool = False) -> pd.DataFrame:
"""
Parses the photometry dataframe handling multiple references for same magnitude
Parameters
----------
photodf: pd.DataFrame
The dataframe with all photometry in
allbands: np.ndarray
All the photometric bands
multisource: bool
Switch whether to iterate over initial dataframe with multiple sources
Returns
-------
newphoto: pd.DataFrame
DataFrame of effectively transposed photometry
"""
def replacer(val: int) -> str:
"""
Swapping an integer value for a string denoting the value
Parameters
----------
val: int
The input number
Returns
-------
_: str
The formatted string of the value
"""
if not val:
return ''
return f'({val})'
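    # For example (illustration only): replacer(0) -> '' and replacer(2) -> '(2)', which is
    # presumably how repeat measurements of the same band end up labelled band, band(1), band(2), ...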
def one_source_iter(onephotodf: pd.DataFrame):
"""
Parses the photometry dataframe handling multiple references for same magnitude for one object
Parameters
----------
onephotodf: pd.DataFrame
The dataframe with all the photometry in it
Returns
-------
thisnewphot: pd.DataFrame
DataFrame of transposed photometry
"""
onephotodf.set_index('band', inplace=True) # set the band as the index
thisnewphot: pd.DataFrame = onephotodf.loc[:, ['magnitude']].T # flip the dataframe and keep only mags
        s = pd.Series(thisnewphot.columns)
"""
5. Running Bycycle on 3D Arrays
===============================
Compute bycycle features for 3D organizations of timeseries.
Bycycle supports computing the features of 3D signals using :func:`~.compute_features_3d`.
Signals may be organized in a different ways, including (n_participants, n_channels, n_timepoints)
or (n_channels, n_epochs, n_timepoints). The difference between these organizations is that
continuity may be assumed across epochs, but not channels. The ``axis`` argument is used to
specificy the axis to iterate over in parallel.
"""
####################################################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neurodsp.sim import sim_combined
from bycycle.group import compute_features_3d
from bycycle.plts import plot_feature_categorical
from bycycle.utils import flatten_dfs
####################################################################################################
# Example 1. The ``axis`` Argument
# --------------------------------
#
# Here, we will show how the axis arguments works by iterating over slices of an 3D array.
# The axis argument be may specified as:
#
# - ``axis=0`` : Iterates over 2D slices along the zeroth dimension, (i.e. for each channel in
# (n_channels, n_epochs, n_timepoints)).
# - ``axis=1`` : Iterates over 2D slices along the first dimension (i.e. across flatten epochs in
# (n_epochs, n_timepoints)).
# - ``axis=(0, 1)`` : Iterates over 1D slices along the zeroth and first dimension (i.e across
# each signal independently in (n_participants, n_channels, n_timepoints)).
#
####################################################################################################
arr = np.ones((2, 3, 4))
dim0_len = np.shape(arr)[0]
dim1_len = np.shape(arr)[1]
print("axis=0")
for dim0 in range(dim0_len):
print(np.shape(arr[dim0]))
print("\naxis=1")
for dim1 in range(dim1_len):
print(np.shape(arr[:, dim1]))
print("\naxis=(0, 1)")
for dim0 in range(dim0_len):
for dim1 in range(dim1_len):
print(np.shape(arr[dim0, dim1]))
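####################################################################################################
#
# As a quick check (a small illustrative addition, not part of the original example), the same
# slices can be gathered with list comprehensions; for the (2, 3, 4) array above this prints
# shapes of (3, 4), (2, 4) and (4,) respectively.
shapes_axis0 = [np.shape(arr[dim0]) for dim0 in range(dim0_len)]
shapes_axis1 = [np.shape(arr[:, dim1]) for dim1 in range(dim1_len)]
shapes_axis01 = [np.shape(arr[dim0, dim1])
                 for dim0 in range(dim0_len) for dim1 in range(dim1_len)]
print(shapes_axis0, shapes_axis1, shapes_axis01)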
####################################################################################################
#
# Example 2. 3D Array (n_channels, n_epochs, n_timepoints)
# --------------------------------------------------------
# The features from a 3d array of (n_channels, n_epochs, n_timepoints) will be computed here.
# Bursting frequencies and rise-decay symmetry will be modulated across channels and epochs,
# respectively. The bursting frequencies and rise-decay symmetries will then be compared between
# the simulated parameters and bycycle's calculation.
####################################################################################################
# Simulation settings
n_seconds = 10
fs = 500
f_range = (5, 15)
n_channels = 5
n_epochs = 10
# Define rdsym values for rest and task trials
rdsym_rest = 0.5
rdsym_task = 0.75
####################################################################################################
# Simulate a 3d timeseries
sim_components_rest = {'sim_powerlaw': dict(exponent=-2),
'sim_bursty_oscillation': dict(cycle='asine', rdsym=rdsym_rest)}
sim_components_task = {'sim_powerlaw': dict(exponent=-2),
'sim_bursty_oscillation': dict(cycle='asine', rdsym=rdsym_task)}
sigs_rest = np.zeros((n_channels, n_epochs, n_seconds*fs))
sigs_task = np.zeros((n_channels, n_epochs, n_seconds*fs))
freqs = np.linspace(5, 45, 5)
for ch_idx, freq in zip(range(n_channels), freqs):
sim_components_rest['sim_bursty_oscillation']['freq'] = freq
sim_components_task['sim_bursty_oscillation']['freq'] = freq
for ep_idx in range(n_epochs):
sigs_task[ch_idx][ep_idx] = sim_combined(n_seconds, fs, components=sim_components_task)
sigs_rest[ch_idx][ep_idx] = sim_combined(n_seconds, fs, components=sim_components_rest)
####################################################################################################
# Compute features with a higher-than-default period consistency threshold.
# This allows for more accurate estimates of burst frequency.
thresholds = dict(amp_fraction_threshold=0., amp_consistency_threshold=.5,
period_consistency_threshold=.9, monotonicity_threshold=.6,
min_n_cycles=3)
compute_kwargs = {'burst_method': 'cycles', 'threshold_kwargs': thresholds}
dfs_rest = compute_features_3d(sigs_rest, fs, (1, 50), axis=0,
compute_features_kwargs=compute_kwargs)
dfs_task = compute_features_3d(sigs_task, fs, (1, 50), axis=0,
compute_features_kwargs=compute_kwargs)
####################################################################################################
# Merge epochs into a single dataframe
df_rest = flatten_dfs(dfs_rest, ['rest'] * n_channels * n_epochs, 'Epoch')
df_task = flatten_dfs(dfs_task, ['task'] * n_channels * n_epochs, 'Epoch')
df_epochs = pd.concat([df_rest, df_task], axis=0)
# Merge channels into a single dataframe
ch_labels = ["CH{ch_idx}".format(ch_idx=ch_idx)
for ch_idx in range(n_channels) for ep_idx in range(n_epochs)]
df_channels = flatten_dfs(np.vstack([dfs_rest, dfs_task]), ch_labels * 2, 'Channel')
# Limit to bursts
df_epochs = df_epochs[df_epochs['is_burst'] == True]
df_channels = df_channels[df_channels['is_burst'] == True]
####################################################################################################
# Plot estimated frequency
df_channels['freqs'] = fs / df_channels['period'].values
plot_feature_categorical(df_channels, 'freqs', 'Channel', ylabel='Burst Frequency',
xlabel=['CH00', 'CH01', 'CH02', 'CH03', 'CH04'])
####################################################################################################
# Compare estimated frequency to simulated frequency
freqs_est = df_channels.groupby('Channel').mean()['freqs'].values
df_freqs = pd.DataFrame()
df_freqs['Channel'] = ['CH_0{idx}'.format(idx=idx) for idx in range(n_channels)]
df_freqs['Simulated Freqs'] = freqs
df_freqs['Calculated Freqs'] = freqs_est
df_freqs['Error'] = np.abs(freqs - freqs_est)
df_freqs
####################################################################################################
# See how well bycycle estimated each bursting cycle's rise-decay symmetry within epochs
rdsym_rest = df_epochs[df_epochs['Epoch'] == 'rest']['time_rdsym'].mean()
rdsym_task = df_epochs[df_epochs['Epoch'] == 'task']['time_rdsym'].mean()
df_rdsym = pd.DataFrame()
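# A plausible completion (assumption -- the rest of this comparison is missing from this copy):
# tabulate the simulated rise-decay symmetry values against bycycle's estimates, mirroring the
# df_freqs table above.
df_rdsym['Epoch'] = ['rest', 'task']
df_rdsym['Simulated rdsym'] = [0.5, 0.75]
df_rdsym['Calculated rdsym'] = [rdsym_rest, rdsym_task]
df_rdsym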
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
bdate_range, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesDatetimeValues:
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'day_name', 'month_name']
ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
assert a == b
else:
tm.assert_series_equal(a, b)
# datetimeindex
        cases = [Series(date_range('20130101', periods=5), name='xxx')]
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
        self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
        self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
        pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
from clean2 import*
import pandas as pd
import matplotlib.pyplot as plt
import math
import datetime
import time
def main():
loop_set=[3,5]
set3=[] #labels scaled at different window sizes
set4=[] #labels without scaling
for i in range(0,len(loop_set)):
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'0'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'1'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'2'])
set4.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])])
data_window=pd.DataFrame()
data_window_labels=pd.DataFrame()
final_data=pd.DataFrame()
predictors_1=pd.DataFrame()
predictors=pd.DataFrame()
predictors_final=pd.DataFrame()
data_copy_labels=pd.DataFrame()
data_predict=pd.DataFrame()
close_win=pd.DataFrame()
data=pd.DataFrame()
data_copy=pd.DataFrame()
    labe_train = pd.DataFrame()
import datetime
import pandas as pd
singleton = None
def convert_receipt(receipt, contract_balance, block_info):
dict_receipt = dict(receipt)
new_dict = {"contract_balance" : str(contract_balance)}
receipt_include = ["transactionHash", "from", "to", "gasUsed"]
block_include = ["timestamp"] ## convert timestamp
# copy only key:value pairs from receipt_include into new_dict (for loop)
for key in receipt_include:
new_dict[key] = receipt[key]
# copy only key, value pairs from block_include into new_dict (for loop)
for key in block_include:
new_dict[key] = block_info[key]
    # convert the raw Unix "timestamp" (seconds) copied above into a datetime object
new_dict["timestamp"] = datetime.datetime.utcfromtimestamp(new_dict["timestamp"])
new_dict["transactionHash"] = new_dict["transactionHash"].hex()
return new_dict
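# Example of the conversion above (value is illustrative):
# datetime.datetime.utcfromtimestamp(1600000000) -> datetime.datetime(2020, 9, 13, 12, 26, 40),
# so new_dict["timestamp"] holds a datetime rather than the block's raw integer seconds.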
def add_block(receipt, contract_balance, block_info):
global singleton
dict_receipt = convert_receipt(receipt, contract_balance, block_info)
if not singleton:
singleton = {}
for key in dict_receipt.keys():
singleton[key] = []
for key, value in dict_receipt.items():
singleton[key].insert(0, value)
return singleton
def get_receipts():
if not singleton:
return "There are no receipts to display."
return singleton
# Display the information on the webpage
def update_block_chain_df(receipt, w3):
# st.write("Transaction receipt mined:")
dict_receipt = dict(receipt)
contract_address = dict_receipt["to"]
# Access the balance of an account using the address
contract_balance = w3.eth.get_balance(contract_address)
# st.write(contract_balance)
# Access information for the most recent block
block_info = w3.eth.get_block("latest")
# st.write(dict(block_info))
# calls receipt to add block
add_block(receipt, contract_balance, block_info)
block_chain = get_receipts()
# st.write(block_chain)
    block_chain_df = pd.DataFrame.from_dict(block_chain)
import numpy as np
import pandas as pd
from domain.rastrigin.rastrigin_Categorize import rastrigin_Categorize
from domain.cube.cube_Categorize import cube_Categorize
from domain.wheelcase.wheelcase_Categorize import wheelcase_Categorize
# param: maximize would indicate whether to search for the maximum fitness or the minimum;
# the choice is currently hard-coded below (ascending sort + keep='first'), see the commented-out branch.
def getBestPerCell(samples,fitness,d,edges):
def feval(funcName,*args):
return eval(funcName)(*args)
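    # feval dispatches on a function name given as a string; e.g. (illustrative)
    # feval('cube_Categorize', samples, d) evaluates to cube_Categorize(samples, d).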
fitness = pd.DataFrame(data=fitness)
# print("samples")
# print(samples)
# print("fitness")
# print(fitness)
# print("edges")
# print(edges)
# Get features of all samples
feature = feval(d.categorize, samples, d)
# print("feature")
# print(feature)
# print("feature")
# print(feature)
for iDim in range(0,d.nDims):
if ("df_bin" in locals()):
df_add = pd.DataFrame(data=np.digitize(feature.iloc[:,iDim], edges[iDim]))
df_bin = pd.concat([df_bin,df_add], axis=1)
else:
df_bin = pd.DataFrame(data=np.digitize(feature.iloc[:,iDim], edges[iDim]))
# bin[:][iDim] = np.digitize(feature.iloc[:,iDim], edges[iDim])
# print("df_bin")
# print(df_bin)
# df_bin = pd.DataFrame(data=bin)
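    # np.digitize example (illustrative): np.digitize([0.2, 0.5, 0.9], [0.0, 0.25, 0.5, 0.75, 1.0])
    # returns array([1, 3, 4]) -- the bin index of each feature value, which is what
    # fills each column of df_bin above.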
fitness = pd.DataFrame(data=fitness)
# print("fitness")
# print(fitness)
# a = df_bin.append(fitness.iloc[0], ignore_index=True)
a = pd.concat([df_bin, fitness], axis=1)
a.columns = range(a.shape[1])
# print("a")
# print(a)
sortedByFeatureAndFitness = a.sort_values(by=[0,1,2])
# print("sortedByFeatureAndFitness")
# print(sortedByFeatureAndFitness)
indxSortOne = list(sortedByFeatureAndFitness.index.values)
# print("indxSortOne")
# print(indxSortOne)
# sortedByFeatureAndFitness.reset_index(drop=True, inplace=True)
# if (maximize):
df_drop_dupl = sortedByFeatureAndFitness.drop_duplicates(subset=[0,1], keep='first')
# else:
# df_drop_dupl = sortedByFeatureAndFitness.drop_duplicates(subset=[0,1], keep='last')
indxSortTwo = list(df_drop_dupl.index.values)
# print("indxSortTwo")
# print(indxSortTwo)
bestIndex = indxSortTwo
# print("bestIndex")
# print(bestIndex)
# print("df_bin")
# print(df_bin)
    bestBin = pd.DataFrame(data=df_bin.iloc[bestIndex,:])
import sys
import numpy as np
import pandas as pd
import json
import os
from joblib import Parallel, delayed
from gtad_lib import opts
thumos_class = {
7 : 'BaseballPitch',
9 : 'BasketballDunk',
12: 'Billiards',
21: 'CleanAndJerk',
22: 'CliffDiving',
23: 'CricketBowling',
24: 'CricketShot',
26: 'Diving',
31: 'FrisbeeCatch',
33: 'GolfSwing',
36: 'HammerThrow',
40: 'HighJump',
45: 'JavelinThrow',
51: 'LongJump',
68: 'PoleVault',
79: 'Shotput',
85: 'SoccerPenalty',
92: 'TennisSwing',
93: 'ThrowDiscus',
97: 'VolleyballSpiking',
}
def IOU(s1, e1, s2, e2):
if (s2 > e1) or (s1 > e2):
return 0
Aor = max(e1, e2) - min(s1, s2)
Aand = min(e1, e2) - max(s1, s2)
return float(Aand) / Aor
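# Worked example: for segments [0, 10] and [5, 15] the union spans
# max(10, 15) - min(0, 5) = 15 and the intersection spans min(10, 15) - max(0, 5) = 5,
# so IOU(0, 10, 5, 15) == 5 / 15 ~= 0.333; disjoint segments return 0.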
def Soft_NMS(df, nms_threshold=1e-5, num_prop=200):
'''
From BSN code
:param df:
:param nms_threshold:
:return:
'''
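    # Note: this is the Gaussian variant of soft-NMS -- each overlapping proposal keeps
    # score * exp(-iou**2 / nms_threshold). With the default nms_threshold=1e-5 even a
    # small overlap (iou=0.1 gives exp(-0.01 / 1e-5) ~= exp(-1000) ~= 0) effectively
    # zeroes the competing score, so it behaves close to hard NMS.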
df = df.sort_values(by="score", ascending=False)
tstart = list(df.xmin.values[:])
tend = list(df.xmax.values[:])
tscore = list(df.score.values[:])
rstart = []
rend = []
rscore = []
    # frost: trick -- zero out the score of any detection longer than 300
    # so it can never be selected below
for idx in range(0, len(tscore)):
if tend[idx] - tstart[idx] >= 300:
tscore[idx] = 0
while len(tscore) > 1 and len(rscore) < num_prop and max(tscore)>0:
max_index = tscore.index(max(tscore))
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = IOU(tstart[max_index], tend[max_index], tstart[idx], tend[idx])
if tmp_iou > 0:
tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) / nms_threshold)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
    newDf = pd.DataFrame()
import pandas as pd
import dash
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_core_components as dcc
import dash_html_components as html
from dash.exceptions import PreventUpdate
import dash_bootstrap_components as dbc
import dash_table
import plotly.graph_objs as go
from threading import Thread
import queue
import serial
import serial.tools.list_ports
import time
from pathlib import Path
import json
import sqlite3
from datetime import datetime
# globals... yuk
FILE_DIR = ''
APP_ID = 'serial_data'
Q = queue.Queue()
SERIAL_THREAD = None
class SerialThread(Thread):
def __init__(self, port, baud=115200):
super().__init__()
self.port = port
self._isRunning = True
self.ser_obj = serial.Serial(port=port,
baudrate=baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None)
def run(self):
while self._isRunning:
try:
while self.ser_obj.in_waiting > 2:
try:
line = self.ser_obj.readline()
split_line = line.strip().decode("utf-8")
Q.put(split_line)
except:
continue
except:
continue
def stop(self):
self._isRunning = False
time.sleep(0.25)
self.ser_obj.close()
return None
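# Illustrative usage sketch (the port name is an assumption; any valid serial port works):
#     thread = SerialThread('COM3', baud=115200)
#     thread.start()   # decoded lines from the port are pushed onto the global queue Q
#     line = Q.get()   # consumers pull newline-delimited strings from Q
#     thread.stop()    # ends the read loop and closes the serial port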
# layout
layout = dbc.Container([
dbc.Row(
dbc.Col([
dcc.Store(id=f'{APP_ID}_store'),
dcc.Interval(id=f'{APP_ID}_interval',
interval=2000,
n_intervals=0,
disabled=True),
html.H2('Serial Data Plotter'),
html.P('This tests plotting data from serial (arduino) using a background thread to collect the data and send it to a queue. '
'Data is retrieved from the queue and stored in the browser as well as written to a file')
])
),
dbc.Row([
dbc.Col(
dbc.FormGroup([
dbc.Button('COM Ports (refresh)', id=f'{APP_ID}_com_button'),
dcc.Dropdown(id=f'{APP_ID}_com_dropdown',
placeholder='Select COM port',
options=[],
multi=False),
dbc.Textarea(id=f'{APP_ID}_com_desc_label', disabled=True )
]),
width=4
),
dbc.Col(
dbc.FormGroup([
dbc.Label('Headers'),
dbc.Button('Initialize Headers', id=f'{APP_ID}_init_header_button', block=True),
dash_table.DataTable(
id=f'{APP_ID}_header_dt',
columns=[
{"name": 'Position', "id": 'pos', "type": 'numeric', 'editable': False},
{"name": 'Name', "id": 'name', "type": 'text', 'editable': False},
{"name": 'Format', "id": 'fmt', "type": 'text', "presentation": 'dropdown'}
],
data=None,
editable=True,
row_deletable=False,
dropdown={
'fmt': {
'options': [
{'label': i, 'value': i} for i in ['text', 'real', 'integer']
],
},
}
),
]),
width=4
),
]),
dbc.Row(
dbc.Col([
dbc.Toast(
children=[],
id=f'{APP_ID}_header_toast',
header="Initialize Headers",
icon="danger",
dismissable=True,
is_open=False
),
],
width="auto"
),
),
dbc.Row([
dbc.Col(
dbc.FormGroup([
dbc.Label('Filename'),
dbc.Input(placeholder='filename',
id=f'{APP_ID}_filename_input',
type='text',
value=f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db')
])
)
]),
dbc.ButtonGroup([
dbc.Button('Start', id=f'{APP_ID}_start_button', n_clicks=0, disabled=True, size='lg', color='secondary'),
dbc.Button('Stop', id=f'{APP_ID}_stop_button', n_clicks=0, disabled=True, size='lg', color='secondary'),
dbc.Button('Clear', id=f'{APP_ID}_clear_button', n_clicks=0, disabled=True, size='lg'),
dbc.Button('Download Data', id=f'{APP_ID}_download_button', n_clicks=0, disabled=True, size='lg'),
],
className='mt-2 mb-2'
),
html.H2('Data Readouts'),
dcc.Dropdown(
id=f'{APP_ID}_readouts_dropdown',
multi=True,
options=[],
value=None
),
dbc.CardDeck(
id=f'{APP_ID}_readouts_card_deck'
),
html.H2('Data Plots', className='mt-2 mb-1'),
dbc.ButtonGroup([
dbc.Button("Add Plot", id=f'{APP_ID}_add_figure_button'),
dbc.Button("Remove Plot", id=f'{APP_ID}_remove_figure_button'),
]),
html.Div(
id=f'{APP_ID}_figure_div'
),
])
def add_dash(app):
@app.callback(
[Output(f'{APP_ID}_header_dt', 'data'),
Output(f'{APP_ID}_header_toast', 'children'),
Output(f'{APP_ID}_header_toast', 'is_open'),
],
[Input(f'{APP_ID}_init_header_button', 'n_clicks')],
[State(f'{APP_ID}_com_dropdown', 'value')]
)
def serial_data_init_header(n_clicks, com):
if n_clicks is None or com is None:
raise PreventUpdate
baud = 115200
try:
ser_obj = serial.Serial(port=com,
baudrate=baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=10)
split_line = '_'
while split_line[0] != '{':
line = ser_obj.readline()
split_line = line.strip().decode("utf-8")
split_line = line.strip().decode("utf-8")
jdic = json.loads(split_line)
data = [{'pos': i, 'name': k} for i, k in enumerate(jdic.keys())]
for i, k in enumerate(jdic.keys()):
if isinstance(jdic[k], int):
data[i].update({'fmt': 'integer'})
elif isinstance(jdic[k], float):
data[i].update({'fmt': 'real'})
else:
data[i].update({'fmt': 'text'})
ser_obj.close()
return data, '', False
except Exception as e:
return [{}], html.P(str(e)), True
return data, '', False
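    # Illustrative: a serial line of '{"t": 1.0, "rpm": 900}' yields
    # data = [{'pos': 0, 'name': 't', 'fmt': 'real'}, {'pos': 1, 'name': 'rpm', 'fmt': 'integer'}].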
@app.callback(
Output(f'{APP_ID}_com_dropdown', 'options'),
[Input(f'{APP_ID}_com_button', 'n_clicks')]
)
def serial_data_refresh_com_ports(n_clicks):
if n_clicks is None:
raise PreventUpdate
ports = [{'label': comport.device, 'value': comport.device} for comport in serial.tools.list_ports.comports()]
return ports
@app.callback(
Output(f'{APP_ID}_com_desc_label', 'value'),
[Input(f'{APP_ID}_com_dropdown', 'value')]
)
def serial_data_com_desc(com):
if com is None:
raise PreventUpdate
ports = [comport.device for comport in serial.tools.list_ports.comports()]
idx = ports.index(com)
descs = [comport.description for comport in serial.tools.list_ports.comports()]
return descs[idx]
@app.callback(
[
Output(f'{APP_ID}_interval', 'disabled'),
Output(f'{APP_ID}_start_button', 'disabled'),
Output(f'{APP_ID}_start_button', 'color'),
Output(f'{APP_ID}_stop_button', 'disabled'),
Output(f'{APP_ID}_stop_button', 'color'),
Output(f'{APP_ID}_clear_button', 'disabled'),
Output(f'{APP_ID}_clear_button', 'color'),
Output(f'{APP_ID}_filename_input', 'disabled'),
Output(f'{APP_ID}_filename_input', 'value'),
Output(f'{APP_ID}_header_dt', 'editable'),
Output(f'{APP_ID}_store', 'clear_data'),
],
[
Input(f'{APP_ID}_start_button', 'n_clicks'),
Input(f'{APP_ID}_stop_button', 'n_clicks'),
Input(f'{APP_ID}_clear_button', 'n_clicks'),
Input(f'{APP_ID}_header_dt', 'data'),
],
[
State(f'{APP_ID}_com_dropdown', 'value'),
State(f'{APP_ID}_filename_input', 'value'),
State(f'{APP_ID}_header_dt', 'data')
]
)
def serial_data_start_stop(n_start, n_stop, n_clear, hdr_data, port, filename, data_header):
global SERIAL_THREAD
global Q
ctx = dash.callback_context
if any([n_start is None, n_stop is None, port is None, hdr_data is None, n_clear is None]):
raise PreventUpdate
if pd.DataFrame(hdr_data).empty:
raise PreventUpdate
df_hdr = pd.DataFrame(data_header).sort_values('pos')
df_hdr['name'] = df_hdr['name'].fillna(df_hdr['pos'].astype(str))
headers = df_hdr['name'].tolist()
trig = ctx.triggered[0]['prop_id'].split('.')[0]
if trig == f'{APP_ID}_header_dt':
if len(data_header[0].keys()) == 3 and ~df_hdr.isnull().values.any():
return True, False, 'success', True, 'secondary', True, 'secondary', False, filename, True, False
else:
return True, True, 'secondary', True, 'secondary', True, 'secondary', False, filename, True, False
if trig == f'{APP_ID}_start_button':
print(f'starting: {filename}')
if filename is None or filename == '':
filename = f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db'
if (Path(FILE_DIR) / filename).exists():
clear = False
else:
clear = True
SERIAL_THREAD = SerialThread(port, baud=115200)
SERIAL_THREAD.start()
return False, True, 'secondary', False, 'danger', True, 'secondary', True, filename, False, clear
if trig == f'{APP_ID}_stop_button':
print('stopping')
SERIAL_THREAD.stop()
with Q.mutex:
Q.queue.clear()
return True, False, 'success', True, 'secondary', False, 'warning', False, filename, True, False
if trig == f'{APP_ID}_clear_button':
print('clearing')
filename = f'data/my_data_{datetime.now().strftime("%m.%d.%Y.%H.%M.%S")}.db'
return True, False, 'success', True, 'secondary', True, 'secondary', False, filename, True, True
@app.callback(
Output(f'{APP_ID}_store', 'data'),
[Input(f'{APP_ID}_interval', 'n_intervals')],
[State(f'{APP_ID}_interval', 'disabled'),
State(f'{APP_ID}_store', 'data'),
State(f'{APP_ID}_filename_input', 'value'),
State(f'{APP_ID}_header_dt', 'data')
]
)
def serial_data_update_store(n_intervals, disabled, data, filename, data_header):
global Q
# get data from queue
if disabled is not None and not disabled:
new_data = []
while not Q.empty():
new_data_dic = json.loads(Q.get())
new_data.append(tuple((new_data_dic[c["name"]] for c in data_header if c["name"] in new_data_dic.keys())))
conn = sqlite3.connect(FILE_DIR + filename)
c = conn.cursor()
c.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='my_data' ''')
if c.fetchone()[0] == 1:
c.executemany(f'INSERT INTO my_data VALUES ({(",".join(["?"] * len(data_header)) )})', new_data)
conn.commit()
last_row_id = c.execute("SELECT COUNT() FROM my_data").fetchone()[0]
conn.close()
else:
c.execute(
f'''CREATE TABLE my_data
(''' + ', '.join([f'{hdr["name"]} {hdr["fmt"]}' for hdr in data_header])
+ ')'
)
c.executemany(f'INSERT INTO my_data VALUES ({(",".join(["?"] * len(data_header)) )})', new_data)
conn.commit()
last_row_id = c.execute("SELECT COUNT() FROM my_data").fetchone()[0]
conn.close()
return last_row_id
@app.callback(
Output(f'{APP_ID}_readouts_dropdown', 'options'),
Input(f'{APP_ID}_header_dt', 'data')
)
def serial_data_readout_options(hdr_data):
if hdr_data is None:
raise PreventUpdate
if pd.DataFrame(hdr_data).empty:
raise PreventUpdate
        df_hdr = pd.DataFrame(hdr_data)
import pandas as pd
import numpy as np
import h5py as h5
from typing import Optional, Union
import datetime as dt
from audata.file import File as AuFile
"""
Wrapper for the audata File class
"""
class File (AuFile):
def __init__ (self,
file: h5.File,
time_reference: Optional[dt.datetime] = None,
return_datetimes: bool = True):
super().__init__(file)
def numerics(self, loinc=[], signals=[], time='relative'):
print('Show numerics for {}'.format(loinc))
def waveforms(self, loinc=[], signals=[], time='relative'):
pass
def show_map(self):
        print(pd.DataFrame(self['Mapping'].hdf[:]))
import calendar
import datetime
import warnings
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
from numpy.testing import assert_allclose
import pytest
from pvlib._deprecation import pvlibDeprecationWarning
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (fail_on_pvlib_version, requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014, 6, 24),
end=datetime.datetime(2014, 6, 26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def golden():
return Location(39.742476, -105.1786, 'America/Denver', 1830.14)
@pytest.fixture()
def golden_mst():
return Location(39.742476, -105.1786, 'MST', 1830.14)
@pytest.fixture()
def expected_solpos():
return _expected_solpos_df()
# hack to make tests work without too much modification while avoiding
# pytest 4.0 inability to call features directly
def _expected_solpos_df():
return pd.DataFrame({'elevation': 39.872046,
'apparent_zenith': 50.111622,
'azimuth': 194.340241,
'apparent_elevation': 39.888378},
index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z'])
@pytest.fixture()
def expected_rise_set_spa():
# for Golden, CO, from NREL SPA website
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 21, 55),
datetime.datetime(2015, 8, 2, 5, 0, 27)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 47, 43),
datetime.datetime(2015, 8, 2, 19, 13, 58)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 12, 4, 45),
datetime.datetime(2015, 8, 2, 12, 6, 58)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
@pytest.fixture()
def expected_rise_set_ephem():
# for Golden, CO, from USNO websites
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 1),
datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 1, 3),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 7, 22, 0),
datetime.datetime(2015, 1, 2, 7, 22, 0),
datetime.datetime(2015, 1, 3, 7, 22, 0),
datetime.datetime(2015, 8, 2, 5, 0, 0)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 16, 47, 0),
datetime.datetime(2015, 1, 2, 16, 48, 0),
datetime.datetime(2015, 1, 3, 16, 49, 0),
datetime.datetime(2015, 8, 2, 19, 13, 0)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 12, 4, 0),
datetime.datetime(2015, 1, 2, 12, 5, 0),
datetime.datetime(2015, 1, 3, 12, 5, 0),
datetime.datetime(2015, 8, 2, 12, 7, 0)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
@fail_on_pvlib_version('0.7')
def test_deprecated_07():
tt = pd.DatetimeIndex(['2015-01-01 00:00:00']).tz_localize('MST')
with pytest.warns(pvlibDeprecationWarning):
solarposition.get_sun_rise_set_transit(tt,
39.7,
-105.2)
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_c(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@needs_pandas_0_17
def test_sun_rise_set_transit_spa(expected_rise_set_spa, golden):
# solution from NREL SAP web calculator
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
datetime.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
datetime.datetime(2004, 12, 4, 19, 2, 3)]
).tz_localize('UTC').tolist()
transit = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 12, 4, 36),
datetime.datetime(2004, 12, 4, 11, 50, 22)]
).tz_localize('UTC').tolist()
frame = pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit}, index=times)
result = solarposition.sun_rise_set_transit_spa(times, south.latitude,
south.longitude,
delta_t=65.0)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('1s')
assert_frame_equal(frame, result_rounded)
# test for Golden, CO compare to NREL SPA
result = solarposition.sun_rise_set_transit_spa(
expected_rise_set_spa.index, golden.latitude, golden.longitude,
delta_t=65.0)
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('s').tz_convert('MST')
assert_frame_equal(expected_rise_set_spa, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem(expected_rise_set_ephem, golden):
# test for Golden, CO compare to USNO, using local midnight
result = solarposition.sun_rise_set_transit_ephem(
expected_rise_set_ephem.index, golden.latitude, golden.longitude,
next_or_previous='next', altitude=golden.altitude, pressure=0,
temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected_rise_set_ephem, result_rounded)
# test next sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 2, 15, 3, 0),
datetime.datetime(2015, 1, 2, 21, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(times,
golden.latitude,
golden.longitude,
next_or_previous='next',
altitude=golden.altitude,
pressure=0,
temperature=11,
horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test previous sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 3, 3, 0, 0),
datetime.datetime(2015, 1, 3, 13, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test with different timezone
times = times.tz_convert('UTC')
    expected = expected.tz_convert('UTC')  # reuse result from previous
for col, data in expected.iteritems():
expected[col] = data.dt.tz_convert('UTC')
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert(times.tz)
assert_frame_equal(expected, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem_error(expected_rise_set_ephem, golden):
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(expected_rise_set_ephem.index,
golden.latitude,
golden.longitude,
next_or_previous='other')
tz_naive = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0)])
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(tz_naive,
golden.latitude,
golden.longitude,
next_or_previous='next')
@requires_ephem
def test_sun_rise_set_transit_ephem_horizon(golden):
times = pd.DatetimeIndex([datetime.datetime(2016, 1, 3, 0, 0, 0)
]).tz_localize('MST')
# center of sun disk
center = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude)
edge = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude, horizon='-0:34')
result_rounded = (edge['sunrise'] - center['sunrise']).dt.round('min')
sunrise_delta = datetime.datetime(2016, 1, 3, 7, 17, 11) - \
datetime.datetime(2016, 1, 3, 7, 21, 33)
expected = pd.Series(index=times,
data=sunrise_delta,
name='sunrise').dt.round('min')
assert_series_equal(expected, result_rounded)
@requires_ephem
def test_pyephem_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.pyephem(times, golden_mst.latitude,
golden_mst.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_pyephem_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.pyephem(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_calc_time():
import pytz
import math
# validation from USNO solar position calculator online
epoch = datetime.datetime(1970, 1, 1)
epoch_dt = pytz.utc.localize(epoch)
loc = tus
loc.pressure = 0
actual_time = pytz.timezone(loc.tz).localize(
datetime.datetime(2014, 10, 10, 8, 30))
lb = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, tol))
ub = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, 10))
alt = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'alt', math.radians(24.7))
az = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'az', math.radians(116.3))
actual_timestamp = (actual_time - epoch_dt).total_seconds()
assert_allclose((alt.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
assert_allclose((az.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
@requires_ephem
def test_earthsun_distance():
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D')
distance = solarposition.pyephem_earthsun_distance(times).values[0]
assert_allclose(1, distance, atol=0.1)
def test_ephemeris_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.ephemeris(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_no_tz(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 19, 30, 30),
periods=1, freq='D')
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_get_solarposition_error(golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
with pytest.raises(ValueError):
solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11,
method='error this')
@pytest.mark.parametrize("pressure, expected", [
(82000, _expected_solpos_df()),
(90000, pd.DataFrame(
np.array([[39.88997, 50.11003, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth',
'elevation', 'equation_of_time', 'zenith'],
index=['2003-10-17T12:30:30Z']))
])
def test_get_solarposition_pressure(pressure, expected, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=pressure,
temperature=11)
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize("altitude, expected", [
(1830.14, _expected_solpos_df()),
(2000, pd.DataFrame(
np.array([[39.88788, 50.11212, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth',
'elevation', 'equation_of_time', 'zenith'],
index=['2003-10-17T12:30:30Z']))
])
def test_get_solarposition_altitude(altitude, expected, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
altitude=altitude,
temperature=11)
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize("delta_t, method", [
(None, 'nrel_numpy'),
pytest.param(
None, 'nrel_numba',
marks=[pytest.mark.xfail(
reason='spa.calculate_deltat not implemented for numba yet')]),
(67.0, 'nrel_numba'),
(67.0, 'nrel_numpy'),
])
def test_get_solarposition_deltat(delta_t, method, expected_solpos_multi,
golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=2, freq='D', tz=golden.tz)
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
delta_t=delta_t,
temperature=11,
method=method)
this_expected = expected_solpos_multi
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
def test_get_solarposition_no_kwargs(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_ephem
def test_get_solarposition_method_pyephem(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
method='pyephem')
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_nrel_earthsun_distance():
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2)]
).tz_localize('MST')
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601, 1.01486146446]),
index=times)
assert_series_equal(expected, result)
times = datetime.datetime(2015, 1, 2)
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601]),
index=pd.DatetimeIndex([times, ]))
assert_series_equal(expected, result)
def test_equation_of_time():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
output = solarposition.spa_python(times, 37.8, -122.25, 100)
eot = output['equation_of_time']
eot_rng = eot.max() - eot.min() # range of values, around 30 minutes
eot_1 = solarposition.equation_of_time_spencer71(times.dayofyear)
eot_2 = solarposition.equation_of_time_pvcdrom(times.dayofyear)
assert np.allclose(eot_1 / eot_rng, eot / eot_rng, atol=0.3) # spencer
assert np.allclose(eot_2 / eot_rng, eot / eot_rng, atol=0.4) # pvcdrom
def test_declination():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
atmos_refract = 0.5667
delta_t = spa.calculate_deltat(times.year, times.month)
unixtime = np.array([calendar.timegm(t.timetuple()) for t in times])
_, _, declination = spa.solar_position(unixtime, 37.8, -122.25, 100,
1013.25, 25, delta_t, atmos_refract,
sst=True)
declination = np.deg2rad(declination)
declination_rng = declination.max() - declination.min()
declination_1 = solarposition.declination_cooper69(times.dayofyear)
declination_2 = solarposition.declination_spencer71(times.dayofyear)
a, b = declination_1 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.03) # cooper
a, b = declination_2 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.02) # spencer
def test_analytical_zenith():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_zenith = np.deg2rad(output['zenith']) # spa
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith_1 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith_2 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
assert np.allclose(zenith_1, solar_zenith, atol=0.015)
assert np.allclose(zenith_2, solar_zenith, atol=0.025)
def test_analytical_azimuth():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_azimuth = np.deg2rad(output['azimuth']) # spa
solar_zenith = np.deg2rad(output['zenith'])
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_1 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_2 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
idx = np.where(solar_zenith < np.pi/2)
assert np.allclose(azimuth_1[idx], solar_azimuth.values[idx], atol=0.01)
assert np.allclose(azimuth_2[idx], solar_azimuth.values[idx], atol=0.017)
# test for NaN values at boundary conditions (PR #431)
test_angles = np.radians(np.array(
[[ 0., -180., -20.],
[ 0., 0., -5.],
[ 0., 0., 0.],
[ 0., 0., 15.],
[ 0., 180., 20.],
[ 30., 0., -20.],
[ 30., 0., -5.],
[ 30., 0., 0.],
[ 30., 180., 5.],
[ 30., 0., 10.],
[ -30., 0., -20.],
[ -30., 0., -15.],
[ -30., 0., 0.],
[ -30., -180., 5.],
[ -30., 180., 10.]]))
zeniths = solarposition.solar_zenith_analytical(*test_angles.T)
azimuths = solarposition.solar_azimuth_analytical(*test_angles.T,
zenith=zeniths)
assert not np.isnan(azimuths).any()
def test_hour_angle():
"""
Test conversion from hours to hour angles in degrees given the following
inputs from NREL SPA calculator at Golden, CO
date,times,eot,sunrise,sunset
1/2/2015,7:21:55,-3.935172,-70.699400,70.512721
1/2/2015,16:47:43,-4.117227,-70.699400,70.512721
1/2/2015,12:04:45,-4.026295,-70.699400,70.512721
"""
longitude = -105.1786 # degrees
times = pd.DatetimeIndex([
'2015-01-02 07:21:55.2132',
'2015-01-02 16:47:42.9828',
'2015-01-02 12:04:44.6340'
]).tz_localize('Etc/GMT+7')
eot = np.array([-3.935172, -4.117227, -4.026295])
hours = solarposition.hour_angle(times, longitude, eot)
expected = (-70.682338, 70.72118825000001, 0.000801250)
# FIXME: there are differences from expected NREL SPA calculator values
# sunrise: 4 seconds, sunset: 48 seconds, transit: 0.2 seconds
# but the differences may be due to other SPA input parameters
assert np.allclose(hours, expected)
def test_sun_rise_set_transit_geometric(expected_rise_set_spa, golden_mst):
"""Test geometric calculations for sunrise, sunset, and transit times"""
times = expected_rise_set_spa.index
latitude = golden_mst.latitude
longitude = golden_mst.longitude
eot = solarposition.equation_of_time_spencer71(times.dayofyear) # minutes
decl = solarposition.declination_spencer71(times.dayofyear) # radians
sr, ss, st = solarposition.sun_rise_set_transit_geometric(
times, latitude=latitude, longitude=longitude, declination=decl,
equation_of_time=eot)
# sunrise: 2015-01-02 07:26:39.763224487, 2015-08-02 05:04:35.688533801
# sunset: 2015-01-02 16:41:29.951096777, 2015-08-02 19:09:46.597355085
# transit: 2015-01-02 12:04:04.857160632, 2015-08-02 12:07:11.142944443
test_sunrise = solarposition._times_to_hours_after_local_midnight(sr)
test_sunset = solarposition._times_to_hours_after_local_midnight(ss)
test_transit = solarposition._times_to_hours_after_local_midnight(st)
# convert expected SPA sunrise, sunset, transit to local datetime indices
expected_sunrise = pd.DatetimeIndex(expected_rise_set_spa.sunrise.values,
tz='UTC').tz_convert(golden_mst.tz)
expected_sunset = pd.DatetimeIndex(expected_rise_set_spa.sunset.values,
tz='UTC').tz_convert(golden_mst.tz)
expected_transit = pd.DatetimeIndex(expected_rise_set_spa.transit.values,
tz='UTC').tz_convert(golden_mst.tz)
# convert expected times to hours since midnight as arrays of floats
expected_sunrise = solarposition._times_to_hours_after_local_midnight(
expected_sunrise)
expected_sunset = solarposition._times_to_hours_after_local_midnight(
expected_sunset)
expected_transit = solarposition._times_to_hours_after_local_midnight(
expected_transit)
# geometric time has about 4-6 minute error compared to SPA sunset/sunrise
expected_sunrise_error = np.array(
[0.07910089555555544, 0.06908014805555496]) # 4.8[min], 4.2[min]
expected_sunset_error = np.array(
[-0.1036246955555562, -0.06983406805555603]) # -6.2[min], -4.2[min]
expected_transit_error = np.array(
[-0.011150788888889096, 0.0036508177777765383]) # -40[sec], 13.3[sec]
assert np.allclose(test_sunrise, expected_sunrise,
atol=np.abs(expected_sunrise_error).max())
assert np.allclose(test_sunset, expected_sunset,
atol=np.abs(expected_sunset_error).max())
assert np.allclose(test_transit, expected_transit,
atol=np.abs(expected_transit_error).max())
# put numba tests at end of file to minimize reloading
@requires_numba
def test_spa_python_numba_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
with warnings.catch_warnings():
# don't warn on method reload or num threads
# ensure that numpy is the most recently used method so that
# we can use the warns filter below
warnings.simplefilter("ignore")
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy', numthreads=1)
with pytest.warns(UserWarning):
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
import os
import pandas as pd
from base import BaseFeature
from encoding_func import target_encoding
from google.cloud import storage, bigquery
from google.cloud import bigquery_storage_v1beta1
class CountEncodingPresentDomains(BaseFeature):
def import_columns(self):
return [
"tweet_id",
"engaging_user_id"
]
def _read_present_domains_count_from_bigquery(
self, train_table_name: str, test_table_name) -> pd.DataFrame:
self._logger.info(f"Reading from {train_table_name} and {test_table_name}")
query = """
WITH subset AS (
(
SELECT tweet_id, any_value(present_domains) AS present_domains
FROM {}
GROUP BY tweet_id
)
UNION ALL
(
SELECT tweet_id, any_value(present_domains) AS present_domains
FROM {}
GROUP BY tweet_id
)
)
, unnest_subset AS (
SELECT tweet_id, present_domain
FROM subset,
unnest(present_domains) AS present_domain
)
, count_present_domain AS (
SELECT present_domain, COUNT(*) AS cnt
FROM unnest_subset
GROUP BY present_domain
)
SELECT
tweet_id,
AVG(cnt) AS mean_value,
min(cnt) AS min_value,
max(cnt) AS max_value,
case when stddev(cnt) is null then 1 else stddev(cnt) end AS std_value
FROM (
SELECT A.tweet_id, A.present_domain, B.cnt
FROM unnest_subset AS A
LEFT OUTER JOIN count_present_domain AS B
ON A.present_domain = B.present_domain
)
GROUP BY
tweet_id
""".format(train_table_name, test_table_name)
if self.debugging:
query += " limit 10000"
bqclient = bigquery.Client(project=self.PROJECT_ID)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient()
df = (
bqclient.query(query)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
return df
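    # The query above unnests each tweet's present_domains, counts how often each domain
    # occurs across train+test, and aggregates those counts back per tweet. E.g. (illustrative)
    # a tweet whose two domains occur 10 and 30 times overall gets mean_value=20,
    # min_value=10, max_value=30.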
def make_features(self, df_train_input, df_test_input):
# read unnested present_media
count_present_domains = self._read_present_domains_count_from_bigquery(
self.train_table, self.test_table
)
feature_names = ["mean_value", "max_value", "min_value", "std_value"]
print(count_present_domains.shape)
print(count_present_domains.isnull().sum())
df_train_features = pd.DataFrame()
df_test_features = pd.DataFrame()
        df_train_input = pd.merge(df_train_input, count_present_domains, on="tweet_id", how="left")
import json
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy import stats
from calibration import *
import pandas as pd
from eval_metrics import *
from tqdm import tqdm
from scipy.stats import norm
from scipy.stats import pearsonr
import argparse
import itertools
from os import listdir
from os.path import isfile, join
from normalisation import *
from error_ir import *
from sklearn.metrics import average_precision_score
def get_df(comet_dir, da_dir, nruns=100, docs=False, ens=True):
SETUP_PATH = comet_dir
files = [f for f in listdir(SETUP_PATH) if isfile(join(SETUP_PATH, f))]
sys_files = [f for f in files if (f.split('_')[0] == 'system') and ('Human' not in f)]
    da_scores = pd.read_csv(da_dir)
"""Handle the raw data input/output and interface with external formats."""
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
import datetime as dt
def load_stream(path):
"""Loads a Stream object from the file at path.
Args:
path: path to the input file, (for supported formats see,
http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)
Returns:
an obspy.core.Stream object
(http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
"""
stream = read(path)
stream.merge()
# assert len(stream) == 3 # We need X,Y,Z traces
return stream
def load_catalog(path):
"""Loads a event catalog from a .csv file.
Each row in the catalog references a know seismic event.
Args:
path: path to the input .csv file.
Returns:
catalog: A Pandas dataframe.
"""
catalog = pd.read_csv(path)
# Check if utc_timestamp exists, otherwise create it
if 'utc_timestamp' not in catalog.columns:
utc_timestamp = []
for e in catalog.origintime.values:
utc_timestamp.append(UTCDateTime(e).timestamp)
catalog['utc_timestamp'] = utc_timestamp
return catalog
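# Minimal usage sketch (paths are hypothetical):
#     stream = load_stream("data/streams/station.mseed")
#     catalog = load_catalog("data/catalogs/events.csv")
#     print(len(stream), len(catalog))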
def write_stream(stream, path):
stream.write(path, format='MSEED')
def write_catalog(events, path):
catalog = pd.DataFrame(
        {'utc_timestamp': pd.Series([t.timestamp for t in events])})
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import math
import os
import pandas as pd
import re
def search_year(year, years):
for idx, _year in enumerate(years):
if idx == len(years) -1:
continue
if year >= _year and year < years[idx + 1]:
return idx
return -1
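# Worked example: with years = list(range(2008, 2021)), search_year(2010, years) returns 2
# because 2010 falls in [2010, 2011); the last edge is exclusive, so search_year(2020, years)
# returns -1.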
def save_plots_topics(folder, articles_df, column_name, topic_modeler, with_sorted = False,
vmax = 800, relative_number = False, years = list(range(2008,2021)), cnt_per_plot = 25):
if not os.path.exists(folder):
os.makedirs(folder)
topic_year = np.zeros((topic_modeler.n_components, len(years)-1), dtype = int if not relative_number else float)
topic_map = {}
topic_map_by_id = {}
topic_id = 0
all_articles_by_year = np.zeros(len(years)-1, dtype=int)
for i in range(len(articles_df)):
year_ind = search_year(articles_df["year"].values[i], years)
if year_ind >= 0:
for topic in articles_df[column_name].values[i]:
if topic not in topic_map:
topic_map[topic] = topic_id
topic_map_by_id[topic_id] = topic
topic_id += 1
topic_year[topic_map[topic]][year_ind] += 1
all_articles_by_year[year_ind] += 1
if with_sorted:
result = sorted([(idx, topic_val) for idx,topic_val in enumerate(np.sum(topic_year, axis = 1))],key=lambda x: x[1], reverse = True)
else:
result = [(idx, topic_val) for idx,topic_val in enumerate(np.sum(topic_year, axis = 1))]
if relative_number:
topic_year /= all_articles_by_year
topic_year *= 100
for ind in range(math.ceil(topic_modeler.n_components/cnt_per_plot)):
plt.figure(figsize=(15, 6), dpi=150)
topic_year_df = pd.DataFrame(topic_year[[i for i, cnt in result[ind*cnt_per_plot:(ind+1)*cnt_per_plot]],:])
topic_year_df.index = [ topic_map_by_id[i] for i, cnt in result[ind*cnt_per_plot:(ind+1)*cnt_per_plot]]
topic_year_df.columns = [ "%d-%d"%(years[idx], years[idx+1]) for idx, year in enumerate(years) if idx != len(years) -1]
if relative_number:
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt=".1f")
else:
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax=vmax, annot=True, fmt="d")
plt.tight_layout()
plt.savefig(os.path.join(folder,'%d-%dtopics.png'%(ind*cnt_per_plot+1, (ind+1)*cnt_per_plot)))
def save_plots_districts(folder,
big_dataset,countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"], with_sorted = False, image_format="eps"):
for country in countries:
country_folder = os.path.join(folder, country)
if not os.path.exists(country_folder):
os.makedirs(country_folder)
districts_dict = {}
districts_dict_interv = {}
for i in range(len(big_dataset)):
for district in big_dataset["districts"].values[i]:
if country in district:
if district not in districts_dict:
districts_dict[district] = 0
districts_dict[district] += 1
if district not in districts_dict_interv:
districts_dict_interv[district] = {"technology intervention": 0, "socioeconomic intervention": 0, "ecosystem intervention": 0}
for column in ["technology intervention", "socioeconomic intervention", "ecosystem intervention"]:
if len(big_dataset[column].values[i]) > 0:
districts_dict_interv[district][column] += 1
if with_sorted:
result = sorted([(name, (interv_val["technology intervention"], interv_val["socioeconomic intervention"], interv_val["ecosystem intervention"]),\
sum(interv_val.values())) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
else:
result = sorted([(name, (districts_dict_interv[name]["technology intervention"], districts_dict_interv[name]["socioeconomic intervention"], districts_dict_interv[name]["ecosystem intervention"]),\
cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
for ind in range(math.ceil(len(districts_dict)/30)):
plt.figure(figsize=(15, 6), dpi=150)
topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
topic_year_df.columns = ["Technology intervention", "Socioeconomic intervention", "Ecosystem intervention"]
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
plt.tight_layout()
plt.savefig(os.path.join(country_folder,'%d-%dinterventions.%s'%(ind*30+1, (ind+1)*30, image_format)), format=image_format)
def save_plots_districts_unique(folder, big_dataset,countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"], with_sorted = False):
for country in countries:
country_folder = os.path.join(folder, country)
if not os.path.exists(country_folder):
os.makedirs(country_folder)
districts_dict = {}
districts_dict_interv = {}
for i in range(len(big_dataset)):
for district in big_dataset["districts"].values[i]:
if country in district:
if district not in districts_dict:
districts_dict[district] = 0
districts_dict[district] += 1
if district not in districts_dict_interv:
districts_dict_interv[district] = {"technology intervention": set(), "socioeconomic intervention":set(), "ecosystem intervention": set()}
for column in ["technology intervention", "socioeconomic intervention", "ecosystem intervention"]:
for val in big_dataset[column].values[i]:
districts_dict_interv[district][column].add(val)
if with_sorted:
result = sorted([(name, (len(interv_val["technology intervention"]), len(interv_val["socioeconomic intervention"]), len(interv_val["ecosystem intervention"])),\
sum([len(interv_val[v]) for v in interv_val])) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
else:
result = sorted([(name, (len(districts_dict_interv[name]["technology intervention"]), len(districts_dict_interv[name]["socioeconomic intervention"]), len(districts_dict_interv[name]["ecosystem intervention"])),\
cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
for ind in range(math.ceil(len(districts_dict)/30)):
plt.figure(figsize=(15, 6), dpi=150)
topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
topic_year_df.columns = ["Technology intervention", "Socioeconomic intervention", "Ecosystem intervention"]
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
plt.tight_layout()
plt.savefig(os.path.join(country_folder,'%d-%dinterventions.png'%(ind*30+1, (ind+1)*30)))
def code_sequence(values):
return 4*int("Technology intervention" in values) + 2*int("Socioeconomic intervention" in values) + int("Ecosystem intervention" in values)
def decode_sequence(num):
values = []
for idx, col in enumerate(["Eco", "Socio", "Tech"]):
if num & 2**idx:
values.append(col)
return list(sorted(values))
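# Quick illustration of the 3-bit encoding above (comment only, not executed):
#   Technology -> bit 2 (value 4), Socioeconomic -> bit 1 (value 2), Ecosystem -> bit 0 (value 1)
#   code_sequence(["Technology intervention", "Ecosystem intervention"]) == 5
#   decode_sequence(5) == ["Eco", "Tech"]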
def save_plots_districts_with_overlapping(folder, big_dataset,
countries = ["Nigeria/", "Malawi/", "Kenya/", "Tanzania/", "Mali/", "Zambia/", "Burkina Faso/", "Philippines/", "Bangladesh/"], with_sorted = False, image_format="eps"):
for country in countries:
country_folder = os.path.join(folder, country)
if not os.path.exists(country_folder):
os.makedirs(country_folder)
districts_dict = {}
districts_dict_interv = {}
for i in range(len(big_dataset)):
for district in big_dataset["districts"].values[i]:
if country in district:
if district not in districts_dict:
districts_dict[district] = 0
districts_dict[district] += 1
if district not in districts_dict_interv:
districts_dict_interv[district] = {}
                        # counters for the 7 possible intervention combinations;
                        # use a name other than i so the row index of the outer loop is kept
                        for code in range(1,8):
                            districts_dict_interv[district][code] = 0
if code_sequence(big_dataset["intervention_labels"].values[i]) > 0:
districts_dict_interv[district][code_sequence(big_dataset["intervention_labels"].values[i])] += 1
if with_sorted:
result = sorted([(name, tuple([interv_val[w] for w in [4,2,1,6,3,5,7] ]),\
sum(interv_val.values())) for name,interv_val in districts_dict_interv.items()],key=lambda x: x[2], reverse = True)
else:
            result = sorted([(name, tuple([districts_dict_interv[name][w] for w in [4,2,1,6,3,5,7]]),\
                cnt) for name, cnt in districts_dict.items()], key = lambda x: x[2], reverse= True)
for ind in range(math.ceil(len(districts_dict)/30)):
plt.figure(figsize=(15, 6), dpi=150)
topic_year_df = pd.DataFrame([val[1] for val in result[ind*30:(ind+1)*30]])
topic_year_df.index = [val[0] for val in result[ind*30:(ind+1)*30]]
topic_year_df.columns = ["; ".join(decode_sequence(w))for w in [4,2,1,6,3,5,7]]
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", vmin = 0, vmax = 50, annot=True, fmt = "d")
plt.tight_layout()
plt.savefig(os.path.join(country_folder,'%d-%dinterventions.%s'%(ind*30+1, (ind+1)*30, image_format)), format=image_format)
def save_plots_topics_interv(folder, articles_df, column_name, with_sorted = True, topic_numbers=125, image_format="eps"):
if not os.path.exists(folder):
os.makedirs(folder)
topic_year_names = {}
topic_year = np.zeros(topic_numbers, dtype = int)
topics_per_page = int(topic_numbers/5)
for i in range(len(articles_df)):
for topic in articles_df[column_name].values[i]:
            topic_num = int(re.search(r"#(\d+)", topic).group(1)) -1
topic_year_names[topic_num] = topic
topic_year[topic_num] += 1
if with_sorted:
result = sorted([(idx, topic_val) for idx,topic_val in enumerate(topic_year)],key=lambda x: x[1], reverse = True)
else:
result = [(idx, topic_val) for idx,topic_val in enumerate(topic_year)]
for ind in range(5):
plt.figure(figsize=(6, 6), dpi=150)
topic_year_df = pd.DataFrame(topic_year[[i for i,cnt in result[ind*topics_per_page:(ind+1)*topics_per_page]]])
topic_year_df.index = [ topic_year_names[i] for i,cnt in result[ind*topics_per_page:(ind+1)*topics_per_page]]
topic_year_df.columns = ["All"]
ax = sns.heatmap(topic_year_df, linewidth=0.5, cmap="YlGnBu", annot=True, fmt = "d", vmax = 50)
plt.tight_layout()
plt.savefig(os.path.join(folder,'%d-%dinterventions.%s'%(ind*topics_per_page+1, (ind+1)*topics_per_page, image_format)), format=image_format)
def save_plots_topics_cooccur(folder, articles_df, topic_num, column_name = "topics", with_sorted = True):
if not os.path.exists(folder):
os.makedirs(folder)
topic_year = np.zeros(150, dtype = int)
for i in range(len(articles_df)):
should_be_used = False
for topic in articles_df[column_name].values[i]:
            _topic_num = int(re.search(r"#(\d+)", topic).group(1))
if _topic_num == topic_num:
should_be_used = True
if should_be_used:
for topic in articles_df[column_name].values[i]:
                _topic_num = int(re.search(r"#(\d+)", topic).group(1)) -1
topic_year[_topic_num] += 1
if with_sorted:
result = sorted([(idx, topic_val) for idx,topic_val in enumerate(topic_year)],key=lambda x: x[1], reverse = True)
else:
result = [(idx, topic_val) for idx,topic_val in enumerate(topic_year)]
for ind in range(5):
plt.figure(figsize=(6, 6), dpi=150)
        topic_year_df = pd.DataFrame(topic_year[[i for i,cnt in result[ind*30:(ind+1)*30]]])
# @author <NAME>
#to merge the 3 different year ED visit files to single file
import pandas as pd
import os
import glob
import numpy as np
def seriesToTypes(series):
try:
series=series.astype("Int64")
except (TypeError,ValueError):
try:
series=pd.to_numeric(series,downcast='unsigned')
except (TypeError,ValueError): pass
#series.loc[pd.isna(series)]=pd.NA
# try:
# series=series.apply(lambda x: pd.NA if pd.isna(x) else str(x)).astype('string')
# series=series.astype('str')
# except:
# pass
return series
folder=r'\\vetmed2.vetmed.w2k.vt.edu\Blitzer\NASA project\Balaji\DSHS ED visit data\Dataset 3_13_2020'
IP_files = glob.glob(folder+'\\IP_*.{}'.format('txt'))
ip_df=pd.DataFrame()
for f in IP_files:
df=pd.read_csv(f,sep='\t')
df.loc[:,'file']=os.path.basename(f).split('.')[0]
    ip_df = pd.concat([ip_df, df])
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import numpy as np
import pandas as pd
import random
import math
from scipy.spatial.distance import cdist
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from progress.bar import Bar
# read hillslopes computation time
def read_data(path):
df_HS_runtime = pd.read_csv(path)
df_HS_runtime = df_HS_runtime.set_index(['HS_Name'])
return df_HS_runtime
# apply K-means
def apply_kmeans(df_AS, df_Feat_Norm, df_HS_runtime):
df_rmse = pd.DataFrame()
arr_ctime = np.empty([0])
df_fillnan = df_AS.fillna(method='ffill')
nr_itter = df_Feat_Norm.shape[0]+1
bar = Bar('K-Means processing', max=nr_itter-1)
for i in range(1, nr_itter):
mydata = pd.DataFrame()
mydata = df_Feat_Norm.copy()
n_clusters = i
kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=20, max_iter=600).fit(mydata)
pred = kmeans.labels_
ctime = 0.0
        # representative hillslope
rmse_ = pd.DataFrame()
represent = pd.DataFrame()
for j in np.unique(pred):
df_dist = pd.DataFrame(metrics.pairwise.euclidean_distances(mydata[pred == j], mydata[pred == j]), index=mydata[pred == j].index.values, columns=mydata[pred == j].index.values)
reptiv = df_dist.sum().idxmin()
ctime = ctime + float(df_HS_runtime['C_Time'][df_HS_runtime.index.values == reptiv])
represent = represent.append({'representitive':reptiv, 'label':j}, ignore_index=True)
# Root Mean Square Error
for k in range(mydata[pred == j].shape[0]):
rmse = math.sqrt(metrics.mean_squared_error(df_fillnan[mydata[pred == j].iloc[k].name], df_fillnan[reptiv]))
rmse_ = rmse_.append({'hname':mydata[pred == j].iloc[k].name, 'rmse':rmse}, ignore_index=True)
rmse_ = rmse_.set_index(['hname'])
rmse_ = rmse_.sort_index()
df_rmse[str(i)] = rmse_['rmse']
arr_ctime = np.append(arr_ctime, ctime)
bar.next()
df_RmseSum_Kmeans = pd.DataFrame({'rmse_sum':np.sqrt(np.square(df_rmse).sum()), 'ctime':arr_ctime})
bar.finish()
return df_RmseSum_Kmeans
# apply DBSCAN
def apply_DBSCAN(df_AS, df_Feat_Norm, df_HS_runtime):
df_fillnan = df_AS.fillna(method='ffill')
df_RmseSum_DBSCAN = pd.DataFrame()
nr_itter = np.arange(0.1, 2.4, 0.2).shape[0]
bar = Bar('DBSCAN processing', max=nr_itter)
for e in np.arange(0.1, 2.4, 0.2):
df_rmse = pd.DataFrame()
arr_ctime = np.empty(shape=[0, 1])
count = 0
for i in range(1, 22, 2):
mydata = pd.DataFrame()
mydata = df_Feat_Norm.copy()
eps = e
min_samples = i
dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric='euclidean', algorithm='auto').fit(mydata)
pred = dbscan.labels_
ctime = 0.0
            # representative hillslope
rmse_ = pd.DataFrame()
for j in np.unique(pred):
if j == -1:
for k in range(mydata[pred == j].shape[0]):
ctime = ctime + float(df_HS_runtime['C_Time'][df_HS_runtime.index.values == mydata[pred == j].iloc[k].name])
rmse = 0.0
rmse_ = rmse_.append({'hname':mydata[pred == j].iloc[k].name, 'rmse':rmse}, ignore_index=True)
else:
df_dist = pd.DataFrame(metrics.pairwise.euclidean_distances(mydata[pred == j], mydata[pred == j]), index=mydata[pred == j].index.values, columns=mydata[pred == j].index.values)
reptiv = df_dist.sum().idxmin()
ctime = ctime + float(df_HS_runtime['C_Time'][df_HS_runtime.index.values == reptiv])
# Root Mean Square Error
for k in range(mydata[pred == j].shape[0]):
rmse = math.sqrt(metrics.mean_squared_error(df_fillnan[mydata[pred == j].iloc[k].name], df_fillnan[reptiv]))
rmse_ = rmse_.append({'hname':mydata[pred == j].iloc[k].name, 'rmse':rmse}, ignore_index=True)
rmse_ = rmse_.set_index(['hname'])
rmse_ = rmse_.sort_index()
df_rmse[str(i)] = rmse_['rmse']
arr_ctime = np.append(arr_ctime, ctime)
nr_cls = len(np.unique(pred)) - 1 + mydata[pred == -1].shape[0]
df_RmseSum_DBSCAN= df_RmseSum_DBSCAN.append({'rmse_sum':np.sqrt(np.square(df_rmse).sum())[count], 'ctime':arr_ctime[count], 'epsilon':e, 'min_samp':i,
'nr_cls':nr_cls, 'silhouettecoef':metrics.silhouette_score(mydata, pred)}, ignore_index=True)
count +=1
bar.next()
bar.finish()
return df_RmseSum_DBSCAN
# K-medoids clustering
def cluster(distances, k=3):
# number of points
m = distances.shape[0]
# Pick k random medoids.
# curr_medoids = np.array([-1]*k)
# while not len(np.unique(curr_medoids)) == k:
# curr_medoids = np.array([random.randint(0, m - 1) for _ in range(k)])
curr_medoids = np.arange(m)
np.random.shuffle(curr_medoids)
curr_medoids = curr_medoids[:k]
old_medoids = np.array([-1]*k) # Doesn't matter what we initialize these to.
new_medoids = np.array([-1]*k)
# Until the medoids stop updating, do the following:
while not ((old_medoids == curr_medoids).all()):
# Assign each point to cluster with closest medoid.
clusters = assign_points_to_clusters(curr_medoids, distances)
# Update cluster medoids to be lowest cost point.
for curr_medoid in curr_medoids:
cluster = np.where(clusters == curr_medoid)[0]
new_medoids[curr_medoids == curr_medoid] = compute_new_medoid(cluster, distances)
old_medoids[:] = curr_medoids[:]
curr_medoids[:] = new_medoids[:]
return clusters, curr_medoids
# K-medoids clustering assign points
def assign_points_to_clusters(medoids, distances):
distances_to_medoids = distances[:,medoids]
clusters = medoids[np.argmin(distances_to_medoids, axis=1)]
clusters[medoids] = medoids
return clusters
# K-medoids clustering compute new medoid
def compute_new_medoid(cluster, distances):
mask = np.ones(distances.shape)
mask[np.ix_(cluster,cluster)] = 0.
cluster_distances = np.ma.masked_array(data=distances, mask=mask, fill_value=10e9)
costs = cluster_distances.sum(axis=1)
return costs.argmin(axis=0, fill_value=10e9)
# apply K-medoids
def apply_kmedoids(df_AS, df_Feat_Norm, df_HS_runtime):
mydata = pd.DataFrame()
mydata = df_Feat_Norm.copy()
df_dist = pd.DataFrame(metrics.pairwise.euclidean_distances(mydata), index=mydata.index.values, columns=mydata.index.values)
dist = np.array(df_dist)
df_rmse = pd.DataFrame()
arr_ctime = np.empty([0])
df_fillnan = df_AS.fillna(method='ffill')
nr_itter = df_Feat_Norm.shape[0]+1
bar = Bar('K-Medoids processing', max=nr_itter-1)
for i in range(2, nr_itter):
mydata = pd.DataFrame()
mydata = df_Feat_Norm.copy()
n_clusters = i
kclust, kmedoids = cluster(dist, k=n_clusters)
ctime = 0.0
        # representative hillslope
        rmse_ = pd.DataFrame()
import quandl
mydata = quandl.get("YAHOO/INDEX_DJI", start_date="2005-12-01", end_date="2005-12-05")
import pandas as pd
authtoken = '<PASSWORD>'
def get_data_quandl(symbol, start_date, end_date):
data = quandl.get(symbol, start_date=start_date, end_date=end_date, authtoken=authtoken)
return data
def generate_features(df):
""" Generate features for a stock/index based on historical price and performance
Args:
df (dataframe with columns "Open", "Close", "High", "Low", "Volume", "Adjusted Close")
Returns:
dataframe, data set with new features
"""
df_new = pd.DataFrame()
# 6 original features
df_new['open'] = df['Open']
df_new['open_1'] = df['Open'].shift(1)
df_new['close_1'] = df['Close'].shift(1)
df_new['high_1'] = df['High'].shift(1)
df_new['low_1'] = df['Low'].shift(1)
df_new['volume_1'] = df['Volume'].shift(1)
# 31 original features
# average price
    df_new['avg_price_5'] = df['Close'].rolling(window=5).mean().shift(1)
    df_new['avg_price_30'] = df['Close'].rolling(window=21).mean().shift(1)
    df_new['avg_price_365'] = df['Close'].rolling(window=252).mean().shift(1)
df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']
df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']
df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']
# average volume
    df_new['avg_volume_5'] = df['Volume'].rolling(window=5).mean().shift(1)
    df_new['avg_volume_30'] = df['Volume'].rolling(window=21).mean().shift(1)
    df_new['avg_volume_365'] = df['Volume'].rolling(window=252).mean().shift(1)
df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']
df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']
df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']
# standard deviation of prices
    df_new['std_price_5'] = df['Close'].rolling(window=5).std().shift(1)
    df_new['std_price_30'] = df['Close'].rolling(window=21).std().shift(1)
    df_new['std_price_365'] = df['Close'].rolling(window=252).std().shift(1)
df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']
df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']
df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365']
# standard deviation of volumes
    df_new['std_volume_5'] = df['Volume'].rolling(window=5).std()
import numpy as np
import pandas as pd
from six import string_types
from triage.component.catwalk.exceptions import BaselineFeatureNotInMatrix
OPERATOR_METHODS = {">": "gt", ">=": "ge", "<": "lt", "<=": "le", "==": "eq"}
REQUIRED_KEYS = frozenset(["feature_name", "operator", "threshold"])
def get_operator_method(operator_string):
""" Convert the user-passed operator into the the name of the apprpriate
pandas method.
"""
try:
operator_method = OPERATOR_METHODS[operator_string]
except KeyError:
raise ValueError(
(
"Operator '{operator}' extracted from rule is not a "
"supported operator ({supported_operators}).".format(
operator=operator_string,
supported_operators=OPERATOR_METHODS.keys(),
)
)
)
return operator_method
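# Illustration (comment only): get_operator_method(">") returns "gt", which
# predict_proba below turns into the pandas comparison call
# getattr(x[feature_name], "gt")(threshold), i.e. pd.Series.gt.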
class SimpleThresholder:
""" The simple thresholder applies a set of predetermined logical rules to a
test matrix to classify entities. By default, it will classify entities as 1
if they satisfy any of the rules. When 'and' is set as the logical_operator,
it will classify entities as 1 only if they pass *all* of the rules.
Rules are passed as either strings in the format 'x1 > 5' or dictionaries in
the format {feature_name: 'x1', operator: '>', threshold: 5}. The
feature_name, operator, and threshold keys are required. Eventually, this
class may be abstracted into a BaseThreshold class and more complicated
thresholders could be built around new keys in the dictionaries (e.g., by
specifying scores that could be applied (and possibly summed) to entities
satisfying rules) or by an alternative dictionary format that specifies
more complicated structures for applying rules (for example:
{
or: [
{or: [{}, {}]},
{and: [{}, {}]}
]
}
where rules and operators that combine them can be nested).
"""
def __init__(self, rules, logical_operator="or"):
self.rules = rules
self.logical_operator = logical_operator
self.feature_importances_ = None
self.rule_combination_method = self.lookup_rule_combination_method(
logical_operator
)
@property
def rules(self):
return vars(self)["rules"]
@rules.setter
def rules(self, rules):
""" Validates the rules passed by the user and converts them to the
internal representation. Can be used to validate rules before running an
experiment.
1. If rules are not a list, make them a list.
2. If rules are strings, convert them to dictionaries.
3. If dictionaries or strings are not in a supported format, raise
helpful exceptions.
"""
if not isinstance(rules, list):
rules = [rules]
converted_rules = []
for rule in rules:
if isinstance(rule, string_types):
converted_rules.append(self._convert_string_rule_to_dict(rule))
else:
if not isinstance(rule, dict):
raise ValueError(
(
'Rule "{rule}" is not of a supported type (string or '
"dict).".format(rule=rule)
)
)
if not rule.keys() >= REQUIRED_KEYS:
raise ValueError(
(
'Rule "{rule}" missing one or more required keys '
"({required_keys}).".format(
rule=rule, required_keys=REQUIRED_KEYS
)
)
)
rule["operator"] = get_operator_method(rule["operator"])
converted_rules.append(rule)
vars(self)["rules"] = converted_rules
@property
def all_feature_names(self):
return [rule["feature_name"] for rule in self.rules]
def lookup_rule_combination_method(self, logical_operator):
""" Convert 'and' to 'all' and 'or' to 'any' for interacting with
pandas DataFrames.
"""
rule_combination_method_lookup = {"or": "any", "and": "all"}
return rule_combination_method_lookup[logical_operator]
def _convert_string_rule_to_dict(self, rule):
""" Converts a string rule into a dict, raising helpful exceptions if it
cannot be parsed.
"""
components = rule.rsplit(" ", 2)
if len(components) < 3:
raise ValueError(
(
'{required_keys} could not be parsed from rule "{rule}". Are '
"they all present and separated by spaces?".format(
required_keys=REQUIRED_KEYS, rule=rule
)
)
)
try:
threshold = int(components[2])
except ValueError:
raise ValueError(
(
'Threshold "{threshold}" parsed from rule "{rule}" is not an '
"int.".format(threshold=components[2], rule=rule)
)
)
operator = get_operator_method(components[1])
return {
"feature_name": components[0],
"operator": operator,
"threshold": threshold,
}
def _set_feature_importances_(self, x):
""" Assigns feature importances following the rule: 1 for the features
we are thresholding on, 0 for all other features.
"""
feature_importances = [0] * len(x.columns)
for feature_name in self.all_feature_names:
try:
position = x.columns.get_loc(feature_name)
except KeyError:
raise BaselineFeatureNotInMatrix(
(
"Rules refer to a feature ({feature_name}) not included in "
"the training matrix!".format(feature_name=feature_name)
)
)
feature_importances[position] = 1
self.feature_importances_ = np.array(feature_importances)
def fit(self, x, y):
""" Set feature importances and return self.
"""
self._set_feature_importances_(x)
return self
def predict_proba(self, x):
""" Assign 1 for entities that meet the rules and 0 for those that do not.
"""
rule_evaluations_list = []
for rule in self.rules:
rule_evaluations_list.append(
getattr(x[rule["feature_name"]], rule["operator"])(rule["threshold"])
)
        rule_evaluations_dataframe = pd.concat(rule_evaluations_list, axis=1)
        # Combine the per-rule booleans with 'any' (or) / 'all' (and) and return
        # sklearn-style (n_samples, 2) probabilities. This is a minimal completion
        # consistent with the docstring; the original implementation may differ.
        scores = getattr(
            rule_evaluations_dataframe, self.rule_combination_method
        )(axis=1).astype(int)
        return np.array([1 - scores, scores]).transpose()
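# Hypothetical usage sketch (feature names and DataFrames below are made up for
# illustration and are not part of this module):
#
#     clf = SimpleThresholder(["x1 > 5", "x2 <= 3"], logical_operator="or")
#     clf.fit(X_train, y_train)           # flags x1 and x2 in feature_importances_
#     probs = clf.predict_proba(X_test)   # second column is 1 where any rule holds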
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from hash import *
class simulation:
def __init__(self, length=12096, mu=0, sigma=0.001117728,
b_target=10, block_reward=12.5, hash_ubd=55,
hash_slope=3, hash_center=1.5, prev_data=pd.DataFrame(),
T_BCH=144, T_BTC=2016, init_price=5400,
init_winning_rate=0.00003):
'''
Parameters
----------
length: time length of simulation
length = the number of blocks generated in one simulation.
A new block is generated in 10 minutes in expection;
12096 blocks are generated in three months in expectation.
mu: average of the brownian motion
sigma: standard deviation of the brownian motion
b_target: target block time (min) (default: 10 min)
\bar{B}
block_reward:
the amount of cryptocurrency the miner receives when he
adds a block. (default: 12.5)
hash_ubd: the upper bound of global hash rate.
hash_slope, hash_center:
the parameters that affects the shape of hash supply function
prev_data:
a pandas dataframe containing (i) prices, (ii) winning rates,
(iii) hash rates, and (iv) block times.
            The number of rows should coincide with T_BCH.
T_BCH: the length of the time window used for DAA of BCH.
T_BTC: the length of the time window used for DAA of BTC.
init_price: the initial price.
        init_winning_rate: the initial winning rate.
Attributes
----------
block_times
prices
winning_rates
hash_rates
optimal_winning_rates
        expected_rewards
Notes
-----
* As for BTC and BCH, b_target is set to be 10 minutes.
'''
# params
self.mu = mu
self.sigma = sigma
self.b_target = b_target
self.length = length
self.block_reward = block_reward
self.hash_ubd = hash_ubd
self.hash_slope = hash_slope
self.hash_center = hash_center
self.T_BCH = T_BCH
self.T_BTC = T_BTC
if prev_data.empty == True:
self.prev_prices = np.ones(T_BCH) * init_price
self.prev_block_times = np.ones(T_BCH) * b_target
self.prev_winning_rates = np.ones(T_BCH) * init_winning_rate
else:
self.prev_prices = prev_data['prices']
self.prev_block_times = prev_data['block_times']
self.prev_winning_rates = prev_data['winning_rates']
def sim_DAA_1(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-1 as its DAA.
DAA-1 is based on the DAA used by BTC.
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
        df_opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determine the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted
every self.T_BTC periods. In reality, BTC lets T_BTC = 2016.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
if (init_height + t)%self.T_BTC == 0:
self.diff_adjust_BTC(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_2(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-2 as its DAA.
DAA-2 is based on the DAA used by BCH.
Parameters
----------
        prices: see sim_DAA_1.
        exprvs: see sim_DAA_1.
        presim_length: see sim_DAA_1.
        ubd_param: see sim_DAA_1.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted every period.
At each adjustment, the last T_BCH blocks are taken into account.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_BCH(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_asert(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3, half_life=2880):
'''
        Conduct a simulation using ASERT as its DAA.
        ASERT adjusts the difficulty exponentially toward the target block time.
Parameters
----------
        prices: see sim_DAA_1.
        exprvs: see sim_DAA_1.
        presim_length: see sim_DAA_1.
        ubd_param: see sim_DAA_1.
        half_life: controls the speed of the exponential adjustment
            (in minutes; default 2880, i.e. two days).
Returns
-------
None
Notes
-----
        Difficulty, or winning_rate W(t), is adjusted every period as
        W(t+1) = W(t) * exp((B(t) - b_target) / half_life)
        (see diff_adjust_asert).
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_asert(current_period=t, half_life=half_life)
else:
break
self._postprocessing(period)
return None
def sim_DAA_0(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation where the difficulty is always adjusted
to the optimal level. (imaginary DAA)
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
        df_opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determine the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
        The winning rate W(t) is set to the optimal level W^*(t) in every
        period, so no difficulty adjustment algorithm is actually simulated.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# W^*(t)
## W(t) = W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
self.winning_rates[t] = self.optimal_winning_rates[t]
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
else:
break
self._postprocessing(period)
return None
def compute_price(self, current_period, current_time, prices):
'''
Compute the price at the time when the (t+1)-th block is created:
compute S(t+1) using price data via linear interpolation.
prices contains the price date recorded every 10 minutes.
'''
time_left = int(current_time//self.b_target)
time_right = time_left + 1
self.prices[current_period+1] = \
prices[time_left] + (prices[time_right] - prices[time_left]) * \
((current_time - time_left*self.b_target)/self.b_target)
return None
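    # Worked example of the interpolation above (illustration only): with
    # b_target = 10 and current_time = 25, time_left = 2, so
    # S(t+1) = prices[2] + (prices[3] - prices[2]) * (25 - 20) / 10,
    # i.e. the price halfway between the 20- and 30-minute observations.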
def diff_adjust_BTC(self, current_period):
'''
Used by sim_DAA-1.
Modify self.winning_rates in place.
'''
multiplier = \
(self.block_times[current_period-self.T_BTC+1:\
current_period+1].sum() / (self.T_BTC * self.b_target))
self.winning_rates[current_period+1:current_period+self.T_BTC+1] = \
self.winning_rates[current_period] * multiplier
return None
def diff_adjust_BCH(self, current_period):
'''
Used by sim_DAA_2.
Modify self.winning_rates in place.
'''
# the term related to B(t)
block_term = \
(self.block_times[current_period-self.T_BCH+1: \
current_period+1].sum() / self.b_target)
# the term related to W(t)
temp = np.ones(self.T_BCH)
w_inverses = temp / (self.winning_rates[current_period-self.T_BCH+1: \
current_period+1])
winning_prob_term = 1 / w_inverses.sum()
# update W(t)
self.winning_rates[current_period+1] = \
block_term * winning_prob_term
return None
def diff_adjust_asert(self, current_period, half_life=2880):
'''
Used by sim_DAA_asert.
Modify self.winning_rates in place.
'''
temp = (self.block_times[current_period] - self.b_target)/half_life
# update W(t)
self.winning_rates[current_period+1] = \
self.winning_rates[current_period] * np.exp(temp)
return None
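    # Illustration of the update above: a block exactly on target leaves W(t)
    # unchanged (exp(0) = 1); a block that takes b_target + half_life minutes
    # multiplies W(t) by e ~= 2.72, so half_life acts as an e-folding time
    # rather than a strict half-life.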
def hash_supply(self, current_period):
'''
Compute hash supply in current period (EH)
'''
current_exp_reward = \
(self.prices[current_period] * self.winning_rates[current_period]
* self.block_reward)
return self.hash_ubd * \
self._sigmoid(self.hash_slope *
(current_exp_reward - self.hash_center))
def _sigmoid(self, x):
sigmoid_range = 34.538776394910684
if x <= -sigmoid_range:
return 1e-15
if x >= sigmoid_range:
return 1.0 - 1e-15
return 1.0 / (1.0 + np.exp(-x))
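    # Shape of the hash supply above (illustration): when the expected per-block
    # reward equals hash_center, _sigmoid(0) = 0.5 and supply is hash_ubd / 2;
    # hash_slope controls how sharply supply rises around that point.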
def _initialization(self, ubd_param, presim_length=2016):
# the number of iteration cannot exceeds self.length * self.ubd_param
sim_length_ubd = self.length * ubd_param
self.prices = np.zeros((sim_length_ubd,)) # S(t)
self.winning_rates = np.zeros((sim_length_ubd,)) # W(t)
self.block_times = np.zeros((sim_length_ubd,)) # B(t)
self.hash_rates = np.zeros((sim_length_ubd,)) #H(t)
self.optimal_winning_rates = np.zeros((sim_length_ubd,)) #W^*(t)
self.expected_rewards = np.zeros((sim_length_ubd,)) #R(t)
# add pre-simulation periods
self.prices = np.hstack([self.prev_prices, self.prices])
self.block_times = \
np.hstack([self.prev_block_times, self.block_times])
self.winning_rates = \
np.hstack([self.prev_winning_rates, self.winning_rates])
## for BTC, set the winning rates
self.winning_rates[presim_length:presim_length+self.T_BTC] = \
self.winning_rates[presim_length-1]
## hash rates in pre-simulation periods will not be used
## The same is true of opt_win_rate and exp_returns
_ = np.zeros(presim_length) + self.hash_supply(presim_length-1) # may be redundant
self.hash_rates = np.hstack([_, self.hash_rates])
_ = np.zeros(presim_length)
self.optimal_winning_rates = np.hstack([_, self.optimal_winning_rates])
self.expected_rewards = np.hstack([_, self.expected_rewards])
return None
def _postprocessing(self, period, presim_length=2016):
self.block_times = self.block_times[presim_length:period]
self.prices = self.prices[presim_length:period]
self.winning_rates = self.winning_rates[presim_length:period]
self.hash_rates = self.hash_rates[presim_length:period]
self.optimal_winning_rates =\
self.optimal_winning_rates[presim_length:period]
self.expected_rewards = self.expected_rewards[presim_length:period]
return None
# Functions
def generate_simulation_data(num_iter=3, price_shock=0, T=None,
opt_w=pd.DataFrame(), prev_data=pd.DataFrame(),
dir_sim='/Volumes/Data/research/BDA/simulation/'):
'''
Notes
-----
num_iter is a number of observations.
The price data 'sim_prices_ps={}_5000obs.csv'.format(price_shock) should
be created in advance.
If T is specified, T_BTC <- T and T_BCH <- T.
'''
df_exprvs = pd.read_csv(dir_sim+'sim_exprvs_5000obs.csv')
df_price = pd.read_csv(dir_sim+'sim_prices_ps={}_5000obs.csv'\
.format(price_shock))
df_opt_w = pd.read_csv(dir_sim + 'opt_w.csv', index_col=0)
path = '../data/BTCdata_presim.csv'
prev_data = pd.read_csv(path)
prev_data['time'] = pd.to_datetime(prev_data['time'])
prev_data = prev_data.rename(columns={'blocktime': 'block_times', 'price': 'prices', 'probability of success /Eh': 'winning_rates'})
df_DAA_1_blocktime = pd.DataFrame()
df_DAA_1_hashrate = pd.DataFrame()
df_DAA_1_winrate = pd.DataFrame()
df_DAA_1_optwinrate = pd.DataFrame()
df_DAA_1_expreward = pd.DataFrame()
df_DAA_2_blocktime = pd.DataFrame()
df_DAA_2_hashrate = pd.DataFrame()
df_DAA_2_winrate = pd.DataFrame()
df_DAA_2_optwinrate = pd.DataFrame()
df_DAA_2_expreward = pd.DataFrame()
    df_DAA_0_blocktime = pd.DataFrame()
import os
import pandas as pd
import torch
from detectron2.data import MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from pycocotools.coco import COCO
from dataset_utils import register_polyp_datasets, dataset_annots
class GianaEvaulator(DatasetEvaluator):
def __init__(self, dataset_name, output_dir, thresholds=None, old_metric=False):
self.iou_thresh = 0.0
self.eval_mode = 'new'
self.dataset_name = dataset_name
self.dataset_folder = os.path.join("datasets", self.dataset_name)
coco_annot_file = os.path.join(self.dataset_folder, "annotations", dataset_annots[dataset_name])
self._coco_api = COCO(coco_annot_file)
self.output_folder = os.path.join(output_dir, "giana")
self.detection_folder = os.path.join(output_dir, "detection")
self.localization_folder = os.path.join(output_dir, "localization")
self.classification_folder = os.path.join(output_dir, "classification")
self.old_metric = old_metric
self.debug = False
if thresholds is None:
self.thresholds = [x / 10 for x in range(10)]
else:
self.thresholds = thresholds
self._partial_results = []
self.make_dirs()
self.classes_id = MetadataCatalog.get(dataset_name).get("thing_dataset_id_to_contiguous_id")
self.class_id_name = {v: k for k, v in
zip(MetadataCatalog.get(dataset_name).get("thing_classes"), self.classes_id.values())}
def make_dirs(self):
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if not os.path.exists(self.detection_folder):
os.makedirs(self.detection_folder)
if not os.path.exists(self.localization_folder):
os.makedirs(self.localization_folder)
if not os.path.exists(self.classification_folder):
os.makedirs(self.classification_folder)
def reset(self):
self.results = pd.DataFrame(columns=["image", "detected", "localized", "classified", "score", "pred_box"])
self._partial_results = []
def evaluate(self):
if not self.debug:
self.results = pd.DataFrame(self._partial_results,
columns=["image", "detected", "localized", "classified", "score", "pred_box"])
print(len(self._partial_results))
print(self.results.groupby("image"))
print(self.results.groupby("image").count())
self.results[['sequence', 'frame']] = self.results.image.str.split("-", expand=True)
sequences = pd.unique(self.results.sequence)
dets = []
locs = []
classifs = []
avg_df_detection = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
avg_df_localization = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
avg_df_classification = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
for sequence in sequences:
df_detection = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN", "RT"])
            df_localization = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN", "RT"])
import csv
import typer
import os.path
import pandas as pd
from datetime import datetime
#from core import configmod as cfm
#from core import loggermod as lgm
file_svr = 'saved_report.csv'
def init(pbpath,dtcnt):
filepath = pbpath + "/" + file_svr
if not(os.path.exists(filepath)):
clm = {'date': [], 'user': []}
for i in range(dtcnt):
clm["data "+(str(i+1))] = []
clm["score"] = []
        filedt = pd.DataFrame(clm)
import os
import pickle
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, RobustScaler
from utils.preprocessing import preprocess
from utils.timefeatures import time_features
warnings.filterwarnings('ignore')
class Dataset_JaneStreet(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='M', data_name='train.csv',
target='action', scale=True, timeenc=0, freq='t'):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'val', 'test']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
# self.features = features
self.target = target
self.scale = scale
self.root_path = root_path
self.data_name = data_name
self.__read_data__()
def __read_data__(self):
scaler = StandardScaler()
print('loading dataset...')
# print(self.root_path)
# print(self.data_name)
data_path = Path(self.root_path)/self.data_name
pickle_path = Path(self.root_path)/f"{self.data_name}.pandas.pickle"
# print(data_path)
if not pickle_path.exists():
with pickle_path.open('wb') as p_fd:
df_raw = pd.read_csv(data_path)
features = [c for c in df_raw.columns if 'feature' in c]
print('preprocessing data...')
df_raw = preprocess(df_raw, self.scale)
pickle.dump(df_raw, p_fd)
with pickle_path.open('rb') as p_fd:
df_pickled = pickle.load(p_fd)
# df_pickled.info()
df_pickled = df_pickled[df_pickled.weight != 0]
# df_pickled = df_pickled[df_pickled.date > 399]
df_pickled = df_pickled[df_pickled.date > 85].reset_index(drop=True)
print('generate target...')
resp_cols = [c for c in df_pickled.columns if 'resp' in c]
# df_pickled['action'] = ((df_pickled['resp'] > 0) &
# (df_pickled['resp_1'] > 0) &
# (df_pickled['resp_2'] > 0) &
# (df_pickled['resp_3'] > 0) &
# (df_pickled['resp_4'] > 0)).astype('int')
# df_pickled['action'] = df_pickled['resp'].copy()
df_pickled['action'] = df_pickled[resp_cols].sum(axis=1)/len(resp_cols)
# df_pickled['action'] = df_pickled.apply(lambda row: row.weight * row.resp, axis='columns')
# df_pickled['action_1'] = df_pickled['resp_1']
# df_pickled['action_2'] = df_pickled['resp_2']
# df_pickled['action_3'] = df_pickled['resp_3']
# df_pickled['action_4'] = df_pickled['resp_4']
# df_pickled.info()
print("split train, valid...")
split_date = 400
train_df = df_pickled.loc[df_pickled.date <= split_date].reset_index(drop=True)
valid_df = df_pickled.loc[df_pickled.date > split_date].reset_index(drop=True)
# print(train_df)
# valid_df['weighted_resp'] = valid_df.apply(lambda row: row.weight * row.resp, axis='columns')
# target_cols = ['action', 'action_1', 'action_2', 'action_3', 'action_4']
# target_cols = ['action', 'action_1', 'action_2', 'action_3']
# target_cols = ['weighted_resp']
target_cols = ['action']
if self.scale:
train_df[target_cols] = scaler.fit_transform(train_df[target_cols].values)
valid_df[target_cols] = scaler.fit_transform(valid_df[target_cols].values)
print('organize values...')
features = [c for c in train_df.columns if 'feature' in c]
if self.set_type == 0:
self.weight = train_df.weight.values
self.resp = train_df.resp.values
self.data_x = train_df[features+target_cols].values
self.data_y = train_df[features+target_cols].values
self.data_stamp = train_df.date.values
elif self.set_type == 1:
self.weight = valid_df.weight.values
self.resp = valid_df.resp.values
self.data_x = valid_df[features+target_cols].values
self.data_y = valid_df[features+target_cols].values
self.data_stamp = valid_df.date.values
def __getitem__(self, index):
# print(index)
# s_begin index
s_begin = index
# s_end index = s_begin + seq_len (4*24)
s_end = s_begin + self.seq_len
# r_begin index = s_end - label_len (2*24)
r_begin = s_end - self.label_len
# r_end index = r_begin + label_len (2*24) + pred_len (1*24)
r_end = r_begin + self.label_len + self.pred_len
# 0 : 0 + 4*24
seq_x = self.data_x[s_begin:s_end]
# 0 + 4*24 - 2*24 : 0 + 4*24 - 2*24 + 2*24 + 1*24
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
# seq_x: (batch_size, enc_in, seq_len)
# seq_y: (batch_size, enc_in, label_len + pred_len)
return s_begin, s_end, r_begin, r_end, seq_x, seq_y, seq_x_mark, seq_y_mark
# return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
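    # Window arithmetic (illustration with made-up sizes): for seq_len=96,
    # label_len=48, pred_len=24, item 0 yields x = rows [0, 96) and
    # y = rows [48, 120); __len__ subtracts seq_len + pred_len - 1 so the last
    # y window still fits inside the data.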
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, timeenc=0, freq='h'):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
        self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))
# 0,
# num of hours in a year - num of hours in 4 days,
# num of hours in a year + num of hours in 4 months - num of hours in 4 days
border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
# num of hours in a year,
# num of hours in a year + num of hours in 4 months,
# num of hours in a year + num of hours in 8 months
border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
# print(df_data.shape)
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
# retrieve one year's record
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if self.timeenc == 0:
df_stamp['month'] = df_stamp.date.apply(lambda row: (row.month))
df_stamp['day'] = df_stamp.date.apply(lambda row: row.day)
df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday())
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour)
data_stamp = df_stamp.drop(['date'], axis=1).values
elif self.timeenc == 1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1,0)
# print(df_stamp)
# print(data_stamp)
# x and y are identical here
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
# print(self.data_x)
# exit()
def __getitem__(self, index):
# 0 : 0 + 4*24
# s_begin index
s_begin = index
# s_end index = s_begin + seq_len (4*24)
s_end = s_begin + self.seq_len
# 0 + 4*24 - 2*24 : 0 + 4*24 - 2*24 + 2*24 + 1*24
# r_begin index = s_end - label_len (2*24)
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
# seq_x: (batch_size, enc_in, seq_len)
# seq_y: (batch_size, enc_in, label_len + pred_len)
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
target='OT', scale=True, timeenc=0, freq='t'):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
        self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))
border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4+4*30*24*4 - self.seq_len]
border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if self.timeenc==0:
df_stamp['month'] = df_stamp.date.apply(lambda row:row.month,1)
df_stamp['day'] = df_stamp.date.apply(lambda row:row.day,1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row:row.weekday(),1)
df_stamp['hour'] = df_stamp.date.apply(lambda row:row.hour,1)
df_stamp['minute'] = df_stamp.date.apply(lambda row:row.minute,1)
df_stamp['minute'] = df_stamp.minute.map(lambda x:x//15)
data_stamp = df_stamp.drop(['date'],1).values
elif self.timeenc==1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1,0)
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, timeenc=0, freq='h'):
# size [seq_len, label_len pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
cols = list(df_raw.columns); cols.remove(self.target)
df_raw = df_raw[cols+[self.target]]
num_train = int(len(df_raw)*0.7)
num_test = int(len(df_raw)*0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if self.timeenc==0:
df_stamp['month'] = df_stamp.date.apply(lambda row:row.month,1)
df_stamp['day'] = df_stamp.date.apply(lambda row:row.day,1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row:row.weekday(),1)
df_stamp['hour'] = df_stamp.date.apply(lambda row:row.hour,1)
data_stamp = df_stamp.drop(['date'],1).values
elif self.timeenc==1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1,0)
import sys
import logging
import arrow
import pandas as pd
from fintrist import Study
from fintrist_lib import ANALYSIS_CATALOG, SCRAPERS_CATALOG
from .settings import Config
from . import util
logger = logging.getLogger(__name__)
def backtest(model, strategy, period='1y', end=None):
"""Run the model Study on previous dates over the period,
collecting the alerts.
::parents:: model
::params:: strategy, period, end
::alerts:: complete
"""
# Define the time period
if not end:
end = arrow.now(Config.TZ)
quant, unit = util.split_alphanum(period)
if unit == 'y':
start = end.shift(years=-quant)
elif unit == 'd':
start = end.shift(days=-quant)
else:
start = end.shift(years=-100)
# Set up the fake study to run
simulated = []
tempstudy = Study()
parent_data = {name: study.data for name, study in model.parents.items()}
try:
recipe = ANALYSIS_CATALOG[model.recipe]
except KeyError:
recipe = SCRAPERS_CATALOG[model.recipe]
parent_data['mock'] = model.data
# At each date, run the model's process on the previous data
# TODO: Date range should be based on the model's parents, not the model.
full_range = model.data.index
for view_date in model.data[start.date():end.date()].index:
logger.debug(f"Log: Backtesting at {view_date}")
curr_idx = full_range.get_loc(view_date)
try:
prev_date = full_range[curr_idx - 1]
except IndexError:
continue
trunc_data = {name: data[:prev_date] for name, data in parent_data.items()}
_, newalerts = recipe.process(**trunc_data, **model.params)
tempstudy.alertslog.record_alerts(newalerts, view_date)
actions = strategy.check_actions(tempstudy)
simulated.append((view_date, actions))
# Save the data
    simdata = pd.DataFrame(simulated, columns=['date', 'signals'])
import datetime
import logging
import os.path
import random
import sys
from os import path
from typing import List
import pandas as pd
from tqdm import tqdm
from query_formulation.search import ElasticSearch, SibilsElastic
random.seed(0)
# Gets or creates a logger
def get_logger(filename: str, name: str):
logger = logging.getLogger(name)
# set log level
logger.setLevel(logging.INFO)
# define file handler and set formatter
file_handler = logging.FileHandler(filename)
formatter = logging.Formatter("%(asctime)s : %(levelname)s : %(message)s")
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return logger
logger = get_logger("queries.log", __name__)
logger.info("logging initiated")
current_path = path.abspath(path.dirname(__file__))
data_dir = path.join(current_path, 'data')
if len(sys.argv) > 1 and sys.argv[1].lower() == 'sigir':
dataset_type = 'SIGIR_'
else:
dataset_type = ''
searcher = ElasticSearch(None)
relevance_df: pd.DataFrame = pd.read_csv(f'{data_dir}/{dataset_type}results.csv')
topics_df: pd.DataFrame = pd.read_csv(f"{data_dir}/{dataset_type}topics.csv")
def evaluate_result(topic_id: str, query_result: List[str], recall_at_count=1000):
int_query_result = []
for idx, r in enumerate(query_result):
try:
int_query_result.append(int(r) )# pubmed ids are returned as string, convert them to integer)
except:
continue
relevant_pids = relevance_df[relevance_df["topic_id"] == topic_id]['pubmed_id'].tolist()
assert len(relevant_pids)
total_relevant = len(relevant_pids)
total_found = len(int_query_result)
recall = precision = f_score = recall_at = 0
if not total_found:
return recall, precision, f_score, recall_at
true_positives = set(relevant_pids).intersection(int_query_result)
tp = len(true_positives)
recall = round(tp / total_relevant, 4)
if len(int_query_result) > recall_at_count:
true_positives_at = set(relevant_pids).intersection(
int_query_result[:recall_at_count]
)
recall_at = round(len(true_positives_at) / total_relevant, 4)
else:
recall_at = recall
precision = round(tp / total_found, 5)
if not precision and not recall:
f_score = 0
else:
f_score = 2 * precision * recall / (precision + recall)
return recall, precision, recall_at, f_score
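# Worked example for evaluate_result (made-up numbers): 200 relevant ids,
# 500 returned ids, 100 true positives -> recall = 0.5, precision = 0.2,
# f_score = 2*0.2*0.5/(0.2+0.5) ~= 0.29; recall_at only counts hits among the
# first 1000 results.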
def search_and_eval(row):
query = row["query"]
topic_id = row["topic_id"]
end_date, start_date = None, None
if 'start_date' in row and row['start_date']:
try:
start_date = datetime.datetime.strptime(row['start_date'], '%Y-%m-%d')
except Exception:
start_date = None
if 'end_date' in row and row['end_date']:
try:
end_date = datetime.datetime.strptime(row['end_date'], '%Y-%m-%d')
except Exception:
end_date = None
try:
results = searcher.search_pubmed(query, start_date=start_date, end_date=end_date)
except Exception as e:
logger.warning(
f"ERROR: topic_id {topic_id} with query {query} error {e}"
)
return pd.DataFrame()
row["recall"], row["precision"], row["recall_at"], row["f_score"] = evaluate_result(
topic_id, results, recall_at_count=1000
)
row["results_count"] = len(results)
if row['recall']:
logger.info(f"topic_id {topic_id} with query {query} done, recall was {row['recall']}")
else:
logger.info(f'topic_id {topic_id}, nah buddy')
return row
def get_already_searched_data(path: str):
if os.path.isfile(path):
temp_df = pd.read_csv(path)
return temp_df
    return pd.DataFrame()
from config import *
import gensim
import pandas as pd
import os
import re
import string as st
import numpy as np
from textblob import TextBlob
class DataProcessor:
def __init__(self, nrows=None):
self.df = None
self.w2v_model = None
self.label_col = "retweet_count"
self.label_max = None
self.drop = False
self.nrows = nrows
def get_glove_model(self):
if not glove_path.exists():
os.system(f"wget https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz "
f"--directory-prefix={data_folder}")
self.w2v_model = gensim.models.KeyedVectors.load_word2vec_format(glove_path, binary=True)
def tweet_w2v(self, text):
vecs = np.array([self.w2v_model[word] for word in text.split() if word in self.w2v_model])
return np.mean(vecs, axis=0)
def clean_df(self):
self.df["text"] = self.df["text"].apply(self.clean)
if self.drop:
self.df = self.df.replace('', np.nan).dropna(subset=["text"]).reset_index(drop=True)
def clean(self, string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
punc = (st.punctuation.replace('@', '').replace('#', '')) + '"' + "'" + '”' + '“' + '‘'
translator = str.maketrans('', '', punc)
string = str(string).lower()
string = string.translate(translator)
string = string.split()
to_remove = []
for word in string:
if word[0] == '#' or word[0] == '@' or word == 'rt' or word[:4] == 'http' or word[0].isnumeric():
to_remove.append(word)
for word in to_remove:
string.remove(word)
text = emoji_pattern.sub(r'', ' '.join(string))
text = re.sub("[^a-zA-Z ]", "", text)
return text
def norm_label(self, overwrite_max=None):
if overwrite_max:
self.label_max = overwrite_max
else:
self.label_max = max(self.df[self.label_col])
self.df[self.label_col] = self.df[self.label_col] / self.label_max
def unnorm(self, norm_labels):
return norm_labels * self.label_max
def get_split_df(self):
from verstack.stratified_continuous_split import scsplit
return scsplit(self.df["text"], self.df[self.label_col], stratify=self.df[self.label_col], train_size=0.7,
test_size=0.3)
def get_split_df_with_all_cols(self):
from verstack.stratified_continuous_split import scsplit
return scsplit(self.df, self.df[self.label_col], stratify=self.df[self.label_col], train_size=0.7,
test_size=0.3)
def apply_glove(self, normalize=True):
print("Loading Glove ...")
self.get_glove_model()
print("Done")
print("Processing data ...")
self.clean_df()
if normalize:
self.norm_label()
if self.drop:
self.df = self.df.replace('', np.nan).dropna(subset=["text"]).reset_index(drop=True)
self.df["text"] = self.df["text"].apply(self.tweet_w2v)
print("Done")
def save_df(self, path):
self.df.to_hdf(path, key='df', mode='w')
print(f"Dataframe saved to {path}")
def load_csv(self, path=train_path):
self.df = pd.read_csv(path, nrows=self.nrows, index_col="id")
self.df["user_verified"] = self.df["user_verified"].astype(bool)
def replace_timestamp(self):
self.df["day_of_week"] = | pd.to_datetime(self.df["timestamp"]) | pandas.to_datetime |
#!/usr/bin/env python
import argparse
import json
import logging
import os
import re
from datetime import datetime
from math import sqrt
from pathlib import Path
from typing import List, Optional
import lightgbm as lgb
import numpy as np
import pandas as pd
from fastcore.utils import store_attr
from mlforecast.core import TimeSeries
from mlforecast.forecast import Forecast
from window_ops.ewm import ewm_mean
from window_ops.expanding import expanding_mean
from window_ops.rolling import rolling_mean, seasonal_rolling_mean
freq2config = {
'D': dict(
lags=[7, 28],
lag_transforms={
7: [(rolling_mean, 7), (rolling_mean, 28)],
28: [
(rolling_mean, 7),
(rolling_mean, 28),
(seasonal_rolling_mean, 7, 4),
(seasonal_rolling_mean, 7, 8),
],
},
date_features=['year', 'quarter', 'month', 'week', 'day', 'dayofweek'],
),
'W': dict(
lags=[1, 2, 3, 4],
lag_transforms={
            1: [expanding_mean, (ewm_mean, 0.1), (ewm_mean, 0.3)],
},
date_features=['year', 'quarter', 'month', 'week']
),
}
class TSForecast:
"""Computes forecast at scale."""
def __init__(self, filename: str,
filename_static: str,
filename_temporal: str,
dir_train: str,
dir_output: str,
freq: str,
unique_id_column: str,
ds_column: str, y_column: str,
horizon: int,
naive_forecast: bool,
backtest_windows: int,
objective: str, metric: str,
learning_rate: int, n_estimators: int,
num_leaves: int, min_data_in_leaf: int,
bagging_freq: int, bagging_fraction: float) -> 'TSForecast':
store_attr()
self.df: pd.DataFrame
self.df_temporal: pd.DataFrame
self.fcst: Forecast
self.static_features: List[str]
self.df, self.df_temporal, self.static_features = self._read_file()
def _clean_columns(self, df: pd.DataFrame) -> None:
new_columns = []
for column in df.columns:
new_column = re.sub(r'[",:{}[\]]', '', column)
new_columns.append(new_column)
df.columns = new_columns
def _read_file(self) -> pd.DataFrame:
logger.info('Reading file...')
df = pd.read_parquet(f'{self.dir_train}/{self.filename}')
logger.info('File read.')
renamer = {self.unique_id_column: 'unique_id',
self.ds_column: 'ds',
self.y_column: 'y'}
df.rename(columns=renamer, inplace=True)
df.set_index(['unique_id', 'ds'], inplace=True)
self._clean_columns(df)
cat_dtypes = ['object', 'int64', 'int32'] #assuming these are cats
static_features = None
if self.filename_static is not None:
logger.info('Processing static features')
static = pd.read_parquet(f'{self.dir_train}/{self.filename_static}')
static.rename(columns=renamer, inplace=True)
static.set_index('unique_id', inplace=True)
self._clean_columns(static)
static_features = static.columns.to_list()
obj_features = static.select_dtypes(cat_dtypes).columns.to_list()
static[obj_features] = static[obj_features].astype('category')
df = df.merge(static, how='left', left_on=['unique_id'],
right_index=True)
logger.info('Done')
df_temporal = None
if self.filename_temporal is not None:
logger.info('Processing temporal features')
df_temporal = pd.read_parquet(f'{self.dir_train}/{self.filename_temporal}')
df_temporal.rename(columns=renamer, inplace=True)
df_temporal.set_index(['unique_id', 'ds'], inplace=True)
self._clean_columns(df_temporal)
obj_features = df_temporal.select_dtypes(cat_dtypes).columns.to_list()
df_temporal[obj_features] = df_temporal[obj_features].astype('category')
df = df.merge(df_temporal, how='left', left_on=['unique_id', 'ds'],
right_index=True)
df_temporal.reset_index(inplace=True)
df_temporal['ds'] = | pd.to_datetime(df_temporal['ds']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from math import sqrt
from dramkit.gentools import power
from dramkit.gentools import isnull
from dramkit.gentools import cal_pct
from dramkit.gentools import x_div_y
from dramkit.gentools import check_l_allin_l0
from dramkit.gentools import get_update_kwargs
from dramkit.gentools import con_count_ignore
from dramkit.gentools import replace_repeat_func_iter
from dramkit.datetimetools import diff_days_date
from dramkit.logtools.utils_logger import logger_show
from dramkit.plottools.plot_common import plot_series
from dramkit.plottools.plot_common import plot_series_conlabel
#%%
def signal_merge(data, sig1_col, sig2_col, merge_type=1):
    '''
    Merge two signals into a single signal.
    Parameters
    ----------
    data : pandas.DataFrame
        Data to process; must contain the columns named by ``sig1_col`` and ``sig2_col``.
    sig1_col, sig2_col : str
        Signal columns, where -1 means buy (go long) and 1 means sell (go short).
    merge_type : int
        How the two signals are merged:
        - 1: either signal on its own counts as a valid trade signal
        - 2: trade signals are derived from the summed holdings implied by the two signals (the returned signal does not support reversing a position)
        - 3: a trade signal is emitted only when both signals point in the same direction (the returned signal does not support reversing a position)
    :returns: `pd.Series` - the merged signal
    '''
df = data.reindex(columns=[sig1_col, sig2_col])
df.rename(columns={sig1_col: 'sig1', sig2_col: 'sig2'},
inplace=True)
if merge_type == 1:
df['sig'] = df['sig1'] + df['sig2']
df['sig'] = df['sig'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 2:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 3:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['hold'] = df['hold'].apply(lambda x: 1 if x == 2 else \
(-1 if x == -2 else 0))
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
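# Usage sketch for signal_merge (illustrative only; the demo column names and values below are assumptions, not project data).
_demo_signals = pd.DataFrame({'sig1_demo': [-1, 0, 0, 1, 0],
                              'sig2_demo': [0, -1, 0, 0, 1]})
_demo_merged = signal_merge(_demo_signals, 'sig1_demo', 'sig2_demo', merge_type=1)
# merge_type=1: any single non-zero signal counts, so the result is [-1, -1, 0, 1, 1]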
#%%
def cal_cost_add(hold_vol, hold_cost, add_vol, add_price):
    '''
    | Compute the average holding cost after adding to a position.
    | hold_vol is the position size before the add, hold_cost is the average holding cost before the add, add_vol is the added size and add_price its price.
    '''
holdCost = hold_vol * hold_cost
totCost = holdCost + add_vol * add_price
return totCost / (hold_vol + add_vol)
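# Worked example (illustrative numbers): holding 100 units at an average cost of 10.0
# and adding 50 units at 13.0 gives (100*10 + 50*13) / 150 = 11.0.
assert abs(cal_cost_add(100, 10.0, 50, 13.0) - 11.0) < 1e-9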
def get_mean_cost(trade_records, dirt_col, price_col, vol_col):
'''
根据交易记录计算每期持仓成本
Parameters
----------
trade_records : pd.DataFrame
交易记录数据,必须包含 ``dirt_col`` 、 ``price_col`` 和 `vol_col` 指定的列
dirt_col : str
买卖方向列,1为买入(做多),-1为卖出(做空)
price_col : str
成交价格列
vol_col : str
为成交量列
:returns: `pd.DataFrame` - 在trade_records上增加了'holdVol', 'holdCost', 'meanCost'三列
'''
df = trade_records.copy()
ori_idx = df.index
df.index = range(0, df.shape[0])
vol_col_ = vol_col + '_'
df[vol_col_] = df[dirt_col] * df[vol_col]
df['holdVol'] = df[vol_col_].cumsum().round(4)
df.loc[df.index[0], 'holdCost'] = df[price_col].iloc[0] * df[vol_col_].iloc[0]
df.loc[df.index[0], 'meanCost'] = df[price_col].iloc[0]
for k in range(1, df.shape[0]):
holdVol_pre = df['holdVol'].iloc[k-1]
holdCost_pre = df['holdCost'].iloc[k-1]
holdVol = df['holdVol'].iloc[k]
tradeVol = df[vol_col_].iloc[k]
if tradeVol == 0:
holdCost, meanCost = holdCost_pre, df['meanCost'].iloc[k-1]
elif holdVol == 0: # 平仓
holdCost, meanCost = 0, 0
elif holdVol_pre >= 0 and holdVol > holdVol_pre: # 买入开仓或加仓
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
elif holdVol_pre >= 0 and holdVol > 0 and holdVol < holdVol_pre: # 买入减仓
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
elif holdVol_pre >= 0 and holdVol < 0: # 买入平仓反向卖出
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
elif holdVol_pre <= 0 and holdVol < holdVol_pre: # 卖出开仓或加仓
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
elif holdVol_pre <= 0 and holdVol < 0 and holdVol > holdVol_pre: # 卖出减仓
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
elif holdVol_pre <= 0 and holdVol > 0: # 卖出平仓反向买入
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
df.loc[df.index[k], 'holdCost'] = holdCost
df.loc[df.index[k], 'meanCost'] = meanCost
df.index = ori_idx
return df
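# Usage sketch for get_mean_cost (column names and trades below are illustrative assumptions):
# buy 100 @ 10, buy 100 @ 12, then sell 200 @ 13 -> meanCost goes 10 -> 11 -> 0 (position closed).
_demo_trades = pd.DataFrame({'dirt': [1, 1, -1],
                             'price': [10.0, 12.0, 13.0],
                             'vol': [100, 100, 200]})
_demo_costs = get_mean_cost(_demo_trades, 'dirt', 'price', 'vol')[['holdVol', 'meanCost']]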
#%%
def cal_gain_con_futures(price_open, price_now, n, player,
fee=0.1/100, lever=100,
n_future2target=0.001):
'''
永续合约收益计算,如火币BTC合约
Parameters
----------
price_open : float
开仓价格
price_now : float
现价
n : int
数量(张)
player : str
做空或做多
fee : float
手续费比例
lever : int
杠杆
n_future2target : float
一份合约对应的标的数量
Returns
-------
gain_lever : float
盈亏金额
gain_pct : float
盈亏比例
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open * n_future2target / lever
price_now_ = price_now * n_future2target / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
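# Worked example (illustrative numbers, zero fee for simplicity): 1 contract bought at 10000,
# price rises 10% to 11000 with lever=100 and n_future2target=0.001:
# gain_pct is about 0.10 and the levered P&L is about 1.0 in quote-currency terms.
_demo_gain, _demo_gain_pct = cal_gain_con_futures(10000, 11000, 1, 'buy',
                                                  fee=0, lever=100, n_future2target=0.001)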
def cal_gain_con_futures2(price_open, price_now, n, player,
fee=0.1/100, lever=100):
'''
永续合约收益计算,如币安ETH合约
Parameters
----------
price_open : float
开仓价格
price_now : float
现价
n : int
数量(标的量)
player : str
做空或做多
fee : float
手续费比例
lever : int
杠杆
Returns
-------
gain_lever : float
盈亏金额
gain_pct : float
盈亏比例
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open / lever
price_now_ = price_now / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
#%%
def cal_expect_return(hit_prob, gain_loss_ratio):
    '''Compute the expected return from the hit probability (win rate) and the gain/loss ratio.'''
return hit_prob*gain_loss_ratio - (1-hit_prob)
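# Worked example (illustrative): a 40% hit rate with a 3:1 gain/loss ratio gives
# an expected return of 0.4*3 - 0.6 = 0.6 per unit risked.
assert abs(cal_expect_return(0.4, 3) - 0.6) < 1e-9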
def cal_gain_pct_log(price_cost, price, pct_cost0=1):
'''
| 计算对数收益率
| price_cost为成本
| price为现价
| pct_cost0为成本price_cost为0时的返回值'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost == 0:
return pct_cost0
elif price_cost > 0:
return np.log(price) - np.log(price_cost)
else:
raise ValueError('price_cost必须大于等于0!')
def cal_gain_pct(price_cost, price, pct_cost0=1):
'''
| 计算百分比收益率
| price_cost为成本
| price为现价
| pct_cost0为成本price_cost为0时的返回值
Note
----
默认以权利方成本price_cost为正(eg. 买入价为100,则price_cost=100)、
义务方成本price_cost为负进行计算(eg. 卖出价为100,则price_cost=-100)
'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost > 0:
return price / price_cost - 1
elif price_cost < 0:
return 1 - price / price_cost
else:
return pct_cost0
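# Worked example (long side only, illustrative): a position opened at 100 and now priced at 110
# has a percentage gain of 110/100 - 1 = 0.10.
assert abs(cal_gain_pct(100, 110) - 0.10) < 1e-9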
def cal_gain_pcts(price_series, gain_type='pct',
pct_cost0=1, logger=None):
'''
| 计算资产价值序列price_series(`pd.Series`)每个时间的收益率
| gain_type:
| 为'pct',使用普通百分比收益
| 为'log',使用对数收益率(当price_series存在小于等于0时不能使用)
| 为'dif',收益率为前后差值(适用于累加净值情况)
| pct_cost0为当成本为0时收益率的指定值
'''
if (price_series <= 0).sum() > 0:
gain_type = 'pct'
logger_show('存在小于等于0的值,将用百分比收益率代替对数收益率!',
logger, 'warning')
if gain_type == 'pct':
df = pd.DataFrame({'price_now': price_series})
df['price_cost'] = df['price_now'].shift(1)
df['pct'] = df[['price_cost', 'price_now']].apply(lambda x:
cal_gain_pct(x['price_cost'], x['price_now'],
pct_cost0=pct_cost0), axis=1)
return df['pct']
elif gain_type == 'log':
return price_series.apply(np.log).diff()
elif gain_type == 'dif':
return price_series.diff()
else:
raise ValueError('未识别的`gain_type`,请检查!')
#%%
def cal_beta(values_target, values_base, gain_type='pct', pct_cost0=1):
'''
| 计算贝塔系数
| values_target, values_base分别为目标价值序列和基准价值序列
| gain_type和pct_cost0同 :func:`dramkit.fintools.utils_gains.cal_gain_pcts` 中的参数
| 参考:
| https://www.joinquant.com/help/api/help#api:风险指标
| https://blog.csdn.net/thfyshz/article/details/83443783
'''
values_target = pd.Series(values_target)
values_base = pd.Series(values_base)
pcts_target = cal_gain_pcts(values_target, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_base = cal_gain_pcts(values_base, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_target = pcts_target.iloc[1:]
pcts_base = pcts_base.iloc[1:]
return np.cov(pcts_target, pcts_base)[0][1] / np.var(pcts_base, ddof=1)
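# Sanity-check sketch (illustrative data): the beta of a series measured against itself should be 1.
_demo_nav = pd.Series([100.0, 102.0, 101.0, 105.0, 107.0])
assert abs(cal_beta(_demo_nav, _demo_nav) - 1.0) < 1e-9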
def cal_alpha_beta(values_target, values_base, r0=3.0/100, nn=252,
gain_type='pct', rtype='exp', pct_cost0=1, logger=None):
'''
| 计算alpha和beta系数
| 参数参考 :func:`cal_beta` 和 :func:`cal_returns_period` 函数
'''
r = cal_returns_period(values_target, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
r_base = cal_returns_period(values_base, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
beta = cal_beta(values_target, values_base,
gain_type=gain_type, pct_cost0=pct_cost0)
return r - (r0 + beta*(r_base-r0)), beta
def cal_alpha_by_beta_and_r(r, r_base, beta, r0=3.0/100):
'''
| 根据年化收益以及beta计算alpha
| r为策略年化收益,r_base为基准年化收益,r0为无风险收益率,beta为策略beta值
'''
return r - (r0 + beta*(r_base-r0))
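# Worked example (illustrative numbers): with a strategy return of 12%, a benchmark return of 8%,
# beta of 1.2 and a 3% risk-free rate, alpha = 0.12 - (0.03 + 1.2*(0.08-0.03)) = 0.03.
assert abs(cal_alpha_by_beta_and_r(0.12, 0.08, 1.2, r0=0.03) - 0.03) < 1e-6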
#%%
def cal_return_period_by_gain_pct(gain_pct, n, nn=250, rtype='exp',
gain_pct_type='pct'):
'''
给定最终收益率gain_pct,计算周期化收益率
Parameters
----------
gain_pct : float
给定的最终收益率
n : int
期数
nn : int
| 一个完整周期包含的期数,eg.
| 若price_series周期为日,求年化收益率时nn一般为252(一年的交易日数)
| 若price_series周期为日,求月度收益率时nn一般为21(一个月的交易日数)
| 若price_series周期为分钟,求年化收益率时nn一般为252*240(一年的交易分钟数)
rtype : str
周期化时采用指数方式'exp'或平均方式'mean'
gain_pct_type : str
| 设置最终收益率gain_pct得来的计算方式,可选'pct', 'log'
| 默认为百分比收益,若为对数收益,则计算周期化收益率时只能采用平均法,不能用指数法
.. hint::
| 百分比收益率:
| 复利(指数)公式:1 + R = (1 + r) ^ (n / nn) ——> r = (1 + R) ^ (nn / n) - 1
| 单利(平均)公式:1 + R = 1 + r * (n / nn) ——> r = R * nn / n
| 对数收益率:
| R = r * (n / nn) ——> r = R * nn / n(采用对数收益率计算年化收益只能用平均法)
Returns
-------
r : float
周期化收益率,其周期由nn确定
References
----------
https://zhuanlan.zhihu.com/p/112211063
'''
if gain_pct_type in ['log', 'ln', 'lg']:
rtype = 'mean' # 对数收益率只能采用平均法进行周期化
if rtype == 'exp':
r = power(1 + gain_pct, nn / n) - 1
elif rtype == 'mean':
r = nn * gain_pct / n
return r
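# Worked example (illustrative): a 21% total gain over 126 trading days annualised with nn=252:
# compound ('exp'): (1.21)**(252/126) - 1 = 0.4641; simple ('mean'): 0.21*252/126 = 0.42.
_demo_r_exp = cal_return_period_by_gain_pct(0.21, 126, nn=252, rtype='exp')
_demo_r_mean = cal_return_period_by_gain_pct(0.21, 126, nn=252, rtype='mean')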
def cal_ext_return_period_by_gain_pct(gain_pct, gain_pct_base, n,
nn=250, rtype='exp',
gain_pct_type='pct',
ext_type=1):
'''
| 给定收益率和基准收益率,计算周期化超额收益率
| rtype周期化收益率方法,可选'exp'或'mean'或'log'
| ext_type设置超额收益率计算方式:
| 若为1,先算各自周期化收益率,再相减
| 若为2,先相减,再周期化算超额
| 若为3,先还原两者实际净值,再以相对于基准净值的收益计算周期化超额
| 其他参数意义同 :func:`cal_return_period_by_gain_pct` 函数
| 参考:
| https://xueqiu.com/1930958059/167803003?page=1
'''
if rtype == 'log':
ext_type = 3
if ext_type == 1:
p1 = cal_return_period_by_gain_pct(gain_pct, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
p2 = cal_return_period_by_gain_pct(gain_pct_base, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return p1 - p2
elif ext_type == 2:
p = cal_return_period_by_gain_pct(gain_pct-gain_pct_base, n, nn=nn,
rtype=rtype, gain_pct_type=gain_pct_type)
return p
if ext_type == 3:
if gain_pct_type in ['log', 'ln', 'lg']:
p = np.exp(gain_pct)
p_base = np.exp(gain_pct_base)
elif gain_pct_type == 'pct':
p = 1 + gain_pct
p_base = 1 + gain_pct_base
if rtype == 'exp':
return power(p / p_base, nn / n) - 1
elif rtype == 'mean':
return (p / p_base - 1) * nn / n
elif rtype == 'log':
return (np.log(p) - np.log(p_base)) * nn / n
else:
raise ValueError('未识别的ext_type参数,请检查!')
def cal_ext_return_period(values, values_base, gain_type='pct', rtype='exp',
nn=250, pct_cost0=1, ext_type=1, logger=None):
'''
| 根据给定价格或价值序列计values和基准序列values_base,算超额收益
| pct_cost0参考 :func:`cal_gain_pct` 和 :func:`cal_gain_pct_log` 函数
| 其它参数参考 :func:`cal_ext_return_period_by_gain_pct` 函数
'''
values, values_base = np.array(values), np.array(values_base)
n1, n0 = len(values), len(values_base)
if n1 != n0:
raise ValueError('两个序列长度不相等,请检查!')
if gain_type == 'log':
        if (values[0] <= 0 or values[-1] <= 0) or \
                (values_base[0] <= 0 or values_base[-1] <= 0):
logger_show('发现开始值或结束值为负,用百分比收益率代替对数收益率!',
logger, 'warning')
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
p1 = cal_gain_pct_log(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct_log(values_base[0], values_base[-1], pct_cost0=pct_cost0)
rtype = 'mean' # 采用对数收益率计算年化收益只能用平均法
gain_pct_type = 'log'
elif gain_type == 'pct':
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
        p1 = values[-1] - values[0]
        p0 = values_base[-1] - values_base[0]
rtype = 'mean'
gain_pct_type = 'pct'
else:
raise ValueError('未识别的`gain_gype`,请检查!')
extr = cal_ext_return_period_by_gain_pct(p1, p0, n1, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type,
ext_type=ext_type)
return extr
def cal_returns_period(price_series, gain_type='pct', rtype='exp',
nn=252, pct_cost0=1, logger=None):
'''
计算周期化收益率
Parameters
----------
price_series : pd.Series, np.array, list
资产价值序列(有负值时不能使用对数收益率)
gain_type : str
| 收益率计算方式设置
| 为'pct',使用普通百分比收益
| 为'log',使用对数收益率(当price_series存在小于等于0时不能使用)
| 为'dif',收益率为前后差值(适用于累加净值情况)
rtype : str
| 收益率周期化时采用指数方式'exp'或平均方式'mean'
| (采用对数收益率计算年化收益只能用平均法)
nn : int
| 一个完整周期包含的期数,eg.
| 若price_series周期为日,求年化收益率时nn一般为252(一年的交易日数)
| 若price_series周期为日,求月度收益率时nn一般为21(一个月的交易日数)
| 若price_series周期为分钟,求年化收益率时nn一般为252*240(一年的交易分钟数)
pct_cost0 : float
成本为0时收益率的指定值,参见 :func:`cal_gain_pct` 和 :func:`cal_gain_pct_log` 函数
Returns
-------
r : float
周期化收益率,其周期由nn确定
See Also
--------
:func:`cal_return_period_by_gain_pct`
'''
price_series = np.array(price_series)
n_ = len(price_series)
if gain_type == 'log':
if price_series[0] <= 0 or price_series[-1] <= 0:
logger_show('发现开始值或结束值为负,用百分比收益率代替对数收益率!',
logger, 'warning')
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
gain_pct = cal_gain_pct_log(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
rtype = 'mean' # 采用对数收益率计算年化收益只能用平均法
gain_pct_type = 'log'
elif gain_type == 'pct':
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
gain_pct = price_series[-1] - price_series[0]
gain_pct_type = 'pct'
rtype = 'mean'
else:
raise ValueError('未识别的`gain_type`,请检查!')
r = cal_return_period_by_gain_pct(gain_pct, n_, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return r
def cal_returns_period_mean(price_series, gain_type='pct', nn=252,
pct_cost0=1, logger=None):
'''
| 计算周期化收益率,采用收益率直接平均的方法
| price_series为资产价值序列,pd.Series或list或np.array(有负值时不能使用对数收益率)
| gain_type和pct_cost0参数参见 :func:`cal_gain_pcts`
| nn为一个完整周期包含的期数,eg.
| 若price_series周期为日,求年化收益率时nn一般为252(一年的交易日数)
| 若price_series周期为日,求月度收益率时nn一般为21(一个月的交易日数)
| 若price_series周期为分钟,求年化收益率时nn一般为252*240(一年的交易分钟数)
'''
price_series = pd.Series(price_series)
price_series.name = 'series'
df = | pd.DataFrame(price_series) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 22:13:05 2018
@author: tauro
"""
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler, PowerTransformer, MinMaxScaler, LabelEncoder
from sklearn.metrics import accuracy_score, roc_auc_score, cohen_kappa_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from datetime import datetime
import os
import pickle
#Progress bar
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
#Garbage collection
import gc
from scipy.stats import mode
import xgboost as xgb
from lightgbm import LGBMClassifier
from keras.models import Sequential
from keras.layers import Dense, Dropout
#Custom file for data loading and preprocessing
from data_preprocessing import process_data, data_split
start_time = datetime.now()
owd = os.getcwd()
# =============================================================================
# Load required data
train, response, test, test_ids = process_data("data_train.csv", "data_test.csv",
pca=False, scale=True)
X_train, X_test, y_train, y_test = data_split(train, response)
del train, response, test, test_ids
# =============================================================================
# Model 1 - Gaussian Naive Bayes classifier
gc.collect()
print("\nTraining Gaussian Naive Bayes classifier - Model 1 of 8\n")
list_gnb = []
gnb = GaussianNB()
gnb.fit(X_train, y_train)
gnb_pred_prob = gnb.predict_proba(X_test)[:,1]
threshold_gnb = 0.3
gnb_pred = gnb_pred_prob > threshold_gnb
metrics_gnb = {}
metrics_gnb['threshold'] = threshold_gnb
accuracy = accuracy_score(y_test, gnb_pred)
metrics_gnb['accuracy'] = accuracy
roc = roc_auc_score(y_test, gnb_pred)
metrics_gnb['auc_roc'] = roc
kappa = cohen_kappa_score(y_test, gnb_pred)
metrics_gnb['kappa'] = kappa
conf_matrix = confusion_matrix(y_test, gnb_pred)
metrics_gnb['conf_matrix'] = conf_matrix
metrics_gnb['sensitivity'] = conf_matrix[1,1] / (conf_matrix[1,1] + conf_matrix[1,0])
metrics_gnb['specificity'] = conf_matrix[0,0] / (conf_matrix[0,1] + conf_matrix[0,0])
list_gnb.append(metrics_gnb)
# =============================================================================
# Model 2 - XGBoost classifier
gc.collect()
print("\nTraining XG Boost classifier - Model 2 of 8\n")
list_xgb = []
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {'max_depth': 2, 'eta': 0.5, 'silent': 0, 'objective': 'binary:logistic',
'nthread': 4, 'eval_metric': 'auc', 'colsample_bytree': 0.8, 'subsample': 0.8,
'scale_pos_weight': 1, 'gamma': 200, 'learning_rate': 0.02}
evallist = [(dtest, 'eval'), (dtrain, 'train')]
num_rounds = 500
bst = xgb.train(params, dtrain, num_rounds, evallist)
xgb_pred_prob = bst.predict(dtest)
threshold_xgb = 0.5
xgb_pred = xgb_pred_prob > threshold_xgb
xgb_pred = np.multiply(xgb_pred, 1)
xgb_pred = | pd.Series(xgb_pred) | pandas.Series |
import pandas as pd
from inci_db.db_utils import DatabaseConnector
class Abbreviation(DatabaseConnector):
def __init__(self, name):
self.name = name
@classmethod
def create_table(cls, filepath):
"""
Create a table in the database from csv file
with abbreviations of cosmetic ingredients.
"""
my_db = super().db_connect()
cursor = my_db.cursor()
table_name = cls.__name__.lower()
data = | pd.read_csv(filepath, encoding="utf-8") | pandas.read_csv |
"""Wrapped xgboost for tabular datasets."""
from time import perf_counter
import logging
from copy import copy
from copy import deepcopy
from typing import Optional
from typing import Callable
from typing import Tuple
from typing import Dict
import xgboost as xgb
from xgboost import dask as dxgb
import numpy as np
import pandas as pd
import cupy as cp
from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset
from lightautoml.dataset.gpu.gpu_dataset import CudfDataset
from .base_gpu import TabularMLAlgo_gpu
from .base_gpu import TabularDatasetGpu
from lightautoml.pipelines.selection.base import ImportanceEstimator
from lightautoml.validation.base import TrainValidIterator
from lightautoml.ml_algo.tuning.base import Distribution
from lightautoml.ml_algo.tuning.base import SearchSpace
logger = logging.getLogger(__name__)
class BoostXGB(TabularMLAlgo_gpu, ImportanceEstimator):
"""Gradient boosting on decision trees from LightGBM library.
default_params: All available parameters listed in lightgbm documentation:
- https://lightgbm.readthedocs.io/en/latest/Parameters.html
freeze_defaults:
- ``True`` : params may be rewritten depending on dataset.
- ``False``: params may be changed only manually or with tuning.
timer: :class:`~lightautoml.utils.timer.Timer` instance or ``None``.
"""
_name: str = 'XGB'
_default_params = {
'tree_method':'gpu_hist',
'predictor':'gpu_predictor',
'task': 'train',
"learning_rate": 0.05,
"max_leaves": 128,
"max_depth": 0,
"verbosity": 0,
"reg_alpha": 1,
"reg_lambda": 0.0,
"gamma": 0.0,
'max_bin': 255,
'n_estimators': 3000,
'early_stopping_rounds': 100,
'random_state': 42
}
def _infer_params(self) -> Tuple[dict, int, int, int, Optional[Callable], Optional[Callable]]:
"""Infer all parameters in lightgbm format.
Returns:
Tuple (params, num_trees, early_stopping_rounds, verbose_eval, fobj, feval).
About parameters: https://lightgbm.readthedocs.io/en/latest/_modules/lightgbm/engine.html
"""
params = copy(self.params)
early_stopping_rounds = params.pop('early_stopping_rounds')
num_trees = params.pop('n_estimators')
root_logger = logging.getLogger()
level = root_logger.getEffectiveLevel()
if level in (logging.CRITICAL, logging.ERROR, logging.WARNING):
verbose_eval = False
elif level == logging.INFO:
verbose_eval = 100
else:
verbose_eval = 10
# get objective params
loss = self.task.losses['xgb']
params['objective'] = loss.fobj_name
fobj = loss.fobj
# get metric params
params['metric'] = loss.metric_name
feval = loss.feval
params['num_class'] = self.n_classes
# add loss and tasks params if defined
params = {**params, **loss.fobj_params, **loss.metric_params}
return params, num_trees, early_stopping_rounds, verbose_eval, fobj, feval
def init_params_on_input(self, train_valid_iterator: TrainValidIterator) -> dict:
"""Get model parameters depending on dataset parameters.
Args:
train_valid_iterator: Classic cv-iterator.
Returns:
Parameters of model.
"""
rows_num = len(train_valid_iterator.train)
task = train_valid_iterator.train.task.name
suggested_params = copy(self.default_params)
if self.freeze_defaults:
# if user change defaults manually - keep it
return suggested_params
if task == 'reg':
suggested_params = {
"learning_rate": 0.05,
"max_leaves": 32
}
if rows_num <= 10000:
init_lr = 0.01
ntrees = 3000
es = 200
elif rows_num <= 20000:
init_lr = 0.02
ntrees = 3000
es = 200
elif rows_num <= 100000:
init_lr = 0.03
ntrees = 1200
es = 200
elif rows_num <= 300000:
init_lr = 0.04
ntrees = 2000
es = 100
else:
init_lr = 0.05
ntrees = 2000
es = 100
if rows_num > 300000:
suggested_params['max_leaves'] = 128 if task == 'reg' else 244
elif rows_num > 100000:
suggested_params['max_leaves'] = 64 if task == 'reg' else 128
elif rows_num > 50000:
suggested_params['max_leaves'] = 32 if task == 'reg' else 64
# params['reg_alpha'] = 1 if task == 'reg' else 0.5
elif rows_num > 20000:
suggested_params['max_leaves'] = 32 if task == 'reg' else 32
suggested_params['reg_alpha'] = 0.5 if task == 'reg' else 0.0
elif rows_num > 10000:
suggested_params['max_leaves'] = 32 if task == 'reg' else 64
suggested_params['reg_alpha'] = 0.5 if task == 'reg' else 0.2
elif rows_num > 5000:
suggested_params['max_leaves'] = 24 if task == 'reg' else 32
suggested_params['reg_alpha'] = 0.5 if task == 'reg' else 0.5
else:
suggested_params['max_leaves'] = 16 if task == 'reg' else 16
suggested_params['reg_alpha'] = 1 if task == 'reg' else 1
suggested_params['learning_rate'] = init_lr
suggested_params['n_estimators'] = ntrees
suggested_params['early_stopping_rounds'] = es
return suggested_params
def _get_default_search_spaces(self, suggested_params: Dict, estimated_n_trials: int) -> Dict:
"""Sample hyperparameters from suggested.
Args:
suggested_params: Dict with parameters.
estimated_n_trials: Maximum number of hyperparameter estimations.
Returns:
dict with sampled hyperparameters.
"""
optimization_search_space = {}
optimization_search_space['max_depth'] = SearchSpace(
Distribution.INTUNIFORM,
low=3,
high=7
)
optimization_search_space['max_leaves'] = SearchSpace(
Distribution.INTUNIFORM,
low=16,
high=255,
)
if estimated_n_trials > 30:
optimization_search_space['min_child_weight'] = SearchSpace(
Distribution.LOGUNIFORM,
low=1e-3,
high=10.0,
)
if estimated_n_trials > 100:
optimization_search_space['reg_alpha'] = SearchSpace(
Distribution.LOGUNIFORM,
low=1e-8,
high=10.0,
)
optimization_search_space['reg_lambda'] = SearchSpace(
Distribution.LOGUNIFORM,
low=1e-8,
high=10.0,
)
return optimization_search_space
def fit_predict_single_fold(self, train: TabularDatasetGpu, valid: TabularDatasetGpu, dev_id: int = 0) -> Tuple[xgb.Booster, np.ndarray]:
"""Implements training and prediction on single fold.
Args:
train: Train Dataset.
valid: Validation Dataset.
Returns:
Tuple (model, predicted_values)
"""
st = perf_counter()
train_target = train.target
train_weights = train.weights
valid_target = valid.target
valid_weights = valid.weights
train_data = train.data
valid_data = valid.data
with cp.cuda.Device(dev_id):
if type(train) == DaskCudfDataset:
train_target = train_target.compute()
if train_weights is not None:
train_weights = train_weights.compute()
valid_target = valid_target.compute()
if valid_weights is not None:
valid_weights = valid_weights.compute()
train_data = train_data.compute()
valid_data = valid_data.compute()
elif type(train) == CudfDataset:
train_target = train_target.copy()
if train_weights is not None:
train_weights = train_weights.copy()
valid_target = valid_target.copy()
if valid_weights is not None:
valid_weights = valid_weights.copy()
train_data = train_data.copy()
valid_data = valid_data.copy()
elif type(train_target) == cp.ndarray:
train_target = cp.copy(train_target)
if train_weights is not None:
train_weights = cp.copy(train_weights)
valid_target = cp.copy(valid_target)
if valid_weights is not None:
valid_weights = cp.copy(valid_weights)
train_data = cp.copy(train_data)
valid_data = cp.copy(valid_data)
else:
raise NotImplementedError("given type of input is not implemented:" + str(type(train_target)) + "class:" + str(self._name))
params, num_trees, early_stopping_rounds, verbose_eval, fobj, feval = self._infer_params()
train_target, train_weight = self.task.losses['xgb'].fw_func(train_target, train_weights)
valid_target, valid_weight = self.task.losses['xgb'].fw_func(valid_target, valid_weights)
xgb_train = xgb.DMatrix(train_data, label=train_target, weight=train_weight)
xgb_valid = xgb.DMatrix(valid_data, label=valid_target, weight=valid_weight)
params['gpu_id'] = dev_id
model = xgb.train(params, xgb_train, num_boost_round=num_trees, evals=[(xgb_train, 'train'), (xgb_valid, 'valid')],
obj=fobj, feval=feval, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose_eval
)
val_pred = model.inplace_predict(valid_data)
val_pred = self.task.losses['xgb'].bw_func(val_pred)
print(perf_counter() - st, "xgb single fold time")
with cp.cuda.Device(0):
val_pred = cp.copy(val_pred)
return model, val_pred
def predict_single_fold(self, model: xgb.Booster, dataset: TabularDatasetGpu) -> np.ndarray:
"""Predict target values for dataset.
Args:
model: Lightgbm object.
dataset: Test Dataset.
Return:
Predicted target values.
"""
dataset_data = dataset.data
if type(dataset) == DaskCudfDataset:
dataset_data = dataset_data.compute()
pred = self.task.losses['xgb'].bw_func(model.inplace_predict(dataset_data))
return pred
def get_features_score(self) -> pd.Series:
"""Computes feature importance as mean values of feature importance provided by lightgbm per all models.
Returns:
Series with feature importances.
"""
        # NOTE: gain importances are looked up by feature name and re-ordered to match self.features;
        # features absent from the booster's score dict get an importance of 0.
imp = 0
for model in self.models:
val = model.get_score(importance_type='gain')
sorted_list = [0.0 if val.get(i) is None else val.get(i) for i in self.features]
scores = np.array(sorted_list)
imp = imp + scores
imp = imp / len(self.models)
return | pd.Series(imp, index=self.features) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # MOJO Scoring: Two Approaches
#
# Now we will use the model we built on the Lending Club data to score the test cases we pickled. To mimick the scoring performance we would experience if the model were implemented in a real-time environment, we will score the records one at a time. We will use the MOJO we downloaded from H2O to score these records in two different ways:
#
# 1. Use the `mojo_predict_pandas` method from the `h2o.utils.shared_utils` to score one record at a time
#
# 2. Use the java application we just built to score one record at a time. To do so, we will first initialize a java virtual machine using python's `subprocess` package. This JVM will instantiate an instance of our scoring class, which loads the model just once at initialization. As we will see, loading the model once is far more efficient than repeatedly calling `mojo_predict_pandas`, which reloads the model for each call. We will then establish a gateway to our JVM using `JavaGateway` from `py4j` and score our test cases one at a time.
#
# Timing of these two approaches will show that the second approach is far faster than the first approach. On my machine, the first approach takes more than 300 *milliseconds* per record whereas the second approach takes less than 100 *microseconds* per record. For many real-time production applications, the difference between the second approach and the first approach is the difference between easily hitting an SLA and almost always failing to hit the SLA.
# ### Imports
# In[1]:
import os, sys, json, pickle
import pandas as pd
import subprocess
from ast import literal_eval
from py4j.java_gateway import JavaGateway
from h2o.utils import shared_utils as su
# ### Read in our pickled test cases and feature engineering pipeline
# In[2]:
test_data = pd.read_pickle('test_cases.pkl')
# In[3]:
with open('pipeline.pkl','rb') as f:
p = pickle.load(f)
# In[4]:
test_data.head()
# ### Apply feature engineering
#
# In real-time production scoring, these transformations would constribute to the end-to-end runtime of the application and therefore influence whether scoring achieves its SLA. Here we are primarily interested in the time it takes to score with the MOJO itself under the two approaches outlined above. Therefore, we do not include this in the timing.
# In[5]:
test_data_prepped = (
p.transform(test_data)
.reset_index(drop=True)
.drop(labels = 'loan_status',axis=1))
# In[6]:
test_data_prepped.head()
# In[7]:
predictors = test_data_prepped.columns.to_list()
# ### Scoring Approach 1: `h2o`'s `mojo_predict_pandas` method
# In[8]:
mojo_zip_path = 'lendingclub-app/src/main/resources/final_gbm.zip'
genmodel_jar_path = 'h2o-genmodel.jar'
records = [test_data_prepped.iloc[[i]] for i in range(test_data_prepped.shape[0])]
# In[9]:
get_ipython().run_cell_magic('timeit', '', '\nresults = []\n\nfor record in records:\n pred = su.mojo_predict_pandas(\n record,\n mojo_zip_path,\n genmodel_jar_path)\n results.append(pred)')
# In[10]:
results = []
for record in records:
pred = su.mojo_predict_pandas(
record,
mojo_zip_path,
genmodel_jar_path)
results.append(pred)
# In[11]:
# Predictions:
pd.concat(results)
# ### Scoring Approach 2: Our Java Application
# In[12]:
## Start JVM using subprocess
cmd = "java -cp " + "lendingclub-app/target/" + "lendingclub-app-1.0-SNAPSHOT-jar-with-dependencies.jar " + "com.lendingclub.app.MojoScoringEntryPoint"
jvm = subprocess.Popen(cmd)
# In[13]:
## Establish gateway with the JVM
gateway = JavaGateway()
mojoscorer = gateway.entry_point.getScorer()
# In[14]:
## Construct cases as list of JSON objects
cases = test_data_prepped[predictors].to_dict(orient='records')
cases = [json.dumps(case) for case in cases]
# In[15]:
get_ipython().run_cell_magic('timeit', '', 'results = []\n\nfor case in cases:\n results.append(literal_eval(mojoscorer.predict(case)))')
# In[16]:
results = []
for case in cases:
results.append(literal_eval(mojoscorer.predict(case)))
| pd.DataFrame(results) | pandas.DataFrame |
import pandas as pd
import glob
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["figure.dpi"] = 150
# MP2.5
df_mp25 = pd.DataFrame()
for i, file_name in enumerate(sorted(list(glob.glob('../data/*_mp25.csv')), reverse=True)):
temp = pd.read_csv(file_name, usecols=['date', 'name', 'val'])
temp = temp.set_index('date')
temp.index = | pd.to_datetime(temp.index) | pandas.to_datetime |
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from pandas._libs.tslibs import timezones
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.min}"
)
pac = Timestamp.min.tz_localize("US/Pacific")
assert pac.value > Timestamp.min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.max}"
)
tokyo = Timestamp.max.tz_localize("Asia/Tokyo")
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
| Timestamp.max.tz_localize("US/Pacific") | pandas.Timestamp.max.tz_localize |
#importing required modules
import pandas as pd
import numpy as np
#function to create or check required files
def create_file():
try:
exp = pd.read_csv('dom_expense.csv')
except FileNotFoundError:
exp = pd.DataFrame({'Housing': np.NaN,'Electricity': np.NaN,'Telecom': np.NaN,'Groceries': np.NaN,'Entertainment': np.NaN,'Healthcare': np.NaN,'Insurance': np.NaN,'Tax': np.NaN,'Other': np.NaN}, index=["Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec", "Jan","Feb","Mar"])
exp.to_csv('dom_expense.csv')
try:
inc = | pd.read_csv('dom_income.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 21:16:23 2021
@author: earnestt1234
"""
from collections.abc import Iterable
from copy import deepcopy
from difflib import SequenceMatcher
import os
import numpy as np
import pandas as pd
fixed_cols = ['Device_Number',
'Battery_Voltage',
'Motor_Turns',
'Session_Type',
'Event',
'Active_Poke',
'Left_Poke_Count',
'Right_Poke_Count',
'Pellet_Count',
'Retrieval_Time',]
needed_cols = ['Pellet_Count',
'Left_Poke_Count',
'Right_Poke_Count',]
data_dict = {'pellets': 'Pellet_Count',
'left_pokes': 'Left_Poke_Count',
'right_pokes': 'Right_Poke_Count'}
zero_date = pd.Timestamp(year=1980, month=1, day=1)
class FEDFrame(pd.DataFrame):
_metadata = ('name', 'path', 'foreign_columns', 'missing_columns',
'_alignment', '_current_offset')
@property
def _constructor(self):
return FEDFrame
@property
def mode(self):
return self.determine_mode()
@property
def events(self):
return len(self.data)
@property
def start_time(self):
return pd.Timestamp(self.index.values[0])
@property
def end_time(self):
return pd.Timestamp(self.index.values[-1])
@property
def duration(self):
return self.end_time-self.start_time
def _load_init(self, name=None, path=None):
self.name = name
self.path = path
self.fix_column_names()
self._handle_retrieval_time()
self._alignment = 'datetime'
self._current_offset = pd.Timedelta(0)
def fix_column_names(self):
self.foreign_columns = []
for col in self.columns:
for fix in fixed_cols:
likeness = SequenceMatcher(a=col, b=fix).ratio()
if likeness > 0.85:
self.rename(columns={col:fix}, inplace=True)
break
self.foreign_columns.append(col)
self.missing_columns = [col for col in needed_cols if
col not in self.columns]
def determine_mode(self):
mode = 'Unknown'
column = | pd.Series(dtype=object) | pandas.Series |
"""
Analysis of the PSID for PS 1 in Gianluca Violante's quantitative
macro course
@author : <NAME> <<EMAIL>>
@date : 2015-02-04 16:57:38
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
| pd.set_option("use_inf_as_null", True, "display.width", 180) | pandas.set_option |
import argparse
import os
from os import listdir
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import yaml
from constants.dataset_tables import ModelsTableHeader, DatasetTableHeader
from file_actions.readers.tomograms import load_tomogram
from file_actions.writers.csv import build_tom_motive_list
from file_actions.writers.tomogram import write_tomogram
from networks.utils import build_prediction_output_dir
from tomogram_utils.coordinates_toolbox.clustering import get_cluster_centroids
parser = argparse.ArgumentParser()
parser.add_argument("-config", "--config", help="yaml_file", type=str)
parser.add_argument("-set", "--set",
help="tomos set name to be used for training", type=int)
args = parser.parse_args()
yaml_file = args.config
config = yaml.safe_load(open(yaml_file))
tomos_set = args.set
tomo_list = config['tomos_sets'][tomos_set]['test_list']
class_number = config['prediction']['class_number']
model_name = config["model_path"][:-4]
output_dir = config["pred_output_dir"]
models_table = os.path.join(output_dir, "models")
models_table = os.path.join(models_table, "models.csv")
label_name = model_name
motl_parameters = config['clustering_parameters']
min_cluster_size = motl_parameters['min_cluster_size']
max_cluster_size = motl_parameters['max_cluster_size']
threshold = motl_parameters['threshold']
ModelsHeader = ModelsTableHeader()
models_df = | pd.read_csv(models_table, dtype=ModelsHeader.dtype_dict) | pandas.read_csv |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Files are downloaded and manually randomly divided into different folders
the following code is repeated but has the same effect, it is applied to various folders to
generate pandas data frames and to store all the data in a single hdf5 file
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = | pd.merge(mzid_df,spectra_df,how='left',on=['file','id']) | pandas.merge |
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
    # Take the interactions from the day before the label day as the samples to be labeled
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
    # Drop duplicate (user, item) pairs from the training samples
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
    # Label the samples using the actual purchases on the label day
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
    # Attach labels to the previous day's interaction records
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
dict = {True: 1, False: 0}
data_train_labeled = data_train_labeled.map(dict)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
def get_label_testset(train_user,LabelDay):
    # The test set is all interaction data from the previous day
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_unq = data.groupby(by = ['item_id','behavior_type']).agg({"user_id":lambda x:x.nunique()});item_count_unq = item_count_unq.unstack()
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_id,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_id_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,countAverage,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,buyRate,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_unq,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_id_feture.fillna(0,inplace=True)
return item_id_feture
def user_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_count = pd.crosstab(data.user_id,data.behavior_type)
user_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
user_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
user_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayuser_count = pd.crosstab(beforeoneday.user_id,beforeoneday.behavior_type)
countAverage = user_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = user_count[1]/user_count[4]
buyRate['skim'] = user_count[2]/user_count[4]
buyRate['collect'] = user_count[3]/user_count[4]
buyRate.index = user_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = user_count_before5[1]/user_count_before5[4]
buyRate_2['skim'] = user_count_before5[2]/user_count_before5[4]
buyRate_2['collect'] = user_count_before5[3]/user_count_before5[4]
buyRate_2.index = user_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = user_count_before_3[1]/user_count_before_3[4]
buyRate_3['skim'] = user_count_before_3[2]/user_count_before_3[4]
buyRate_3['collect'] = user_count_before_3[3]/user_count_before_3[4]
buyRate_3.index = user_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
long_online = pd.pivot_table(beforeoneday,index=['user_id'],values=['hours'],aggfunc=[np.min,np.max,np.ptp])
user_id_feture = pd.merge(user_count,beforeonedayuser_count,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,countAverage,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,buyRate,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before5,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_3,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_2,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,long_online,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
user_id_feture.fillna(0,inplace=True)
return user_id_feture
def user_item_feture(data,end_time,beforeoneday):
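    # Build (user, item) pair features: behavior-type counts over the full window and over
    # shorter look-back windows, analogous to the per-user features above.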
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_item_count = pd.crosstab([data.user_id,data.item_id],data.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
user_item_count_5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_item_count_5 = | pd.crosstab([beforefiveday.user_id,beforefiveday.item_id],beforefiveday.behavior_type) | pandas.crosstab |
import pandas as pd
series = pd.Series([21, 23, 56, 54])
print(series)
print(series.index)
print(series.values)
print(series.dtype)
brands = ['BMW', 'Jaguar', 'Ford', 'Kia']
quantities = [20, 10, 50, 75]
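# Passing index= labels the values with the brand names instead of the default 0..n-1 integer index.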
series2 = | pd.Series(quantities, index=brands) | pandas.Series |
from albumentations.augmentations.transforms import Normalize
import torch.nn as nn
import torchvision.models as models
from torch.utils.data import Dataset
import torch
import albumentations as A
from albumentations.pytorch import ToTensorV2
from pathlib import Path
import numpy as np
import re
import umap
import pandas as pd
from PIL import Image
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.pyplot as plt
""" Programm to evaluate if there is a significant domain gap between two datasets
To see if there is a domain gap, a pretrained Resnet50 is used to extract features from both datasets and UMAP is used for unsupervised clustering. When distinct clusters for both datasets
are formed, there is a domain gap present.
The domain gap can be evaluated for native Tharun and Thompson and upscaled Nikiforov as well as native Nikiforov and downscaled Tharun and Thompson.
Furthermore, it can be evaluated on the native version on both datasets.
"""
native_dataset = "N" # T, N or both
N_folder_20x = Path(__file__).parent.joinpath("..", "datasets", "Nikiforov").resolve()
N_folder_40x = Path(__file__).parent.joinpath("..", "datasets", "Nikiforov_upscale2x").resolve()
T_folder_40x = Path(__file__).parent.joinpath("..", "datasets", "TharunThompson").resolve()
T_folder_20x = Path(__file__).parent.joinpath("..", "datasets", "TharunThompson_downscale2x").resolve()
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff',
'.TIFF', '.tif', '.TIF']
def get_image_paths(folder, substring=None):
image_paths = []
for file in folder.iterdir():
if any(file.suffix == extension for extension in IMG_EXTENSIONS):
            if substring is None:
image_paths.append(file)
else:
if substring in file.name:
image_paths.append(file)
return np.asarray(image_paths)
def merge_path_gt(image_paths, ground_truth, dataset):
patient_numbers = np.zeros(len(image_paths))
diagnose_grouped = []
T_paths = np.asarray(image_paths)
for i, image_path in enumerate(image_paths):
        # if a patient has multiple images (e.g. 1a, 1b, ...), the letter suffix is stripped so only the number remains
patient_numbers[i] = re.sub('[^0-9]', '', image_path.stem.split("_")[0])
diagnose_grouped.append(ground_truth[ground_truth["sample"]==patient_numbers[i]]["diagnose_grouped"].values[0])
unique_patient_numbers = np.unique(patient_numbers)
merged_info = pd.DataFrame(np.array([image_paths, patient_numbers, diagnose_grouped]).transpose(), columns=["path", "patient_number", "diagnose_grouped"])
merged_info["dataset"]= dataset
return merged_info
def draw_scatter(data, scatter_path, target):
umap_plt = sns.scatterplot(data=data, x="UMAP 1", y="UMAP 2", hue=target)
#umap_plt.set(title="Umap thyroid tumor")
umap_fig = umap_plt.get_figure()
umap_fig.savefig(scatter_path, bbox_inches="tight")
plt.close(umap_fig)
def apply_umap(measures, features, native_dataset, target="target", hparams={}):
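    # Standardize the selected feature columns, project them to 2D with UMAP and save a
    # scatter plot colored by the chosen target column.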
# only keep patient, feature selection, diagnose
measures_umap = measures.copy()
scaler = StandardScaler()
measures_umap.reset_index(inplace=True)
measures_umap[features] = pd.DataFrame(scaler.fit_transform(measures_umap[features]), columns=features)
reducer = umap.UMAP(**hparams)
embedding = reducer.fit_transform(measures_umap[features].values)
embedding = pd.DataFrame(list(zip(embedding[:,0], embedding[:,1], measures_umap[target], measures_umap["path"])), columns=["UMAP 1", "UMAP 2", target, "path"])
draw_scatter(embedding, Path(__file__).parent.joinpath("domain_gap_"+target+"_native"+native_dataset+"_umap.png"), target)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class DomainGapDataset(Dataset):
def __init__(self, dataset_info, transform=None):
self.dataset_info = dataset_info
self.transform = transform
def __len__(self):
return len(self.dataset_info)
def __getitem__(self, index):
img = Image.open(self.dataset_info["path"][index])
target = self.dataset_info["diagnose_grouped"][index]
if self.transform is not None:
data = self.transform(image=np.array(img), target= target)
return data
def extract_dl_features(image_info, features_path):
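    # Run every image through an ImageNet-pretrained ResNet50 and cache the 2048-d feature
    # vectors to an HDF5 file alongside the image metadata.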
trans = A.Compose([
A.Normalize(),
ToTensorV2()
])
dataset = DomainGapDataset(image_info, transform=trans)
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
net = models.resnet50(pretrained=True)
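    # Replace the final classification layer with an identity so the network returns the 2048-d pooled features.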
net.fc = Identity()
net.to(torch.device("cuda"))
net.eval()
dl_features = np.zeros([len(loader), 2048])
with torch.no_grad():
for step, item in enumerate(loader):
item["image"]= item["image"].to(torch.device("cuda"))
features = net(item["image"]).cpu().numpy()
dl_features[step,:] = features.squeeze()
columns = ["feature_"+str(i) for i in range(dl_features.shape[1])]
dl_features_pd = pd.DataFrame(data=dl_features, columns=columns)
dl_features = pd.concat([image_info, dl_features_pd], axis=1)
dl_features.to_hdf(features_path, key="dl_features", mode="w")
return dl_features
if native_dataset == "T":
# get original resolution of the Tharun and Thompson dataset (40x)
T_paths = get_image_paths(T_folder_40x)
T_ground_truth = pd.read_excel(T_folder_40x.joinpath("ground_truth.xlsx"), engine="openpyxl")
T_data = merge_path_gt(T_paths, T_ground_truth, dataset="Tharun and Thompson")
# get upscaled Nikiforov dataset (matching the T&T dataset)
N_paths = get_image_paths(N_folder_40x)
N_ground_truth = pd.read_excel(N_folder_40x.joinpath("ground_truth.xlsx"), engine="openpyxl")
N_data = merge_path_gt(N_paths, N_ground_truth, dataset="Nikiforov")
image_info = pd.concat([T_data, N_data], ignore_index=True)
features_path = Path(__file__).parent.joinpath("dl_features_nativeT.h5")
elif native_dataset == "N":
# get original Nikiforov dataset (20x)
N_paths = get_image_paths(N_folder_20x)
N_ground_truth = pd.read_excel(N_folder_20x.joinpath("ground_truth.xlsx"), engine="openpyxl")
N_data = merge_path_gt(N_paths, N_ground_truth, dataset="Nikiforov")
# get downscaled Tharun and Thompson dataset (matching the Nikiforv dataset)
T_paths = get_image_paths(T_folder_20x)
T_ground_truth = pd.read_excel(T_folder_20x.joinpath("ground_truth.xlsx"), engine="openpyxl")
T_data = merge_path_gt(T_paths, T_ground_truth, dataset="Tharun and Thompson")
image_info = | pd.concat([T_data, N_data], ignore_index=True) | pandas.concat |
import contextlib
from dateutil.tz import tzlocal
import numpy as np
import os
import pandas as pd
import sys
import warnings
from datetime import date, datetime
from scipy import stats
def date_range(df, begin, end):
"""Extract out a certain date range from a DataFrame.
Extract out a certain data range from a dataframe. The index must be the
dates, and the index must be sorted.
"""
# TODO: is this needed? Do normal pandas operation, timestamp
# checking is not really needed (and limits the formats that can
# be used, pandas can take more than pd.Timestamp)
# Move this function to utils
# Deal with pandas timestamp compatibility
    if begin is not None:
        assert isinstance(begin, pd.Timestamp), "begin not given in timestamp format"
    else:
        begin = df.index[0]
    if end is not None:
        assert isinstance(end, pd.Timestamp), "end not given in timestamp format"
    else:
        end = df.index[-1]
df_new = df.loc[begin:end]
return df_new
#SYSTEM_TZ = tzlocal() # the operating system timezone - for sqlite output compat
SYSTEM_TZ = 'Europe/Helsinki'
TZ = tzlocal()
TZ = 'Europe/Helsinki'
def set_tz(tz):
"""Globally set the preferred local timezone"""
global TZ
TZ = tz
@contextlib.contextmanager
def tmp_timezone(new_tz):
"""Temporarily override the global timezone for a black.
This is used as a context manager::
with tmp_timezone('Europe/Berlin'):
....
Note: this overrides the global timezone. In the future, there will
be a way to handle timezones as non-global variables, which should
be preferred.
"""
global TZ
old_tz = TZ
TZ = new_tz
yield
TZ = old_tz
SQLITE3_EXTENSIONS_BASENAME = os.path.join(os.path.dirname(__file__), 'sqlite-extension-functions.c')
SQLITE3_EXTENSIONS_FILENAME = os.path.join(os.path.dirname(__file__), 'sqlite-extension-functions.so')
def install_extensions():
"""Automatically install sqlite extension functions.
Only works on Linux for now, improvements welcome."""
import hashlib
if not os.path.exists(SQLITE3_EXTENSIONS_BASENAME):
import urllib.request
extension_url = 'https://sqlite.org/contrib/download/extension-functions.c?get=25'
urllib.request.urlretrieve(extension_url, SQLITE3_EXTENSIONS_BASENAME)
expected_digest = '991b40fe8b2799edc215f7260b890f14a833512c9d9896aa080891330ffe4052'
if hashlib.sha256(open(SQLITE3_EXTENSIONS_BASENAME, 'rb').read()).hexdigest() != expected_digest:
print("sqlite-extension-functions.c has wrong sha256 hash", file=sys.stderr)
os.system('cd %s; gcc -lm -shared -fPIC sqlite-extension-functions.c -o sqlite-extension-functions.so'%
os.path.dirname(__file__))
print("Sqlite extension successfully compiled.")
def uninstall_extensions():
"""Uninstall any installed extensions"""
def unlink_if_exists(x):
if os.path.exists(x):
os.unlink(x)
unlink_if_exists(SQLITE3_EXTENSIONS_FILENAME)
#TODO: reanme to data.py
def df_normalize(df, tz=None, old_tz=None):
"""Normalize a df (from sql) before presenting it to the user.
This sets the dataframe index to the time values, and converts times
to pandas.TimeStamp:s. Modifies the data frame inplace.
"""
if tz is None:
        warnings.warn(DeprecationWarning("From now on, you should explicitly specify the timezone, e.g. tz='Europe/Helsinki'. Specify it as part of the reading function."))
tz = TZ
if 'time' in df:
df.index = to_datetime(df['time'])
df.index.name = None
df['datetime'] = df.index
elif 'day' in df and 'hour' in df:
index = df[['day', 'hour']].apply(lambda row: pd.Timestamp('%s %s:00'%(row['day'], row['hour'])), axis=1)
if old_tz is not None:
# old_tz is given - e.g. sqlite already converts it to localtime
index = index.dt.tz_localize(old_tz).dt.tz_convert(tz)
else:
index = index.dt.tz_localize(tz)
df.index = index
df.index.name = None
def to_datetime(value):
times = | pd.to_datetime(value, unit='s', utc=True) | pandas.to_datetime |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
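        # Timedelta.value is the duration as an int64 count of nanoseconds; build the expected value for 10 days.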
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual( | Timedelta(days=10, seconds=10) | pandas.Timedelta |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 19:46:26 2021
@author: KristinaSig
"""
import unittest
import pandas as pd
from code.feature_extraction.mentions_count import MentionsCountFeature
from code.util import COLUMN_MENTIONS
class HashtagFeatureTest(unittest.TestCase):
def setUp(self):
self.COLUMN_MENTIONS = COLUMN_MENTIONS
self.mentions_feature = MentionsCountFeature()
self.df = | pd.DataFrame() | pandas.DataFrame |