prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90)
---|---|---|
import os
from typing import List, Dict
import nltk.data
import pandas as pd
import re
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from nltk.tokenize.punkt import PunktSentenceTokenizer
from pandarallel import pandarallel
from tika import parser as tikaparser
class PDFParser:
def __init__(self, books_directory: str = "books/"):
self.directory = books_directory
def parse(self) -> List[str]:
""" Parses the raw pdf(s) provided in the books_directory
:return: A list with the parsed text of the pdf(s)
:rtype: List[str]
"""
corpus = []
for book in os.listdir(self.directory):
temp = tikaparser.from_file(self.directory + book)["content"]
temp = " ".join(temp.split("Chapter I"))
corpus.append(temp)
return corpus
def clean(self, corpus: List[str]) -> pd.DataFrame:
""" Applies basic text cleaning.
:param corpus: A list with the parsed text of the pdf(s)
:type corpus: List[str]
:return: A DataFrame with a single column named "text" containing the cleaned input
:rtype: pd.DataFrame
"""
corpus = [strip_multiple_whitespaces(n) for n in corpus]
corpus = [n.encode("ascii", "ignore").decode() for n in corpus]
corpus = | pd.DataFrame({"text": corpus}) | pandas.DataFrame |
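A minimal usage sketch for the PDFParser class above, assuming a local "books/" directory of PDFs, a reachable Tika server, and that the truncated clean() method returns the DataFrame it builds, as its docstring states:
parser = PDFParser(books_directory="books/")
raw_corpus = parser.parse()          # one raw text string per PDF
df_clean = parser.clean(raw_corpus)  # single-column DataFrame named "text"
print(df_clean.head())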
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # OER Analysis Notebook
# ---
#
# * Compute overpotential for all systems
# * Save ORR_PLT instance for OXR plotting classes
# * Save df_overpot dataframe to combine with df_features_targets
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
# #########################################################
# Python Modules
import pickle
import numpy as np
import pandas as pd
# #########################################################
# My Modules
from oxr_reaction.oxr_rxn import ORR_Free_E_Plot
from methods import (
get_df_ads,
get_df_job_ids,
get_df_dft,
get_df_features_targets,
)
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
show_plot = True
else:
from tqdm import tqdm
verbose = False
show_plot = False
# +
# #########################################################
df_dft = get_df_dft()
# #########################################################
df_job_ids = get_df_job_ids()
# #########################################################
df_features_targets = get_df_features_targets()
# +
if verbose:
print(
"Change in size of df_features from dropping non-complete rows:"
"\n",
df_features_targets.shape[0],
sep="")
# Only pass through OER sets that are 100% done with all calculations
# if True:
if False:
df_features_targets = df_features_targets[df_features_targets["data"]["all_done"] == True]
if verbose:
print(
df_features_targets.shape[0],
sep="")
# +
smart_format_dict = [
[{"stoich": "AB2"}, {"color2": "black"}],
[{"stoich": "AB3"}, {"color2": "grey"}],
]
ORR_PLT = ORR_Free_E_Plot(
free_energy_df=None,
state_title="ads",
free_e_title="ads_g",
smart_format=smart_format_dict,
color_list=None,
rxn_type="OER")
# new_col = (df_features_targets["targets"]["g_oh"] + 2.8)
new_col = (1.16 * df_features_targets["targets"]["g_oh"] + 2.8)
new_col.name = ("targets", "g_ooh", "", )
df_features_targets = pd.concat([
new_col,
df_features_targets,
], axis=1)
# Loop through data and add to ORR_PLT
data_dict_list_0 = []
for name_i, row_i in df_features_targets.iterrows():
# #####################################################
g_o_i = row_i[("targets", "g_o", "", )]
g_oh_i = row_i[("targets", "g_oh", "", )]
g_ooh_i = row_i[("targets", "g_ooh", "", )]
slab_id_i = row_i[("data", "slab_id", "")]
active_site_i = row_i[("data", "active_site", "")]
job_id_o_i = row_i[("data", "job_id_o", "")]
job_id_oh_i = row_i[("data", "job_id_oh", "")]
# #####################################################
# #####################################################
df_job_ids_i = df_job_ids[df_job_ids.slab_id == slab_id_i]
bulk_ids = df_job_ids_i.bulk_id.unique()
mess_i = "Expected exactly one bulk_id for this slab_id"
assert len(bulk_ids) == 1, mess_i
bulk_id_i = bulk_ids[0]
# #########################################################
row_dft_i = df_dft.loc[bulk_id_i]
# #########################################################
stoich_i = row_dft_i.stoich
# #########################################################
data_dict_list = [
{"ads_g": g_o_i, "ads": "o", },
{"ads_g": g_oh_i, "ads": "oh", },
{"ads_g": g_ooh_i, "ads": "ooh", },
{"ads_g": 0., "ads": "bulk", },
]
df_i = | pd.DataFrame(data_dict_list) | pandas.DataFrame |
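A minimal, hypothetical sketch of the overpotential computation that the notebook above sets up (the ORR_Free_E_Plot machinery is not reproduced; the standard four-electron OER steps, a 4.92 eV total reaction free energy, and the 1.23 V equilibrium potential are assumed, and the function name is illustrative):
def compute_oer_overpotential(g_o, g_oh, g_ooh, g_total=4.92):
    """Limiting-potential overpotential (V) from adsorbate free energies (eV)."""
    dg_steps = [
        g_oh,             # * + H2O  -> *OH  + H+ + e-
        g_o - g_oh,       # *OH      -> *O   + H+ + e-
        g_ooh - g_o,      # *O + H2O -> *OOH + H+ + e-
        g_total - g_ooh,  # *OOH     -> *    + O2 + H+ + e-
    ]
    return max(dg_steps) - 1.23

# Example with the scaling relation used above (g_ooh = 1.16 * g_oh + 2.8):
print(compute_oer_overpotential(g_o=2.5, g_oh=1.0, g_ooh=1.16 * 1.0 + 2.8))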
# -*- coding: utf-8 -*-
import copy
import datetime
from unittest import mock
import numpy
import pandas
from pandas import DataFrame
import pkg_resources
import pytest
from pandas_gbq import gbq
pytestmark = pytest.mark.filter_warnings(
"ignore:credentials from Google Cloud SDK"
)
pandas_installed_version = pkg_resources.get_distribution(
"pandas"
).parsed_version
def _make_connector(project_id="some-project", **kwargs):
return gbq.GbqConnector(project_id, **kwargs)
@pytest.fixture
def min_bq_version():
import pkg_resources
return pkg_resources.parse_version("1.11.0")
def mock_get_credentials_no_project(*args, **kwargs):
import google.auth.credentials
mock_credentials = mock.create_autospec(
google.auth.credentials.Credentials
)
return mock_credentials, None
def mock_get_credentials(*args, **kwargs):
import google.auth.credentials
mock_credentials = mock.create_autospec(
google.auth.credentials.Credentials
)
return mock_credentials, "default-project"
@pytest.fixture
def mock_service_account_credentials():
import google.oauth2.service_account
mock_credentials = mock.create_autospec(
google.oauth2.service_account.Credentials
)
return mock_credentials
@pytest.fixture
def mock_compute_engine_credentials():
import google.auth.compute_engine
mock_credentials = mock.create_autospec(
google.auth.compute_engine.Credentials
)
return mock_credentials
@pytest.fixture(autouse=True)
def no_auth(monkeypatch):
import pydata_google_auth
monkeypatch.setattr(pydata_google_auth, "default", mock_get_credentials)
@pytest.mark.parametrize(
("type_", "expected"),
[
("INTEGER", None), # Can't handle NULL
("BOOLEAN", None), # Can't handle NULL
("FLOAT", numpy.dtype(float)),
# TIMESTAMP will be localized after DataFrame construction.
("TIMESTAMP", "datetime64[ns]"),
("DATETIME", "datetime64[ns]"),
],
)
def test__bqschema_to_nullsafe_dtypes(type_, expected):
result = gbq._bqschema_to_nullsafe_dtypes(
[dict(name="x", type=type_, mode="NULLABLE")]
)
if not expected:
assert result == {}
else:
assert result == {"x": expected}
def test_GbqConnector_get_client_w_old_bq(monkeypatch, mock_bigquery_client):
gbq._test_google_api_imports()
connector = _make_connector()
monkeypatch.setattr(gbq, "HAS_CLIENT_INFO", False)
connector.get_client()
# No client_info argument.
mock_bigquery_client.assert_called_with(
credentials=mock.ANY, project=mock.ANY
)
def test_GbqConnector_get_client_w_new_bq(mock_bigquery_client):
gbq._test_google_api_imports()
pytest.importorskip(
"google.cloud.bigquery", minversion=gbq.BIGQUERY_CLIENT_INFO_VERSION
)
pytest.importorskip("google.api_core.client_info")
connector = _make_connector()
connector.get_client()
_, kwargs = mock_bigquery_client.call_args
assert kwargs["client_info"].user_agent == "pandas-{}".format(
pandas.__version__
)
def test_to_gbq_should_fail_if_invalid_table_name_passed():
with pytest.raises(gbq.NotFoundException):
gbq.to_gbq(DataFrame([[1]]), "invalid_table_name", project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(monkeypatch):
import pydata_google_auth
monkeypatch.setattr(
pydata_google_auth, "default", mock_get_credentials_no_project
)
with pytest.raises(ValueError, match="Could not determine project ID"):
gbq.to_gbq(DataFrame([[1]]), "dataset.tablename")
def test_to_gbq_with_verbose_new_pandas_warns_deprecation(min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
DataFrame([[1]]),
"dataset.tablename",
project_id="my-project",
verbose=True,
)
except gbq.TableCreationError:
pass
def test_to_gbq_with_not_verbose_new_pandas_warns_deprecation(min_bq_version):
import pkg_resources
pandas_version = pkg_resources.parse_version("0.23.0")
with pytest.warns(FutureWarning), mock.patch(
"pkg_resources.Distribution.parsed_version",
new_callable=mock.PropertyMock,
) as mock_version:
mock_version.side_effect = [min_bq_version, pandas_version]
try:
gbq.to_gbq(
| DataFrame([[1]]) | pandas.DataFrame |
########################################
########## ASTRO FUNCTIONS #############
########################################
def parse_sr_ss(inputs, difference_from_utc_standard):
import os
import datetime as dt
import pandas as pd
now = dt.datetime.now()
year = now.year
columns = ['period','month','day','event','rawtime','min','hour','time','test']
filename = 'Sunrise Sunset-' + str(year) + '.txt'
firstdata = 9
maxdays = 31
start = dt.datetime(year,1,1,0,0)
## NOTE: Input files must be named in the format: name + "-" + date("YYYY-MM-DD")
# FIND OPPORTUNITY DATA
os.chdir(inputs['astro'])
# Using the newer with construct to close the file automatically.
with open(filename) as f:
data = f.readlines()
# cut off extra crap
raw = data[firstdata:firstdata+maxdays]
test = raw
data = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import pandas as pd
def get_replies(df):
'''Extracts the replies to the comments as well as the nested
replies to those replies, adds all of them to the original dataframe
and returns the dataframe.'''
if 'replyCount' in df.columns:
selected_df = df.loc[df.replyCount>0]
if selected_df.shape[0] > 0:
df_list = []
for idx, row in selected_df.iterrows():
replies_df = pd.DataFrame(row.replies)
replies_df['inReplyTo'] = row.commentID
df_list.append(replies_df)
all_replies_df = | pd.concat([replies_df for replies_df in df_list]) | pandas.concat |
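A minimal, hypothetical usage sketch for get_replies above (a toy comment frame; assumes the truncated function returns the combined DataFrame described in its docstring):
toy = pd.DataFrame({
    'commentID': [1, 2],
    'replyCount': [1, 0],
    'replies': [[{'commentID': 10, 'replyCount': 0, 'replies': []}], []],
})
combined = get_replies(toy)  # the reply to comment 1 appears with inReplyTo == 1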
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
from cosypose.utils.tqdm import patch_tqdm; patch_tqdm() # noqa
import torch.multiprocessing
import time
import json
from collections import OrderedDict
import yaml
import argparse
import torch
import numpy as np
import pandas as pd
import pickle as pkl
import logging
from cosypose.config import EXP_DIR, MEMORY, RESULTS_DIR, LOCAL_DATA_DIR
from cosypose.utils.distributed import init_distributed_mode, get_world_size, get_rank
from cosypose.datasets.samplers import DistributedSceneSampler
from cosypose.lib3d import Transform
from cosypose.lib3d.rigid_mesh_database import MeshDataBase
from cosypose.training.pose_models_cfg import create_model_refiner, create_model_coarse, check_update_config
from cosypose.rendering.bullet_batch_renderer import BulletBatchRenderer
from cosypose.integrated.pose_predictor import CoarseRefinePosePredictor
from cosypose.integrated.multiview_predictor import MultiviewScenePredictor
from cosypose.evaluation.meters.pose_meters import PoseErrorMeter
from cosypose.evaluation.pred_runner.multiview_predictions import MultiviewPredictionRunner
from cosypose.evaluation.eval_runner.pose_eval import PoseEvaluation
import cosypose.utils.tensor_collection as tc
from cosypose.evaluation.runner_utils import format_results, gather_predictions
from cosypose.utils.distributed import get_rank
from cosypose.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
from cosypose.datasets.bop import remap_bop_targets
from cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
from cosypose.datasets.samplers import ListSampler
from cosypose.utils.logging import get_logger
logger = get_logger(__name__)
torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@MEMORY.cache
def load_posecnn_results():
results_path = LOCAL_DATA_DIR / 'saved_detections' / 'ycbv_posecnn.pkl'
results = pkl.loads(results_path.read_bytes())
infos, poses, bboxes = [], [], []
l_offsets = (LOCAL_DATA_DIR / 'bop_datasets/ycbv' / 'offsets.txt').read_text().strip().split('\n')
ycb_offsets = dict()
for l_n in l_offsets:
obj_id, offset = l_n[:2], l_n[3:]
obj_id = int(obj_id)
offset = np.array(json.loads(offset)) * 0.001
ycb_offsets[obj_id] = offset
def mat_from_qt(qt):
wxyz = qt[:4].copy().tolist()
xyzw = [*wxyz[1:], wxyz[0]]
t = qt[4:].copy()
return Transform(xyzw, t)
for scene_view_str, result in results.items():
scene_id, view_id = scene_view_str.split('/')
scene_id, view_id = int(scene_id), int(view_id)
n_dets = result['rois'].shape[0]
for n in range(n_dets):
obj_id = result['rois'][:, 1].astype(np.int)[n]
label = f'obj_{obj_id:06d}'
infos.append(dict(
scene_id=scene_id,
view_id=view_id,
score=result['rois'][n, 1],
label=label,
))
bboxes.append(result['rois'][n, 2:6])
pose = mat_from_qt(result['poses'][n])
offset = ycb_offsets[obj_id]
pose = pose * Transform((0, 0, 0, 1), offset).inverse()
poses.append(pose.toHomogeneousMatrix())
data = tc.PandasTensorCollection(
infos= | pd.DataFrame(infos) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os, glob
import cv2
import time
from Customer import Customer
from Supermarket import Supermarket
from tiles_skeleton import SupermarketMap, MARKET
def create_unique_customer_id(row):
return row['timestamp'].dayofyear * 100000 + row['customer_no']
def create_dataframe():
df = pd.concat(map(lambda f: pd.read_csv(f, delimiter=';'),glob.glob('data/*.csv')))
df['timestamp'] = pd.to_datetime(df['timestamp'], format="%Y-%m-%d %H:%M:%S")
df['hour']= df['timestamp'].dt.hour
df['minute'] = df['timestamp'].dt.minute
df['weekday'] = df['timestamp'].dt.day_name()
df['customer_id'] = df.apply(lambda row: create_unique_customer_id(row), axis=1)
df = df.drop(['customer_no'], axis = 1)
return df
def create_transition_matrix(df):
trans = df.groupby(['customer_id', 'timestamp', 'location']).all().reset_index()
trans['after'] = trans.groupby('customer_id')['location'].shift(-1)
trans = trans.rename(columns={'location':'before'})
trans = trans.drop(['customer_id','timestamp','hour','minute','weekday'], axis=1)
trans = trans.fillna('checkout')
matrix = | pd.crosstab(trans['before'], trans['after'], normalize=0) | pandas.crosstab |
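A minimal, hypothetical sketch of how the normalized transition matrix above (rows are 'before' locations, columns are 'after', each row summing to 1) can drive a one-step Markov simulation of a customer's next location; the function name is illustrative:
import numpy as np

def next_location(current, matrix, rng=None):
    """Sample the next supermarket section from the row of transition probabilities."""
    rng = rng or np.random.default_rng()
    probs = matrix.loc[current]
    return rng.choice(probs.index.to_numpy(), p=probs.to_numpy())

# e.g. next_location('dairy', matrix) might yield 'checkout', 'spices', ...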
import sys, requests
import pandas as pd
class Deck:
"""Manage deck statistics here"""
def __init__(self, deck_name='', export_json=None):
if export_json != None:
self.cards = pd.read_json(export_json['cards'])
self.side_board = pd.read_json(export_json['side_board'])
else:
self.cards = pd.DataFrame()
self.side_board = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @author: songjie
# @email: <EMAIL>
# @date: 2021/03/24
# SJ coding conventions
# Naming:
# 1. Names should be self-explanatory; a variable's name must accurately reflect its meaning and content
# 2. Follow the variable-naming rules of the language in use
# 3. Do not reuse one variable name for different purposes
# 4. Do not use different names for the same thing within one project
# 5. Functions/methods use verb + noun combinations; everything else uses noun combinations
# Design principles:
# 1. KISS principle: Keep it simple and stupid!
# 2. SOLID principles: S: single responsibility O: open/closed L: Liskov substitution I: interface segregation D: dependency inversion
#
import calendar
from itertools import product
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
def build_shop_features(shops_df: pd.DataFrame):
"""
Build shop features
:param shops_df:
:return:
"""
shops_df['city'] = shops_df['shop_name'].apply(lambda x: x.split()[0].lower())
shops_df.loc[shops_df.city == '!якутск', 'city'] = 'якутск'
shops_df['city_code'] = LabelEncoder().fit_transform(shops_df['city'])
coords = dict()
coords['якутск'] = (62.028098, 129.732555, 4)
coords['адыгея'] = (44.609764, 40.100516, 3)
coords['балашиха'] = (55.8094500, 37.9580600, 1)
coords['волжский'] = (53.4305800, 50.1190000, 3)
coords['вологда'] = (59.2239000, 39.8839800, 2)
coords['воронеж'] = (51.6720400, 39.1843000, 3)
coords['выездная'] = (0, 0, 0)
coords['жуковский'] = (55.5952800, 38.1202800, 1)
coords['интернет-магазин'] = (0, 0, 0)
coords['казань'] = (55.7887400, 49.1221400, 4)
coords['калуга'] = (54.5293000, 36.2754200, 4)
coords['коломна'] = (55.0794400, 38.7783300, 4)
coords['красноярск'] = (56.0183900, 92.8671700, 4)
coords['курск'] = (51.7373300, 36.1873500, 3)
coords['москва'] = (55.7522200, 37.6155600, 1)
coords['мытищи'] = (55.9116300, 37.7307600, 1)
coords['н.новгород'] = (56.3286700, 44.0020500, 4)
coords['новосибирск'] = (55.0415000, 82.9346000, 4)
coords['омск'] = (54.9924400, 73.3685900, 4)
coords['ростовнадону'] = (47.2313500, 39.7232800, 3)
coords['спб'] = (59.9386300, 30.3141300, 2)
coords['самара'] = (53.2000700, 50.1500000, 4)
coords['сергиев'] = (56.3000000, 38.1333300, 4)
coords['сургут'] = (61.2500000, 73.4166700, 4)
coords['томск'] = (56.4977100, 84.9743700, 4)
coords['тюмень'] = (57.1522200, 65.5272200, 4)
coords['уфа'] = (54.7430600, 55.9677900, 4)
coords['химки'] = (55.8970400, 37.4296900, 1)
coords['цифровой'] = (0, 0, 0)
coords['чехов'] = (55.1477000, 37.4772800, 4)
coords['ярославль'] = (57.6298700, 39.8736800, 2)
shops_df['city_coord_1'] = shops_df['city'].apply(lambda x: coords[x][0])
shops_df['city_coord_2'] = shops_df['city'].apply(lambda x: coords[x][1])
shops_df['country_part'] = shops_df['city'].apply(lambda x: coords[x][2])
shops_df = shops_df[['shop_id', 'city_code', 'city_coord_1', 'city_coord_2', 'country_part']]
return shops_df
def build_item_features(items_df: pd.DataFrame, item_cats_df: pd.DataFrame):
"""
    Build item features
:param items_df:
:param item_cats_df:
:return:
"""
map_dict = {
'Чистые носители (штучные)': 'Чистые носители',
'Чистые носители (шпиль)': 'Чистые носители',
'PC ': 'Аксессуары',
'Служебные': 'Служебные '
}
items_df = pd.merge(items_df, item_cats_df, on='item_category_id')
items_df['item_category'] = items_df['item_category_name'].apply(lambda x: x.split('-')[0])
items_df['item_category'] = items_df['item_category'].apply(lambda x: map_dict[x] if x in map_dict.keys() else x)
items_df['item_category_common'] = LabelEncoder().fit_transform(items_df['item_category'])
items_df['item_category_code'] = LabelEncoder().fit_transform(items_df['item_category_name'])
items_df = items_df[['item_id', 'item_category_common', 'item_category_code']]
return items_df
def count_days(date_block_num):
"""
    Return date features for the given date_block_num; note the logic below is rough and should be adjusted to the actual requirements
:param date_block_num:
:return:
"""
year = 2013 + date_block_num // 12
month = 1 + date_block_num % 12
weeknd_count = len([1 for i in calendar.monthcalendar(year, month) if i[6] != 0])
days_in_month = calendar.monthrange(year, month)[1]
return weeknd_count, days_in_month, month
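# Worked example (values derived from the calendar module, added for clarity):
# count_days(0) maps to January 2013, which has 31 days and 4 calendar weeks
# containing a Sunday, so it returns (4, 31, 1).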
def build_feature_matrix(train_df: pd.DataFrame):
index_cols = ['shop_id', 'item_id', 'date_block_num']
feature_matrix_df = []
for block_num in train_df['date_block_num'].unique():
cur_shops = train_df.loc[train_df['date_block_num'] == block_num, 'shop_id'].unique()
cur_items = train_df.loc[train_df['date_block_num'] == block_num, 'item_id'].unique()
feature_matrix_df.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])), dtype='int32'))
feature_matrix_df = pd.DataFrame(np.vstack(feature_matrix_df), columns=index_cols, dtype=np.int32)
# Add month sales
group = train_df.groupby(['date_block_num', 'shop_id', 'item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
feature_matrix_df = pd.merge(feature_matrix_df, group, on=index_cols, how='left')
feature_matrix_df['item_cnt_month'] = (feature_matrix_df['item_cnt_month']
.fillna(0)
.clip(0, 20)
.astype(np.float16))
return feature_matrix_df
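# Illustrative sketch (toy values, not from the dataset): for every month the
# matrix holds the cartesian product of that month's shops and items, with the
# aggregated monthly sales zero-filled and clipped to the [0, 20] range.
# toy = pd.DataFrame({'date_block_num': [0, 0], 'shop_id': [1, 2],
#                     'item_id': [10, 10], 'item_cnt_day': [3.0, 5.0]})
# build_feature_matrix(toy)[['shop_id', 'item_id', 'item_cnt_month']]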
def build_date_features(feature_matrix_df: pd.DataFrame):
"""
    Build sale-date features
:param feature_matrix_df:
:return:
"""
map_dict = {i: count_days(i) for i in range(35)}
feature_matrix_df['weeknd_count'] = feature_matrix_df['date_block_num'].apply(lambda x: map_dict[x][0])
feature_matrix_df['days_in_month'] = feature_matrix_df['date_block_num'].apply(lambda x: map_dict[x][1])
return feature_matrix_df
def build_interaction_features(feature_matrix_df: pd.DataFrame):
"""
    Build shop-item interaction features
:param feature_matrix_df:
:return:
"""
first_item_block_df = feature_matrix_df.groupby(['item_id'])['date_block_num'].min().reset_index()
first_item_block_df['item_first_interaction'] = 1
first_shop_item_buy_block_df = feature_matrix_df[feature_matrix_df['date_block_num'] > 0].groupby(['shop_id', 'item_id'])['date_block_num'].min().reset_index()
first_shop_item_buy_block_df['first_date_block_num'] = first_shop_item_buy_block_df['date_block_num']
feature_matrix_df = pd.merge(feature_matrix_df, first_item_block_df[['item_id', 'date_block_num', 'item_first_interaction']], on=['item_id', 'date_block_num'], how='left')
feature_matrix_df = | pd.merge(feature_matrix_df, first_shop_item_buy_block_df[['item_id', 'shop_id', 'first_date_block_num']], on=['item_id', 'shop_id'], how='left') | pandas.merge |
import json
import pandas as pd
import os
import re
def create_entry(raw_entry,hashfunction,encoding):
return_dict = {}
app_metadata = {'is_god':raw_entry['is_admin']}
if not | pd.isna(raw_entry['organisation_id']) | pandas.isna |
import pandas as pd
from diligent.checks.nelson import (nelson_rule_1, nelson_rule_2, nelson_rule_3, nelson_rule_4,
nelson_rule_5, nelson_rule_6, nelson_rule_7, nelson_rule_8)
def test_nelson_rule_1():
mean = 0.0
std = 1.0
s = pd.Series([3, -3, 2, -2])
messages = list(nelson_rule_1(s, mean=mean, std=std))
assert len(messages) == 2
assert messages[0] == 'At 0: 3 is three standard deviations above the mean of %s' % s.mean()
assert messages[1] == 'At 1: -3 is three standard deviations below the mean of %s' % s.mean()
def test_nelson_rule_2():
mean = 0.0
messages = list(nelson_rule_2(pd.Series(range(1, 10)), mean=mean))
assert len(messages) == 1
assert messages[0] == 'At 0: 9 data points in sequence are above the mean of %s' % mean
messages = list(nelson_rule_2(pd.Series(range(-1, -10, -1)), mean=mean))
assert len(messages) == 1
assert messages[0] == 'At 0: 9 data points in sequence are below the mean of %s' % mean
messages = list(nelson_rule_2(pd.Series(range(-4, 5)), mean=mean))
assert len(messages) == 0
def test_nelson_rule_3():
messages = list(nelson_rule_3(pd.Series(range(6))))
assert len(messages) == 1
assert messages[0] == 'At 0: 6 data points in sequence are increasing'
messages = list(nelson_rule_3(pd.Series(range(0, -6, -1))))
assert len(messages) == 1
assert messages[0] == 'At 0: 6 data points in sequence are decreasing'
messages = list(nelson_rule_3(pd.Series([0, 1, 0, 1, 0, 1])))
assert len(messages) == 0
messages = list(nelson_rule_3(pd.Series([0, 0, 0, 0, 0, 0])))
assert len(messages) == 0
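# A minimal sketch (assumed, not the real diligent.checks.nelson implementation)
# of a rule consistent with the tests above: rule 3 flags six consecutive points
# that are strictly increasing or strictly decreasing.
def _nelson_rule_3_sketch(s):
    diffs = s.diff()
    for start in range(len(s) - 5):
        window = diffs.iloc[start + 1:start + 6]
        if (window > 0).all():
            yield 'At %d: 6 data points in sequence are increasing' % start
        elif (window < 0).all():
            yield 'At %d: 6 data points in sequence are decreasing' % start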
def test_nelson_rule_4():
messages = list(nelson_rule_4(pd.Series([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1])))
assert len(messages) == 1
assert messages[0] == 'At 0: 14 data points in sequence alternate in direction'
messages = list(nelson_rule_4(pd.Series([0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])))
assert len(messages) == 0
def test_nelson_rule_5():
mean = 0.0
std = 1.0
messages = list(nelson_rule_5( | pd.Series([2.1, 1, 2.1]) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators.ipynb (unless otherwise specified).
__all__ = ['getColName', 'getColByName', 'addKey', 'nullIfEqual', 'sumInts', 'age5', 'age18', 'age24', 'age64', 'age65',
'bahigher', 'carpool', 'drvalone', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hisp', 'hh25inc',
'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhs', 'hsdipl', 'lesshs', 'male', 'mhhi', 'drvalone',
'novhcl', 'nohhint', 'othercom', 'paa', 'p2more', 'pasi', 'pubtran', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav14', 'trav45', 'trav44', 'unempr', 'unempr', 'walked', 'createAcsIndicator']
# Cell
#@title Run This Cell: Misc Function Declarations
# These functions right here are used in the calculations below.
# Finds a column matchings a substring
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
# Pulls a column from one dataset into a new dataset.
# This is not a crosswalk. calls getColByName()
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
# Return 0 if two specified columns are equal.
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
# I'm thinking this doesn't need to be a function..
def sumInts(df): return df.sum(numeric_only=True)
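# A small usage sketch for the helpers above (column names are invented for the
# example): getColName matches on a substring, so the short ACS variable code is
# enough to pull the full column into the indicator frame.
# acs = pd.DataFrame({'B01001_001E_Total': [100], 'tract': ['000100']})
# out = addKey(acs, pd.DataFrame(), 'B01001_001E')   # copies the matched column into out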
# Cell
#@title Run This Cell: Create age5
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age5( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_027E_Total_Female_Under_5_years',
'B01001_003E_Total_Male_Under_5_years',
'B01001_001E_Total' , 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / df['B01001_001E_Total'] * 100
return fi
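# Usage sketch: given a dataframe of ACS B01001 columns keyed by tract,
# age5(acs_df, columnsToInclude=[]) returns the selected columns plus a 'final'
# column holding the percentage of the population under five years old.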
# Cell
#@title Run This Cell: age18
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import numpy
import glob
def age18( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_001E_Total',
'B01001_004E_Total_Male_5_to_9_years',
'B01001_005E_Total_Male_10_to_14_years' ,
'B01001_006E_Total_Male_15_to_17_years',
'B01001_028E_Total_Female_5_to_9_years',
'B01001_029E_Total_Female_10_to_14_years' ,
'B01001_030E_Total_Female_15_to_17_years']
columns = df.filter(regex='001E|004E|005E|006E|028E|029E|030E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='004E|005E|006E|028E|029E|030E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: Create age24
#File: age24.py
#Author: <NAME>
#Date: 9/8/21
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age24( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_007E_Total_Male_18_and_19_years',
'B01001_008E_Total_Male_20_years',
'B01001_009E_Total_Male_21_years' ,
'B01001_010E_Total_Male_22_to_24_years' ,
'B01001_031E_Total_Female_18_and_19_years' ,
'B01001_032E_Total_Female_20_years' ,
'B01001_033E_Total_Female_21_years' ,
'B01001_034E_Total_Female_22_to_24_years',
'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age64
import pandas as pd
import glob
def age64( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: age65
import pandas as pd
import glob
def age65( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: bahigher
import pandas as pd
import glob
def bahigher( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='005E|006E').sum(axis=1)
) / df['B06009_001E'] * 100
return fi
# Cell
#@title Run This Cell: - carpool
import pandas as pd
import glob
def carpool( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|017E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_017E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: - drvalone
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -elheat
import pandas as pd
import glob
def elheat( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='B25040_004E|B25040_001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B25040_004E').sum(axis=1)
) / ( df.filter(regex='B25040_001E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -empl
import pandas as pd
import glob
def empl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -fam
import pandas as pd
import glob
def fam( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -female
import pandas as pd
import glob
def female( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['female'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -femhhs
import pandas as pd
import glob
def femhhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['femhhs'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -heatgas
import pandas as pd
import glob
def heatgas( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: hisp
import pandas as pd
import glob
def hisp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_012E_Total_Hispanic_or_Latino']
columns = df.filter(regex='001E|012E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
fi['final'] = ( df.filter(regex='012E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: hh25inc
import pandas as pd
import glob
def hh25inc( df, columnsToInclude ):
df.columns = df.columns.str.replace(r"[$]", "")
fi = pd.DataFrame()
columns = ['B19001_001E_Total',
"B19001_002E_Total_Less_than_10,000",
"B19001_003E_Total_10,000_to_14,999",
"B19001_004E_Total_15,000_to_19,999",
"B19001_005E_Total_20,000_to_24,999"]
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey col: ', col, df.columns)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E|003E|004E|005E').sum(axis=1)
) / df['B19001_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -hh40inc
import pandas as pd
import glob
def hh40inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh60inc
import pandas as pd
import glob
def hh60inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh75inc
import pandas as pd
import glob
def hh75inc( df, columnsToInclude ):
fi = | pd.DataFrame() | pandas.DataFrame |
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Default font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of Decision Tree Classifier using the happiness dataset
    # the methods in this class are
    # __init__ : initialize the class
    # initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
    # chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements of the dashboard
        # needed to present the results from the algorithm.
        # The canvas is divided using a grid layout to facilitate the drawing
        # of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to execute the scikit-learn Decision Tree algorithm,
        then the results are presented in graphics and reports on the canvas.
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = | pd.concat([self.list_corr_features, df[features_list[23]]],axis=1) | pandas.concat |
import Functions
import pandas as pd
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date'])
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
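# The aggregation dict above collapses the daily rows into weekly OHLCV bars;
# the -6 day loffset below re-labels each bar with the Monday that starts the
# week instead of the Sunday that ends it.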
offset = pd.offsets.timedelta(days=-6)
dfWMR = dfWMR.resample('W', loffset=offset).apply(logic)
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date'])
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
logic = {'Open*': 'first',
'High': 'max',
'Low': 'min',
'Close**': 'last',
'Volume': 'sum',
'Market Cap': 'last'
}
offset = pd.offsets.timedelta(days=-6)
dfMarket = dfMarket.resample('W', loffset=offset).apply(logic)
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
dfSentiment.index = | pd.DatetimeIndex(dfSentiment['Date']) | pandas.DatetimeIndex |
import os
import sqlite3
from unittest import TestCase
import warnings
from contextlib2 import ExitStack
from logbook import NullHandler, Logger
import numpy as np
import pandas as pd
from six import with_metaclass, iteritems, itervalues
import responses
from toolz import flip, groupby, merge
from trading_calendars import (
get_calendar,
register_calendar_alias,
)
import h5py
import zipline
from zipline.algorithm import TradingAlgorithm
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.utils.memoize import classlazyval
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.domain import GENERIC, US_EQUITIES
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.paths import ensure_directory, ensure_directory_containing
from .core import (
create_daily_bar_data,
create_minute_bar_data,
make_simple_equity_info,
tmp_asset_finder,
tmp_dir,
write_hdf5_daily_bars,
)
from .debug import debug_mro_failure
from ..data.adjustments import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.fx import (
InMemoryFXRateReader,
HDF5FXRateReader,
HDF5FXRateWriter,
)
from ..data.hdf5_daily_bars import (
HDF5DailyBarReader,
HDF5DailyBarWriter,
MultiCountryDailyBarReader,
)
from ..data.loader import (
get_benchmark_filename,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader
)
from ..finance.trading import SimulationParameters
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
from ..utils.memoize import remember_last
zipline_dir = os.path.dirname(zipline.__file__)
class DebugMROMeta(FinalMeta):
"""Metaclass that helps debug MRO resolution errors.
"""
def __new__(mcls, name, bases, clsdict):
try:
return super(DebugMROMeta, mcls).__new__(
mcls, name, bases, clsdict
)
except TypeError as e:
if "(MRO)" in str(e):
msg = debug_mro_failure(name, bases)
raise TypeError(msg)
else:
raise
class ZiplineTestCase(with_metaclass(DebugMROMeta, TestCase)):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback`.
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
'Called init_class_fixtures from init_instance_fixtures.'
' Did you write super(..., self).init_class_fixtures() instead'
' of super(..., self).init_instance_fixtures()?',
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
'Attempted to enter a class context in init_instance_fixtures.'
'\nDid you mean to call enter_instance_context?',
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
'Attempted to add a class callback in init_instance_fixtures.'
'\nDid you mean to call add_instance_callback?',
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
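# A brief sketch (not part of the original module) of the intended subclassing
# pattern described in the class docstring: override the init_* hooks, call
# super(), and register cleanup through the context/callback helpers instead of
# overriding setUp/tearDown directly.
#
#   class MyTestCase(ZiplineTestCase):
#       @classmethod
#       def init_class_fixtures(cls):
#           super(MyTestCase, cls).init_class_fixtures()
#           cls.tmp = cls.enter_class_context(tmp_dir())
#
#       def init_instance_fixtures(self):
#           super(MyTestCase, self).init_instance_fixtures()
#           self.add_instance_callback(lambda: None)  # placeholder per-test cleanup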
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(with_metaclass(DebugMROMeta, object)):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
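# Sketch: a test case that needs a different window only overrides the two class
# attributes; fixtures that subclass it (e.g. WithAssetFinder below) pick the
# same range up through their alias('START_DATE') / alias('END_DATE') defaults.
#
#   class WithShortWindow(WithDefaultDateBounds):
#       START_DATE = pd.Timestamp('2006-06-01', tz='utc')
#       END_DATE = pd.Timestamp('2006-06-30', tz='utc')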
class WithLogger(object):
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
ASSET_FINDER_EQUITY_NAMES: iterable[str]
The default names to use for the equities.
ASSET_FINDER_EQUITY_EXCHANGE : str
The default exchange to assign each equity.
ASSET_FINDER_COUNTRY_CODE : str
The default country code to assign each exchange.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
make_asset_finder() -> pd.DataFrame
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
"""
ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C')
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_NAMES = None
ASSET_FINDER_EQUITY_EXCHANGE = 'TEST'
ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE')
ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE')
ASSET_FINDER_FUTURE_CHAIN_PREDICATES = CHAIN_PREDICATES
ASSET_FINDER_COUNTRY_CODE = '??'
@classmethod
def _make_info(cls, *args):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
cls.ASSET_FINDER_EQUITY_NAMES,
cls.ASSET_FINDER_EQUITY_EXCHANGE,
)
@classmethod
def make_asset_finder_db_url(cls):
return 'sqlite:///:memory:'
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
equities = cls.make_equity_info()
futures = cls.make_futures_info()
root_symbols = cls.make_root_symbols_info()
exchanges = cls.make_exchanges_info(equities, futures, root_symbols)
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
'country_code': cls.ASSET_FINDER_COUNTRY_CODE,
})
return cls.enter_class_context(tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=equities,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
future_chain_predicates=cls.ASSET_FINDER_FUTURE_CHAIN_PREDICATES,
))
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
@classlazyval
def all_assets(cls):
"""A list of Assets for all sids in cls.asset_finder.
"""
return cls.asset_finder.retrieve_all(cls.asset_finder.sids)
@classlazyval
def exchange_names(cls):
"""A list of canonical exchange names for all exchanges in this suite.
"""
infos = itervalues(cls.asset_finder.exchange_info)
return sorted(i.canonical_name for i in infos)
@classlazyval
def assets_by_calendar(cls):
"""A dict from calendar -> list of assets with that calendar.
"""
return groupby(lambda a: get_calendar(a.exchange), cls.all_assets)
@classlazyval
def all_calendars(cls):
"""A list of all calendars for assets in this test suite.
"""
return list(cls.assets_by_calendar)
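# Sketch of typical use (class name and symbols here are illustrative only): a
# test case normally just overrides the ASSET_FINDER_EQUITY_* attributes or one
# of the make_*_info hooks, and make_asset_finder assembles the temporary asset
# db from them.
#
#   class WithThreeEquities(WithAssetFinder):
#       ASSET_FINDER_EQUITY_SIDS = (1, 2, 3)
#       ASSET_FINDER_EQUITY_SYMBOLS = ('AAA', 'BBB', 'CCC')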
# TODO_SS: The API here doesn't make sense in a multi-country test scenario.
class WithTradingCalendars(object):
"""
ZiplineTestCase mixin providing cls.trading_calendar,
cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a
class-level fixture.
After ``init_class_fixtures`` has been called:
- `cls.trading_calendar` is populated with a default of the nyse trading
calendar for compatibility with existing tests
- `cls.all_trading_calendars` is populated with the trading calendars
keyed by name,
- `cls.trading_calendar_for_asset_type` is populated with the trading
calendars keyed by the asset type which uses the respective calendar.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ('NYSE',)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'}
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = 'NYSE'
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
for cal_str in (
set(cls.TRADING_CALENDAR_STRS) |
{cls.TRADING_CALENDAR_PRIMARY_CAL}
):
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls,
'{0}_calendar'.format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
type_to_cal = iteritems(cls.TRADING_CALENDAR_FOR_ASSET_TYPE)
for asset_type, cal_str in type_to_cal:
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
cls.trading_calendar = (
cls.trading_calendars[cls.TRADING_CALENDAR_PRIMARY_CAL]
)
_MARKET_DATA_DIR = os.path.join(zipline_dir, 'resources', 'market_data')
@remember_last
def read_checked_in_benchmark_data():
symbol = 'SPY'
filename = get_benchmark_filename(symbol)
source_path = os.path.join(_MARKET_DATA_DIR, filename)
benchmark_returns = pd.read_csv(
source_path,
parse_dates=[0],
index_col=0,
header=None,
).tz_localize('UTC')
return benchmark_returns.iloc[:, 0]
class WithBenchmarkReturns(WithDefaultDateBounds,
WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.benchmark_returns as a class-level
attribute.
"""
_default_treasury_curves = None
@classproperty
def BENCHMARK_RETURNS(cls):
benchmark_returns = read_checked_in_benchmark_data()
# Zipline ordinarily uses cached benchmark returns and treasury
# curves data, but when running the zipline tests this cache is not
# always updated to include the appropriate dates required by both
# the futures and equity calendars. In order to create more
# reliable and consistent data throughout the entirety of the
# tests, we read static benchmark returns and treasury curve csv
# files from source. If a test using this fixture attempts to run
# outside of the static date range of the csv files, raise an
# exception warning the user to either update the csv files in
# source or to use a date range within the current bounds.
static_start_date = benchmark_returns.index[0].date()
static_end_date = benchmark_returns.index[-1].date()
warning_message = (
'The WithBenchmarkReturns fixture uses static data between '
'{static_start} and {static_end}. To use a start and end date '
'of {given_start} and {given_end} you will have to update the '
'files in {resource_dir} to include the missing dates.'.format(
static_start=static_start_date,
static_end=static_end_date,
given_start=cls.START_DATE.date(),
given_end=cls.END_DATE.date(),
resource_dir=_MARKET_DATA_DIR,
)
)
if cls.START_DATE.date() < static_start_date or \
cls.END_DATE.date() > static_end_date:
raise AssertionError(warning_message)
return benchmark_returns
class WithSimParams(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
Attributes
----------
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``SimulationParameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``SimulationParameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
Methods
-------
make_simparams(**overrides)
Construct a ``SimulationParameters`` using the defaults defined by
fixture configuration attributes. Any parameters to
``SimulationParameters`` can be overridden by passing them by keyword.
See Also
--------
zipline.finance.trading.SimulationParameters
"""
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'daily'
SIM_PARAMS_EMISSION_RATE = 'daily'
SIM_PARAMS_START = alias('START_DATE')
SIM_PARAMS_END = alias('END_DATE')
@classmethod
def make_simparams(cls, **overrides):
kwargs = dict(
start_session=cls.SIM_PARAMS_START,
end_session=cls.SIM_PARAMS_END,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
kwargs.update(overrides)
return SimulationParameters(**kwargs)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
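# Hypothetical usage sketch (not part of the original fixture module): a test case
# mixing in WithSimParams can override the class-level defaults and still receive a
# ready-made ``cls.sim_params``, assuming ``SimulationParameters`` exposes the usual
# ``capital_base`` attribute, e.g.
#
#   class MyAlgoTestCase(WithSimParams, ZiplineTestCase):
#       SIM_PARAMS_CAPITAL_BASE = 2.5e6
#       SIM_PARAMS_DATA_FREQUENCY = 'minute'
#
#       def test_capital_base(self):
#           self.assertEqual(self.sim_params.capital_base, 2.5e6)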
class WithTradingSessions(WithDefaultDateBounds, WithTradingCalendars):
"""
    ZiplineTestCase mixin providing cls.trading_days, cls.trading_sessions
as a class-level fixture.
    After init_class_fixtures has been called, `cls.trading_sessions`
is populated with a dictionary of calendar name to the DatetimeIndex
containing the calendar trading days ranging from:
(DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY)
`cls.trading_days`, for compatibility with existing tests which make the
assumption that trading days are equity only, defaults to the nyse trading
sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias('START_DATE')
DATA_MAX_DAY = alias('END_DATE')
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias('nyse_sessions')
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
# Set name for aliasing.
setattr(cls,
'{0}_sessions'.format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
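# Illustrative sketch (not from the original source): a subclass can read the
# per-calendar sessions either through the alias attributes or through the dict, e.g.
#   nyse = cls.trading_sessions['NYSE']   # same index as cls.nyse_sessions
#   first_session, last_session = nyse[0], nyse[-1]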
class WithTmpDir(object):
"""
    ZiplineTestCase mixin providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
class WithInstanceTmpDir(object):
"""
    ZiplineTestCase mixin providing self.tmpdir as an instance-level fixture.
After init_instance_fixtures has been called, `self.tmpdir` is populated
with a `testfixtures.TempDirectory` object whose path is
`cls.TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
class WithEquityDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
``START_DATE``.
    EQUITY_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
EQUITY_DAILY_BAR_COUNTRY_CODES : tuple
        The countries to create data for. By default this is populated
with all of the countries present in the asset finder.
Methods
-------
make_equity_daily_bar_data(country_code, sids)
make_equity_daily_bar_currency_codes(country_code, sids)
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
""" # noqa
EQUITY_DAILY_BAR_START_DATE = alias('START_DATE')
EQUITY_DAILY_BAR_END_DATE = alias('END_DATE')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classproperty
def EQUITY_DAILY_BAR_COUNTRY_CODES(cls):
return cls.asset_finder.country_codes
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(cls, WithEquityMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Equity])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
"""
Create daily pricing data.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Yields
------
(int, pd.DataFrame)
A sid, dataframe pair to be passed to a daily bar writer.
The dataframe should be indexed by date, with columns of
('open', 'high', 'low', 'close', 'volume', 'day', & 'id').
"""
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(cls.equity_daily_bar_days, sids)
@classmethod
def make_equity_daily_bar_currency_codes(cls, country_code, sids):
"""Create listing currencies.
Default is to list all assets in USD.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Returns
-------
currency_codes : pd.Series[int, str]
Map from sids to currency for that sid's prices.
"""
return pd.Series(index=list(sids), data='USD')
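    # Hypothetical override sketch (not part of the original class): a test case that
    # wants non-USD listings could shadow this classmethod, e.g.
    #   @classmethod
    #   def make_equity_daily_bar_currency_codes(cls, country_code, sids):
    #       return pd.Series(index=list(sids), data='CAD')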
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
| pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE) | pandas.Timestamp |
import unittest
import networkx as nx
import numpy as np
import pandas as pd
from graphrole.features.extract import RecursiveFeatureExtractor, as_frame
# try to import igraph; if not found set flag to skip associated tests
skip_igraph_tests = False
try:
import igraph as ig
except ImportError:
skip_igraph_tests = True
np.random.seed(0)
# pylint: disable=protected-access
class TestAsFrame(unittest.TestCase):
def test_as_frame(self):
# test series
series = pd.Series(np.random.rand(10))
result = as_frame(series)
self.assertIsInstance(result, pd.DataFrame)
pd.testing.assert_frame_equal(result, pd.DataFrame(series))
# test dataframe
frame = pd.DataFrame(np.random.rand(10))
result = as_frame(frame)
self.assertIsInstance(result, pd.DataFrame)
pd.testing.assert_frame_equal(result, frame)
class TestRecursiveFeatureExtractorWithDanglingNode(unittest.TestCase):
""" Unit tests for RecursiveFeatureExtractor when graph has dangling nodes """
def setUp(self):
# build graph with dangling nodes
self.nodes = ['a', 'b', 'c', 'd']
self.edge = ('a', 'c')
self.G = nx.Graph()
self.G.add_nodes_from(self.nodes)
self.G.add_edge(*self.edge)
self.rfe = RecursiveFeatureExtractor(self.G)
def test_e2e_with_dangling_nodes(self):
features = self.rfe.extract_features()
# test that all nodes are present in feature dataframe
self.assertListEqual(features.index.tolist(), self.nodes)
# test that no features are null/nan
        self.assertTrue(features.notnull().values.all())
def test_internal_with_dangling_nodes(self):
# manually seed first generation
next_features0 = self.rfe.graph.get_neighborhood_features()
self.rfe._features = next_features0
self.rfe._final_features = {0: next_features0.to_dict()}
# get next features
self.rfe.generation_count = 1
next_features1 = self.rfe._get_next_features()
# test that dangling nodes did not generate features
self.assertListEqual(next_features1.index.tolist(), list(self.edge))
# test that features are not null/nan
        self.assertTrue(next_features1.notnull().values.all())
class BaseRecursiveFeatureExtractorTest:
class TestCases(unittest.TestCase):
""" Unit tests for RecursiveFeatureExtractor """
G = None
G_empty = None
nodes = ['a', 'b', 'c', 'd']
edges = [('a', 'b'), ('a', 'c'), ('c', 'd')]
aggs = [
np.sum,
np.mean
]
def setUp(self):
self.rfe = RecursiveFeatureExtractor(self.G, aggs=self.aggs)
# initialize with neighborhood features
self.rfe._features = self.rfe.graph.get_neighborhood_features()
self.rfe._final_features = {0: self.rfe._features.to_dict()}
self.rfe.generation_count = 1
def test_initialize_with_unknown_graph_type(self):
class SomeGraph:
pass
with self.assertRaises(TypeError):
_ = RecursiveFeatureExtractor(SomeGraph)
def test__initialize_with_empty_graph(self):
with self.assertRaises(ValueError):
_ = RecursiveFeatureExtractor(self.G_empty)
def test__get_next_features(self):
# self.rfe._features = self.rfe.graph.get_neighborhood_features()
# self.rfe._final_features = {0: self.rfe._features.to_dict()}
# self.rfe.generation_count = 1
features_gen1 = self.rfe._get_next_features()
expected_features_gen1 = {
'external_edges(sum)': {'a': 2.0, 'b': 1.0, 'c': 2.0, 'd': 1.0},
'degree(sum)': {'a': 3.0, 'b': 2.0, 'c': 3.0, 'd': 2.0},
'internal_edges(sum)': {'a': 3.0, 'b': 2.0, 'c': 3.0, 'd': 2.0},
'external_edges(mean)': {'a': 1.0, 'b': 1.0, 'c': 1.0, 'd': 1.0},
'degree(mean)': {'a': 1.5, 'b': 2.0, 'c': 1.5, 'd': 2.0},
'internal_edges(mean)': {'a': 1.5, 'b': 2.0, 'c': 1.5, 'd': 2.0}
}
# some graph interfaces do not support string node names so we will test
# the values of the feature DataFrames and intentionally ignore the index
self.assertTrue(np.allclose(
features_gen1.sort_index(axis=1).sort_index(axis=0).values,
pd.DataFrame(expected_features_gen1).sort_index(axis=1).sort_index(axis=0).values
))
def test__update(self):
# update with new features, include one that will be pruned
existing_features = self.rfe._features
new_features = pd.concat([
pd.DataFrame(
existing_features['degree'].values,
columns=['degree2'],
index=existing_features.index
),
pd.DataFrame(
np.random.randn(existing_features.shape[0], 2),
columns=['a', 'b'],
index=existing_features.index
)
], axis=1)
self.rfe._update(new_features)
# test _features
features = self.rfe._features
expected_features = pd.concat([
existing_features[['degree', 'external_edges']],
new_features[['a', 'b']]
], axis=1)
pd.testing.assert_frame_equal(features, expected_features)
# test _final_features
final_features = self.rfe._finalize_features()
expected_final_features = pd.concat([
existing_features,
new_features[['a', 'b']]
], axis=1)
pd.testing.assert_frame_equal(
final_features.sort_index(axis=1),
expected_final_features.sort_index(axis=1)
)
def test__aggregated_df_to_dict(self):
# dataframe
index = ['sum', 'mean']
columns = ['feature1', 'feature2', 'feature3']
data = np.arange(len(index) * len(columns)).reshape(len(index), len(columns))
df = pd.DataFrame(data, columns=columns, index=index)
agg_dict = self.rfe._aggregated_df_to_dict(df)
expected_agg_dict = {
'feature1(sum)': 0,
'feature2(sum)': 1,
'feature3(sum)': 2,
'feature1(mean)': 3,
'feature2(mean)': 4,
'feature3(mean)': 5,
}
self.assertDictEqual(agg_dict, expected_agg_dict)
# series
series = | pd.Series([6, 7, 8], index=columns, name='prod') | pandas.Series |
import luigi
import pandas as pd
from tasks.reg.features import ARIMAFeatures
from pathlib import Path
from common import utils
from tasks.reg.gbdt import GBDTFeatures
class CombineFeatures(luigi.Task):
uuid = luigi.Parameter()
task = luigi.Parameter()
def requires(self):
return [GBDTFeatures(self.uuid, self.task),
ARIMAFeatures(self.uuid, self.task)]
def output(self):
self.dir = Path(self.input()[0].path).absolute().parent
outfile = self.dir / 'combined_{0}.csv'.format(self.task)
return luigi.LocalTarget(outfile.absolute().as_posix())
def run(self):
self._source()
df0 = utils.load_data(self.input()[0].path)
df1 = utils.load_data(self.input()[1].path)
final = | pd.merge(df0, df1, how='left', on=self.oncols) | pandas.merge |
import MachineLearning.SupervisedLearning.Regression.LinearRegression as LR
import numpy as np
# from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
def main():
# ---- inputs -----
x_low = -2
x_high = 6
m = 50 # number of elements in feature vector
# ------------------------------------------------------------------------------------------------------
# get the input vector x as if we have all of the values so we can plot a line to check for over fitting
# uniform spacing from low to high
x_uniform = np.arange(x_low, x_high, 1/m)
# ------------------------------------------------------------------------------------------------------
# Question: 1
# ------------
# Create a input vector of random floats over a range
# https://stackoverflow.com/questions/22071987/generate-random-array-of-floats-between-a-range
x = np.random.uniform(low=x_low, high=x_high, size=(m, 1))
x = np.sort(x, axis=0)
f = np.vectorize(LR.generate_data.poly_third_order)
y = f(x)
# Plot x vs. y
myplot = LR.plotting.PyPlotter()
myplot.plot(x, y, close=False, fig_name='Question_1_Plot', show=False, plot_style='plot', line_style='',
label='Original Data')
# Question: 2
# ------------
# Part: 1
# --------
# Simple linear regression with "normal equations method" (non-iterative) (Maximum Likelihood)
# Linear Hypothesis function
phi_x = LR.features.phi_polynomial(x, order=1)
# phi_x_all = LR.features.phi_polynomial(x_uniform, order=1)
theta_ml = LR.features.least_squares_max_likelihood(phi_x, y)
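    # For reference, the closed-form least-squares (maximum-likelihood) solution that
    # the call above is assumed to compute is the usual normal-equations estimate:
    #   theta_ML = (Phi^T Phi)^{-1} Phi^T y
    # where Phi is the polynomial design matrix and y the target vector.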
# get values for y predicted given theta maximum likelihood and input vector x
y_pred = LR.features.predicted_values(theta_ml, x)
# y_pred_all = LR.features.predict_values(phi_x_all, theta_ml)
# get mean squared error and place it on the plot
mse = LR.features.average_least_square_error(y_pred, y)
    myplot.plot(x, y_pred, close=False, save_fig=False, show=False, plot_style='plot',
label='First Order Fit (MSE: {:.2f})'.format(mse))
# myplot.plot(x, y_pred_all, close=False, save_fig=False, show='False', plot_style='plot')
myplot.save_figure(fig_name='Question_2_Plot_Part_a')
# Part 2:
# -------
phi_x = LR.features.phi_polynomial(x, order=2)
theta_ml = LR.features.least_squares_max_likelihood(phi_x, y)
# get values for y predicted given theta maximum likelihood and input vector x
y_pred = LR.features.predict_values(phi_x, theta_ml)
# get mean squared error and place it on the plot
mse = LR.features.average_least_square_error(y_pred, y)
    myplot.plot(x, y_pred, close=False, save_fig=False, show=False,
label='Second Order Fit (MSE: {:.2f})'.format(mse))
myplot.save_figure(fig_name='Question_2_Plot_Part_b')
# Part 3:
# -------
phi_x = LR.features.phi_polynomial(x, order=3)
theta_ml = LR.features.least_squares_max_likelihood(phi_x, y)
# get values for y predicted given theta maximum likelihood and input vector x
y_pred = LR.features.predicted_values(theta_ml, x)
from copy import deepcopy
y_pred_no_noise = deepcopy(y_pred)
# get mean squared error and place it on the plot
mse = LR.features.average_least_square_error(y_pred, y)
    myplot.plot(x, y_pred, close=False, save_fig=False, show=False, label='Third Order Fit (MSE: {:.2f})'.format(mse),
marker='*')
myplot.save_figure(fig_name='Question_2_Plot_Part_c')
print('theta = ' + str(theta_ml))
# Part 4:
# -------
# create some noise
# https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python
mean = 0 # mean of the normal distribution
    sigma = 3  # the standard deviation of the noise distribution (np.random.normal takes a std, not a variance)
    noise = np.random.normal(mean, sigma, m)
noise = noise[:, np.newaxis]
# add some noise to the y values
y_w_noise = y + noise
theta_ml = LR.features.least_squares_max_likelihood(phi_x, y_w_noise)
print("theta values for predicting on noisy data: {}".format(theta_ml))
# get values for y predicted given theta maximum likelihood and input vector x
y_pred = LR.features.predicted_values(theta_ml, x)
# get mean squared error and place it on the plot
mse = LR.features.average_least_square_error(y_pred, y)
# compare the y_predicted fit on no noise to the y_w_noise
mse_n = LR.features.average_least_square_error(y_pred_no_noise, y_w_noise)
myplot_2 = LR.plotting.PyPlotter()
# myplot_2.plot(x, y, close=False, show=False, label='Original Data')
myplot_2.plot(x, y_w_noise, close=False, show=False, label='Original Data with Noise')
myplot_2.plot(x, y_pred, close=False, show=False, label='Third Order Fit on Data w Noise (MSE: {:.2f})'.format(mse))
myplot_2.plot(x, y_pred_no_noise, close=False, show=False,
label='Third Order Fit on Data w/o Noise (MSE: {:.2f})'.format(mse_n), plot_style='plot', marker='')
myplot_2.save_figure(fig_name='Question_2_Plot_Part_4')
a = 1
# https://www.geeksforgeeks.org/implementation-of-locally-weighted-linear-regression/
# USE THE DATA WITHOUT NOISE FOR TRAINING
tao = 0.1
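    # Locally weighted least squares as typically formulated (the exact implementation
    # lives in LR.features.predict and may differ in detail):
    #   w_i(x)   = exp(-(x_i - x)^2 / (2 * tao^2))
    #   theta(x) = (X^T W X)^{-1} X^T W y,  with W = diag(w_1, ..., w_m)
    # so every query point x gets its own theta, dominated by nearby training points;
    # a smaller tao means a narrower neighbourhood and a wigglier fit.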
y_test = []
theta_vals = []
for _x in x:
theta, pred = LR.features.predict(x, y, _x, tao)
a = pred.tolist()
theta_ = theta.tolist()
theta_vals.append(theta_[0][0])
y_test.append(a[0][0])
    print('Theta values found for training on the data without noise LOCALLY WEIGHTED: {}'.format(theta_vals))
y_test = np.array(y_test)
y_test = y_test[:, np.newaxis]
theta_vals = np.array(theta_vals)
theta_vals = theta_vals[:, np.newaxis]
# USE THE DATA WITH NOISE FOR TESTING
PRED = x * theta_vals
fix, ax = plt.subplots()
# COMPARE PREDICTED TO NOISY
mse = LR.features.average_least_square_error(y, y_w_noise)
ax.scatter(x, PRED, label='Trained without noise tested with noise (MSE: {:.2f})'.format(mse))
ax.scatter(x, y_w_noise, label='Original with noise')
# get mean squared error and place it on the plot
mse = LR.features.average_least_square_error(y_test, y)
mse_1 = LR.features.average_least_square_error(y_test, y_w_noise)
ax.scatter(x, y_test, label='Local Weighted (MSE to original: {:.2f}, MSE to noise: {:.2f})'.format(mse, mse_1))
# get mean squared error and place it on the plot
# mse = LR.features.average_least_square_error(y_pred, y_w_noise)
# ax.plot(x, y_pred, label='Third Order (MSE: {:.2f})'.format(mse), ls='-')
plt.legend(loc='upper left')
plt.savefig('Question_1_Part_5.jpg')
a = 1
# --------------------------------------------------------------------------------------------------
# Question: 3
file_name = 'Hogsmeade_Prices.csv'
df = pd.read_csv(file_name)
dn = df.to_dict('list')
keys = list(dn.keys())
key0 = keys[0]
inputs = []
output = []
for key in keys:
if key == 'House ID':
continue
if 'Output:' in key:
output.append(dn[key])
else:
inputs.append(dn[key])
x = np.array(inputs).T
y = np.array(output).T
theta_ml = LR.features.least_squares_max_likelihood(x, y)
y_pred = np.matmul(x, theta_ml) # predict in one line
mse = LR.features.average_least_square_error(y_pred, y)
print('Mean squared error of linear regression: {}'.format(mse))
tao = 0.1
y_test = []
for _x in x:
theta, pred = LR.features.predict(x, y, _x, tao)
a = pred.tolist()
y_test.append(a[0][0])
y_test = np.array(y_test)
y_test = y_test[:, np.newaxis]
mse = LR.features.average_least_square_error(y_test, y)
print('Mean squared error of locally weighted: {}'.format(mse))
x_bar = x.mean(axis=0)
x_bar = x_bar[:, np.newaxis]
y_bar = y.mean(axis=0)
y_bar = y_bar[:, np.newaxis]
x_bar_weighted = x_bar * theta_ml
x_bar_percentage_weight = x_bar_weighted / y_bar
x_out = x_bar_percentage_weight.tolist()
dn_out = {'values': x_out}
| pd.DataFrame(dn_out) | pandas.DataFrame |
import pandas as pd
import os
import json
def steering_df(directory, jsonfilename='steering.json',to_letters_dict=None):
to_letters_dict = {87:'w', 83:'s', 68:'d', 65:'a'} if to_letters_dict is None else to_letters_dict
jdata = None
with open(os.path.join(directory,jsonfilename),'r') as f:
jdata = json.load(f)
dictdata = dict(w=[],a=[],s=[],d=[],time=[])
for d,t in jdata:
for number, letter in to_letters_dict.items():
dictdata[letter].append(d[str(number)])
dictdata['time'].append(t)
df2 = pd.DataFrame(dictdata)
return df2
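# The steering JSON is assumed (from the loop above, not from any spec) to be a list of
# [keystate, timestamp] pairs, where keystate maps key codes to their pressed state, e.g.
#   [[{"87": 1, "83": 0, "68": 0, "65": 0}, 1556801.23], ...]   # illustrative values only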
def pics_df(directory):
files = os.listdir(directory)
files = list(filter(lambda x: x[-4:] == '.jpg', files))
pictimes = list(map(lambda x: float(x.split("_")[1][:-4]), files))
emptycol = [None]*len(pictimes)
df = pd.DataFrame(dict(filenames=files, pictimes=pictimes,a=emptycol,w=emptycol,s=emptycol,d=emptycol))
return df
def imu_df(directory):
imu = 'imu.csv'
imu_path = os.path.join(directory, imu)
idf = | pd.read_csv(imu_path) | pandas.read_csv |
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
from pandas.tools.plotting import autocorrelation_plot
# from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score, roc_curve
from sklearn.metrics import jaccard_similarity_score
import seaborn as sns
import datetime
plt.style.use('ggplot')
from bisect import bisect
import re
pd.options.display.max_columns = 999
# In[5]:
lst_f_featureimportance = glob.glob("log_assess/*.csv")
lst_f_featureimportance_actual = glob.glob("log_assess/actual_model_performance/*.csv")
lst_f_performance = glob.glob("log_assess/actual_model_performance/*.txt")
lst_f_res = glob.glob("result_assess/actual_model_result/Results*.csv")
lst_f_roc = glob.glob("roc_assess/*.csv")
lst_f_input = glob.glob("input_data/test/*.csv")
lst_f_actual_res = glob.glob("result_assess/actual_model_result/merged_*.csv")
# In[16]:
fontsize = 20
# In[4]:
# ###2nd run
# lst_f_featureimportance_2nd = glob.glob("backup/log_assess/*.csv")
# lst_f_performance_2nd = glob.glob("backup/log_assess/*.txt")
# lst_f_res_2nd = glob.glob("backup/result_assess/*.csv")
# lst_f_roc_2nd = glob.glob("backup/roc_assess/*.csv")
# ## 1. Accuracy/Kappa/AUC/Recall/F1
# ### Value tendency
# In[1029]:
lst_performance = []
for filename in lst_f_performance:
with open(filename) as f:
lst_performance.append([i for i in f.read().split("\n") if i.strip()])
dict_performance = {}
# dict_performance['accuracy'] = []
# dict_performance['kappa'] = []
dict_performance['auc'] = []
# dict_performance['recall'] = []
# dict_performance['precision'] = []
dict_performance['f1'] = []
dict_performance['date'] = []
for idx, p in enumerate(lst_performance):
for em in p:
if 'AUC' in em:
auc = float(em[em.find('=')+2:].strip())
if 'f1' in em:
dict_performance['date'].append(lst_f_performance[idx])
dict_performance['f1'].append(float(em[em.find('=')+2:].strip()))
dict_performance['auc'].append(auc)
# if 'Accuracy' in em:
# dict_performance['accuracy'].append(float(em[em.find('=')+2:].strip()))
# if 'kappa' in em:
# dict_performance['kappa'].append(float(em[em.find('=')+2:].strip()))
# if 'recall' in em:
# dict_performance['recall'].append(float(em[em.find('=')+2:].strip()))
# if 'precision' in em:
# dict_performance['precision'].append(float(em[em.find('=')+2:].strip()))
df_performance = pd.DataFrame(dict_performance)
def getDate(x):
return x.replace("log_assess/actual_model_performance/ModelPerformance_", "").replace(".txt","")[:10]
df_performance['date'] = pd.to_datetime(df_performance['date'].apply(lambda x: getDate(x)))
df_performance = df_performance.sort_values(by='date').reset_index(drop=True)
# In[6]:
### 2nd run
# lst_performance_2nd = []
# for filename in lst_f_performance_2nd:
# with open(filename) as f:
# lst_performance_2nd.append([i for i in f.read().split("\n") if i.strip()])
# dict_performance_2nd = {}
# dict_performance_2nd['accuracy'] = []
# dict_performance_2nd['kappa'] = []
# dict_performance_2nd['auc'] = []
# dict_performance_2nd['recall'] = []
# dict_performance_2nd['precision'] = []
# dict_performance_2nd['f1'] = []
# for p in lst_performance_2nd:
# for em in p:
# if 'Accuracy' in em:
# dict_performance_2nd['accuracy'].append(float(em[em.find('=')+2:].strip()))
# if 'kappa' in em:
# dict_performance_2nd['kappa'].append(float(em[em.find('=')+2:].strip()))
# if 'AUC' in em:
# dict_performance_2nd['auc'].append(float(em[em.find('=')+2:].strip()))
# if 'recall' in em:
# dict_performance_2nd['recall'].append(float(em[em.find('=')+2:].strip()))
# if 'precision' in em:
# dict_performance_2nd['precision'].append(float(em[em.find('=')+2:].strip()))
# if 'f1' in em:
# dict_performance_2nd['f1'].append(float(em[em.find('=')+2:].strip()))
# df_performance_2nd = pd.DataFrame(dict_performance_2nd)
# dict_date_2nd = {}
# dict_date_2nd['date'] = [fn.replace("backup/log_assess/ModelPerformance_", "").replace(".txt","") for fn in lst_f_performance_2nd]
# df_date_2nd = pd.DataFrame(dict_date_2nd)
# df_performance_2nd = df_performance_2nd.join(df_date_2nd)
# df_performance_2nd['date'] = pd.to_datetime(df_performance_2nd['date'])
# df_performance_2nd = df_performance_2nd.sort_values(by='date').reset_index(drop=True)
# # df_performance.set_index(['date'],inplace=True)
# In[1129]:
kappa_kdd = 0.33
auc_kdd = 0.75
recall_kdd = 0.50
precision_kdd = 0.26
post_deploy = 'Actual performance over time'
pre_deploy = "Initial trained model"
lst_date = [""] + df_performance['date'].dt.strftime('%m-%d-%y').tolist() + [""]
fig, axes = plt.subplots(1,2,figsize=(25,5))
ax = axes.flat
# ax[0].plot(df_performance['accuracy'], marker='o')
# # ax[0].plot(df_performance_2nd['date'], df_performance_2nd['accuracy'], marker='o')
# ax[0].set_title("accuracy")
# # ax[0].legend(['1st run','2nd run'])
# ax[0].plot(df_performance['date'], df_performance['kappa'], marker='o')
# ax[0].plot(df_performance['date'], np.full((df_performance.shape[0]), kappa_kdd), ls='dashed', color = 'r')
# # ax[1].plot(df_performance_2nd['date'], df_performance_2nd['kappa'], marker='o')
# ax[0].set_title("kappa")
# max_lim = max(df_performance['kappa'].max(), kappa_kdd)+0.03
# min_lim = min(df_performance['kappa'].min(), kappa_kdd)-0.02
# ax[0].set_ylim([min_lim, max_lim])
# # ax[1].legend(['1st run','2nd run'])
ax[0].plot(df_performance['date'], df_performance['auc'], marker='o')
# ax[0].plot(df_performance['date'], np.full((df_performance.shape[0]), auc_kdd), ls='dashed', color = 'r')
# ax[2].plot(df_performance_2nd['date'], df_performance_2nd['auc'], marker='o')
ax[0].set_title("auc")
max_lim = df_performance['auc'].max() + 0.02
min_lim = df_performance['auc'].min() - 0.02
ax[0].set_ylim([min_lim, max_lim])
ax[0].set_ylabel("AUC score")
# max_lim = max(df_performance['auc'].max(), auc_kdd)+0.02
# min_lim = min(df_performance['auc'].min(), auc_kdd)-0.02
# ax[2].legend(['1st run','2nd run'])
# ax[2].plot(df_performance['date'], df_performance['recall'], marker='o')
# ax[2].plot(df_performance['date'], np.full((df_performance.shape[0]), recall_kdd), ls='dashed', color = 'r')
# # ax[3].plot(df_performance_2nd['date'], df_performance_2nd['recall'], marker='o')
# max_lim = max(df_performance['recall'].max(), recall_kdd)+0.03
# min_lim = min(df_performance['recall'].min(), recall_kdd)-0.02
# ax[2].set_ylim([min_lim, max_lim])
# ax[2].set_title("recall")
# # ax[3].legend(['1st run','2nd run'])
# ax[3].plot(df_performance['date'], df_performance['precision'], marker='o')
# ax[3].plot(df_performance['date'], np.full((df_performance.shape[0]), precision_kdd), ls='dashed', color = 'r')
# # ax[4].plot(df_performance_2nd['date'], df_performance_2nd['precision'], marker='o')
# max_lim = max(df_performance['precision'].max(), precision_kdd)+0.03
# min_lim = min(df_performance['precision'].min(), precision_kdd)-0.02
# ax[3].set_ylim([min_lim, max_lim])
# ax[3].set_title("precision")
# ax[4].legend(['1st run','2nd run'])
ax[1].plot(df_performance['date'], df_performance['f1'], marker='o')
# ax[5].plot(df_performance_2nd['date'], df_performance_2nd['f1'], marker='o')
ax[1].set_title("F1")
ax[1].set_ylabel("F1 score")
# ax[5].legend(['1st run','2nd run'])
for ax in fig.axes:
# plt.sca(ax)
# plt.xticks(rotation=45)
# ax.set_xticks(np.arange(len(df_performance['date'])))
# ax.set_xticklabels(df_performance['date'].dt.strftime("%m-%d-%y"), rotation = -45, ha='left')
# ax.legend([post_deploy, pre_deploy], loc = 'upper left')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=SU))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.xaxis.set_tick_params(rotation=-45)
ax.set_xticklabels(lst_date, ha='left')
fig.suptitle("Performance Metrics Over Time(Commercial)", fontsize=15)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig("analysis_plotting/Performance Metrics Over Time_Commercial.pdf")
# ### Mean/std tendency
# In[1116]:
df_statistic = pd.DataFrame()
for col in df_performance:
if col in ['accuracy', 'auc', 'f1', 'kappa', 'precision' ,'recall']:
df_statistic['mean_%s'%col] = df_performance[col].expanding().mean()
df_statistic['std_%s'%col] = df_performance[col].expanding().std()
df_statistic['date'] = df_performance['date']
df_statistic = df_statistic.iloc[1:,:].reset_index(drop=True)
label_time_window = pd.Series(["over %d week"%i for i in range(1,df_performance.shape[0])], name='time window')
df_statistic = pd.concat([df_statistic,label_time_window], axis=1)
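# Reminder of what .expanding() does here: the window grows one row at a time, so for a
# series [a, b, c] expanding().mean() yields [a, (a+b)/2, (a+b+c)/3]; the first row has
# an undefined (NaN) expanding std, which is why row 0 is dropped above.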
# In[62]:
# #### 2nd run ###
# df_statistic_2nd = pd.DataFrame()
# for col in df_performance_2nd:
# if col in ['accuracy', 'auc', 'f1', 'kappa', 'precision' ,'recall']:
# df_statistic_2nd['mean_%s'%col] = df_performance_2nd[col].expanding().mean()
# df_statistic_2nd['std_%s'%col] = df_performance_2nd[col].expanding().std()
# df_statistic_2nd['date'] = df_performance_2nd['date']
# df_statistic_2nd = df_statistic_2nd.iloc[1:,:].reset_index(drop=True)
# label_time_window_2nd = pd.Series(["over %d week"%i for i in range(1,21)], name='time window')
# df_statistic_2nd = pd.concat([df_statistic_2nd,label_time_window_2nd], axis=1)
# In[1432]:
fig, axes = plt.subplots(1,2,figsize=(25,5))
ax = axes.flat
df_statistic.plot(x='time window', y='mean_auc', ax = ax[0], marker='o', legend=False)
ax[0].set_title("Mean of AUC Score Over Time")
max_lim = df_statistic['mean_auc'].max() + 0.02
min_lim = df_statistic['mean_auc'].min() - 0.02
ax[0].set_ylim([min_lim, max_lim])
df_statistic.plot(x='time window', y='std_auc', ax = ax[1], marker='o', legend=False)
ax[1].set_title("Standard Deviation of AUC Score Over Time")
max_lim = df_statistic['std_auc'].max() + 0.02
min_lim = df_statistic['std_auc'].min() - 0.02
ax[1].set_ylim([min_lim, max_lim])
for ax in fig.axes:
ax.set_xticks(np.arange(len(df_statistic['time window'])))
ax.set_xticklabels(df_statistic['time window'], rotation = -45, ha='left')
ax.set_ylabel("Mean of AUC score")
plt.tight_layout()
plt.savefig("analysis_plotting/Mean_STD of AUC Over Time_Commercial.pdf")
# In[1134]:
fig, axes = plt.subplots(1,2,figsize=(25,5))
ax = axes.flat
df_statistic.plot(x='time window', y='mean_f1', ax = ax[0], marker='o', legend=False)
ax[0].set_title("Mean of F1 Score Over Time")
max_lim = df_statistic['mean_f1'].max() + 0.02
min_lim = df_statistic['mean_f1'].min() - 0.02
ax[0].set_ylim([min_lim, max_lim])
df_statistic.plot(x='time window', y='std_f1', ax = ax[1], marker='o', legend=False)
ax[1].set_title("Standard Deviation of F1 Score Over Time")
max_lim = df_statistic['std_f1'].max() + 0.02
min_lim = df_statistic['std_f1'].min() - 0.02
ax[1].set_ylim([min_lim, max_lim])
for ax in fig.axes:
ax.set_xticks(np.arange(len(df_statistic['time window'])))
ax.set_xticklabels(df_statistic['time window'], rotation = -45, ha='left')
ax.set_ylabel("Mean of F1 score")
plt.tight_layout()
plt.savefig("analysis_plotting/Mean_STD of F1 Over Time_Commercial.pdf")
# In[218]:
fig, axes = plt.subplots(2,2,figsize=(25,10))
ax = axes.flat
# df_statistic.plot(x='time window', y='mean_accuracy', ax = ax[0], marker='o')
# # df_statistic_2nd.plot(x='time window', y='mean_accuracy', ax = ax[0], marker='o')
# ax[0].set_title("mean_accuracy")
# ax[0].legend(['1st run','2nd run'])
df_statistic.plot(x='time window', y='mean_kappa', ax = ax[0], marker='o')
ax[0].plot(np.full((df_statistic.shape[0]), kappa_kdd), ls='dashed', color = 'r')
# df_statistic_2nd.plot(x='time window', y='mean_kappa', ax = ax[1], marker='o')
ax[0].set_title("mean_kappa")
max_lim = max(df_statistic['mean_kappa'].max(), kappa_kdd)+0.02
min_lim = min(df_statistic['mean_kappa'].min(), kappa_kdd)-0.02
ax[0].set_ylim([min_lim, max_lim])
# ax[1].legend(['1st run','2nd run'])
df_statistic.plot(x='time window', y='mean_auc', ax = ax[1], marker='o')
ax[1].plot(np.full((df_performance.shape[0]), auc_kdd), ls='dashed', color = 'r')
# df_statistic_2nd.plot(x='time window', y='mean_auc', ax = ax[2], marker='o')
ax[1].set_title("mean_auc")
max_lim = max(df_statistic['mean_auc'].max(), auc_kdd)+0.02
min_lim = min(df_statistic['mean_auc'].min(), auc_kdd)-0.02
ax[1].set_ylim([min_lim, max_lim])
# ax[2].legend(['1st run','2nd run'])
df_statistic.plot(x='time window', y='mean_recall', ax = ax[2], marker='o')
ax[2].plot(np.full((df_performance.shape[0]), recall_kdd), ls='dashed', color = 'r')
# df_statistic_2nd.plot(x='time window', y='mean_recall', ax = ax[3], marker='o')
max_lim = max(df_statistic['mean_recall'].max(), recall_kdd)+0.02
min_lim = min(df_statistic['mean_recall'].min(), recall_kdd)-0.02
ax[2].set_ylim([min_lim, max_lim])
ax[2].set_title("mean_recall")
# ax[3].legend(['1st run','2nd run'])
df_statistic.plot(x='time window', y='mean_precision', ax = ax[3], marker='o')
ax[3].plot(np.full((df_performance.shape[0]), precision_kdd), ls='dashed', color = 'r')
# df_statistic_2nd.plot(x='time window', y='mean_precision', ax = ax[4], marker='o')
max_lim = max(df_statistic['mean_precision'].max(), precision_kdd)+0.02
min_lim = min(df_statistic['mean_precision'].min(), precision_kdd)-0.02
ax[3].set_ylim([min_lim, max_lim])
ax[3].set_title("mean_precision")
# ax[4].legend(['1st run','2nd run'])
# df_statistic.plot(x='time window', y='mean_f1', ax = ax[5], marker='o')
# # df_statistic_2nd.plot(x='time window', y='mean_f1', ax = ax[5], marker='o')
# ax[5].set_title("mean_f1")
# ax[5].legend(['1st run','2nd run'])
for ax in fig.axes:
# plt.sca(ax)
# plt.xticks(rotation=45)
ax.set_xticks(np.arange(len(df_statistic['time window'])))
ax.set_xticklabels(df_statistic['time window'], rotation = -45, ha='left')
ax.legend([post_deploy, pre_deploy], loc = 'upper left')
fig.suptitle("Mean of Performance Metrics Over Time(Commercial)", fontsize=15)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig("analysis_plotting/Mean of Performance Metrics Over Time_Commercial.pdf")
# In[216]:
fig, axes = plt.subplots(2,2,figsize=(25,10))
ax = axes.flat
# df_statistic.plot(y='std_accuracy', ax = ax[0], marker='o')
# # df_statistic_2nd.plot(x='time window', y='std_accuracy', ax = ax[0], marker='o')
# ax[0].set_title("std_accuracy")
# ax[4].legend(['1st run','2nd run'])
df_statistic.plot(y='std_kappa', ax = ax[0], marker='o')
# df_statistic_2nd.plot(x='time window', y='std_kappa', ax = ax[1], marker='o')
ax[0].set_title("std_kappa")
# ax[0].legend(['1st run','2nd run'])
df_statistic.plot(y='std_auc', ax = ax[1], marker='o')
# df_statistic_2nd.plot(x='time window', y='std_auc', ax = ax[2], marker='o')
ax[1].set_title("std_auc")
# ax[1].legend(['1st run','2nd run'])
df_statistic.plot(y='std_recall', ax = ax[2], marker='o')
# df_statistic_2nd.plot(x='time window', y='std_recall', ax = ax[3], marker='o')
ax[2].set_title("std_recall")
# ax[2].legend(['1st run','2nd run'])
df_statistic.plot(y='std_precision', ax = ax[3], marker='o')
# df_statistic_2nd.plot(x='time window', y='std_precision', ax = ax[4], marker='o')
ax[3].set_title("std_precision")
# ax[3].legend(['1st run','2nd run'])
# df_statistic.plot(y='std_f1', ax = ax[5], marker='o')
# # df_statistic_2nd.plot(x='time window', y='std_f1', ax = ax[5], marker='o')
# ax[5].set_title("std_f1")
# ax[5].legend(['1st run','2nd run'])
for ax in fig.axes:
# plt.sca(ax)
# plt.xticks(rotation=45)
ax.set_xticks(np.arange(len(df_statistic['time window'])))
ax.set_xticklabels(df_statistic['time window'], rotation = -45, ha='left')
fig.suptitle("Standard Deviation of Performance Metrics Over Time(Commercial)", fontsize=15)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig("analysis_plotting/Std of Performance Metrics Over Time_Commercial.pdf")
# ### Empirical Risk Score
# In[1516]:
lst_f_res.sort()
lst_f_input.sort()
lst_df_pred = []
for filename in lst_f_res:
df_pred = pd.read_csv(filename)
lst_df_pred.append(df_pred)
lst_df_truth = []
for filename in lst_f_input:
df_truth = pd.read_csv(filename)
lst_df_truth.append(df_truth)
# In[1517]:
def groupRiskScore(x):
if (x <= 0.1):
return 1
elif (x <=0.2):
return 2
elif (x <=0.3):
return 3
elif (x <=0.4):
return 4
elif (x <=0.5):
return 5
elif (x <=0.6):
return 6
elif (x <=0.7):
return 7
elif (x <=0.8):
return 8
elif (x <=0.9):
return 9
else:
return 10
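# Equivalent one-liner for the bucketing above (sketch, using the numpy already
# imported as np): decile = int(np.clip(np.ceil(x * 10), 1, 10))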
fig, ax = plt.subplots(1,1,figsize=(15,10))
lst_name = []
for i in range(len(lst_f_res)):
    curve_name = (lst_f_res[i])[22:-4]
    if (i%7!=0 or "2018-02" in curve_name):
        continue
# if "2018-09" not in curve_name and "2018-02" not in curve_name:
# continue
lst_name.append(curve_name)
df_pred = lst_df_pred[i]
df_truth = lst_df_truth[i]
df_riskscore = pd.concat([df_pred['RiskScore'], df_truth['fire']], axis=1)
df_riskscore['group'] = df_riskscore['RiskScore'].apply(lambda x: groupRiskScore(x))
res = df_riskscore.groupby('group')['fire'].mean()
# ax.bar(range(len(res)), res, width=0.2, capsize=5,
# yerr = df_riskscore.groupby('group')['fire'].apply(lambda x: np.sqrt(x.mean()*(1-x.mean()) / x.count())))
# df_riskscore['group'] = pd.qcut(df_riskscore['RiskScore'], q=10)
# res = df_riskscore.groupby('group')['fire'].mean()
ax.plot(list(res.index), res, marker='o')
# break
# ax.set_xticks(range(len(res)))
# ax.set_xticklabels(["%.1f"%((i+1)*0.1) for i in range(len(res))])
ax.set_xticks(np.arange(1, 11, 1))
ax.set_xticklabels(["%dth"%i for i in ax.get_xticks()])
ax.set_title("Empirical Risk Curve (Commercial)", y=1.02)
ax.set_xlabel("Decile")
ax.set_ylabel("Mean Empirical Risk")
ax.legend(lst_name, title='Time window end at', ncol=3)
# plt.savefig("analysis_plotting/Empirical Risk Curve_Commercial.pdf")
# ### Actual empirical risk curve
# In[1504]:
lst_df_pred_actual = []
for filename in lst_f_actual_res:
df_pred_actual = pd.read_csv(filename)
lst_df_pred_actual.append(df_pred_actual)
# In[1509]:
def encode_income(df):
lower_bound_income = [int(re.search(r'\$([0-9|,]+)', i).group(1).replace(",","")) for i in df.columns if "Estimate; Total:" in i and "$" in i]
idx_poor_start = 3
idx_rich_end = 3 + len(lower_bound_income)
df['index_thres'] = df.apply(lambda x: bisect(lower_bound_income, x.Threshold), axis=1)
df['idx_poor_end'] = df.apply(lambda x: int(idx_poor_start + x.index_thres - 1), axis=1)
df['idx_rich_start'] = df.apply(lambda x: int(x.idx_poor_end), axis=1)
df['poor_total'] = df.apply(lambda x: x[idx_poor_start:df.loc[x.name,'idx_poor_end']].sum(), axis=1)
df['rich_total'] = df.apply(lambda x: x[df.loc[x.name,'idx_rich_start']:idx_rich_end].sum(), axis=1)
df['income'] = df.apply(lambda x: 0 if x['poor_total'] >= x['rich_total'] else 1, axis=1)
return df
def groupRiskScore(x):
if (x <= 0.1):
return 1
elif (x <=0.2):
return 2
elif (x <=0.3):
return 3
elif (x <=0.4):
return 4
elif (x <=0.5):
return 5
elif (x <=0.6):
return 6
elif (x <=0.7):
return 7
elif (x <=0.8):
return 8
elif (x <=0.9):
return 9
else:
return 10
def calculateStd(x):
return (x.mean()*(1-x.mean()) / x.count())**0.5
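# calculateStd is the standard error of a sample proportion, sqrt(p * (1 - p) / n); the
# plotting cells below use it (doubled in the later cells for a rough 95% interval) as
# the yerr half-width around each decile's mean empirical fire rate.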
# In[1495]:
pd.options.mode.chained_assignment = None
bar_width = 0.3
lst_colors = ['r','b']
fig, axes = plt.subplots(2,1,figsize=(15,10))
lst_name = []
for i in range(len(lst_f_actual_res)):
curve_name = (lst_f_actual_res[i]).replace("result_assess/actual_model_result/merged_threshold_", "").replace(".csv", "")
if (curve_name == "March"):
curve_name = "2018-03-04"
else:
curve_name = "2018-09-23"
df_riskscore = lst_df_pred_actual[i]
df_riskscore = df_riskscore.loc[:, ['RiskScore'] + ['Actual Fire'] + [i for i in df_riskscore.columns if "Estimate; Total:" in i and "$" in i]].rename(columns={'Actual Fire': 'fire'})
df_riskscore.insert(2, 'Threshold', value=lst_df_pred_actual[i]['thresh'])
df_riskscore = encode_income(df_riskscore)
df_riskscore['group'] = df_riskscore['RiskScore'].apply(lambda x: groupRiskScore(x))
df_riskscore_poor = df_riskscore[df_riskscore['income']==0]
df_riskscore_rich = df_riskscore[df_riskscore['income']==1]
res_poor = df_riskscore_poor.groupby('group')['fire'].mean()
axes[i].bar(list(res_poor.index-bar_width/2), res_poor, width=bar_width, capsize=5,
yerr = df_riskscore_poor.groupby('group')['fire'].apply(lambda x: calculateStd(x)))
res_rich = df_riskscore_rich.groupby('group')['fire'].mean()
axes[i].bar(list(res_rich.index+bar_width/2), res_rich, width=bar_width, capsize=5,
yerr = df_riskscore_rich.groupby('group')['fire'].apply(lambda x: calculateStd(x)))
axes[i].set_xticks(np.arange(1, 11, 1))
axes[i].set_xticklabels(["%dth"%i for i in axes[i].get_xticks()])
axes[i].set_xlabel("Decile")
axes[i].set_ylabel("Mean Empirical Risk")
axes[i].legend(['Average household income below threshold', 'Average household income above threshold'], title='Time window end at {}'.format(curve_name), ncol=1)
axes[i].set_title("Empirical Risk Bar Plot at {} (Commercial)".format(curve_name), fontsize=15)
plt.tight_layout()
plt.savefig("analysis_plotting/Empirical Risk Curve_Commercial_actual.pdf")
# In[1587]:
pd.options.mode.chained_assignment = None
bar_width = 0.3
lst_colors = ['r','b']
fig, axes = plt.subplots(1,1,figsize=(15,5))
lst_name = []
for i in range(len(lst_f_actual_res)):
curve_name = (lst_f_actual_res[i]).replace("result_assess/actual_model_result/merged_threshold_", "").replace(".csv", "")
if (curve_name == "March"):
curve_name = "03-04-2018"
df_pred_actual = lst_df_pred_actual[i];
break
df_riskscore = df_pred_actual
df_riskscore = df_riskscore.loc[:, ['RiskScore'] + ['Actual Fire'] + [i for i in df_riskscore.columns if "Estimate; Total:" in i and "$" in i]].rename(columns={'Actual Fire': 'fire'})
df_riskscore.insert(2, 'Threshold', value=df_pred_actual['thresh'])
df_riskscore = encode_income(df_riskscore)
df_riskscore['group'] = df_riskscore['RiskScore'].apply(lambda x: groupRiskScore(x))
df_riskscore_poor = df_riskscore[df_riskscore['income']==0]
df_riskscore_rich = df_riskscore[df_riskscore['income']==1]
res_poor = df_riskscore_poor.groupby('group')['fire'].mean()
axes.bar(list(res_poor.index-bar_width/2), res_poor, width=bar_width, capsize=5,
yerr = df_riskscore_poor.groupby('group')['fire'].apply(lambda x: calculateStd(x)*2))
res_rich = df_riskscore_rich.groupby('group')['fire'].mean()
axes.bar(list(res_rich.index+bar_width/2), res_rich, width=bar_width, capsize=5,
yerr = df_riskscore_rich.groupby('group')['fire'].apply(lambda x: calculateStd(x)*2))
axes.set_xticks(np.arange(1, 11, 1))
axes.set_xticklabels(["%dth"%i for i in axes.get_xticks()])
axes.set_xlabel("Decile", fontsize=fontsize)
axes.set_ylabel("Mean Empirical Risk", fontsize=fontsize)
axes.legend(['Average household income below threshold', 'Average household income above threshold'], ncol=1)
axes.set_title("Model iteration {}".format(curve_name), fontsize=fontsize)
plt.tight_layout()
plt.savefig("analysis_plotting/Empirical_Risk_Curve_Commercial_March.pdf")
# In[1588]:
pd.options.mode.chained_assignment = None
bar_width = 0.3
lst_colors = ['r','b']
fig, axes = plt.subplots(1,1,figsize=(15,5))
lst_name = []
for i in range(len(lst_f_actual_res)):
curve_name = (lst_f_actual_res[i]).replace("result_assess/actual_model_result/merged_threshold_", "").replace(".csv", "")
if (curve_name == "Sept"):
curve_name = "09-23-2018"
df_pred_actual = lst_df_pred_actual[i];
break
df_riskscore = df_pred_actual
df_riskscore = df_riskscore.loc[:, ['RiskScore'] + ['Actual Fire'] + [i for i in df_riskscore.columns if "Estimate; Total:" in i and "$" in i]].rename(columns={'Actual Fire': 'fire'})
df_riskscore.insert(2, 'Threshold', value=df_pred_actual['thresh'])
df_riskscore = encode_income(df_riskscore)
df_riskscore['group'] = df_riskscore['RiskScore'].apply(lambda x: groupRiskScore(x))
df_riskscore_poor = df_riskscore[df_riskscore['income']==0]
df_riskscore_rich = df_riskscore[df_riskscore['income']==1]
res_poor = df_riskscore_poor.groupby('group')['fire'].mean()
axes.bar(list(res_poor.index-bar_width/2), res_poor, width=bar_width, capsize=5,
yerr = df_riskscore_poor.groupby('group')['fire'].apply(lambda x: calculateStd(x)*2))
res_rich = df_riskscore_rich.groupby('group')['fire'].mean()
axes.bar(list(res_rich.index+bar_width/2), res_rich, width=bar_width, capsize=5,
yerr = df_riskscore_rich.groupby('group')['fire'].apply(lambda x: calculateStd(x)*2))
axes.set_xticks(np.arange(1, 11, 1))
axes.set_xticklabels(["%dth"%i for i in axes.get_xticks()])
axes.set_xlabel("Decile", fontsize=fontsize)
axes.set_ylabel("Mean Empirical Risk", fontsize=fontsize)
axes.legend(['Average household income below threshold', 'Average household income above threshold'], ncol=1)
axes.set_title("Model iteration {}".format(curve_name), fontsize=fontsize)
plt.tight_layout()
plt.savefig("analysis_plotting/Empirical_Risk_Curve_Commercial_Sept.pdf")
# In[1591]:
test = df_riskscore_poor[df_riskscore_poor['group'] == 8]
# In[1601]:
np.sqrt((test[test['fire'] == 1].shape[0]/test.shape[0] * test[test['fire'] == 0].shape[0]/test.shape[0])/9)
# In[1604]:
calculateStd(test['fire'])*2
# ### Precision at top K
# In[244]:
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(20,5))
lst_name = []
num = 10
for i in range(len(lst_f_res)):
curve_name = (lst_f_res[i])[22:-4]
if (i%5!=0):
continue
# if "2018-07" not in curve_name and "2018-02-16" not in curve_name:
# continue
lst_name.append(curve_name)
df_pred = lst_df_pred[i]
df_truth = lst_df_truth[i]
df_precision = pd.concat([df_truth['fire'], df_pred[['Fire', 'RiskScore']]], axis=1)
df_precision = df_precision.rename(columns = {'fire':'fire_truth', 'Fire':'fire_pred', 'RiskScore':'RiskScore'})
df_precision = df_precision.sort_values(by='RiskScore', ascending=False)
length = df_precision.shape[0]
group = length//num + 1
pos_num=0
lst_prec = []
lst_recall = []
lst_topK_prec = []
lst_topK_recall = []
for k in range(group):
if (k==group):
pos_num = length
y_label = np.ones((pos_num,1))
lst_topK.append(pos_num)
else:
pos_num = k*num
y_label = np.concatenate([np.ones((pos_num,1)), np.zeros((length-pos_num,1))], axis=0)
if pos_num>=200:
break
if k==0:
lst_topK_recall.append(pos_num)
recall = recall_score(df_precision['fire_truth'], y_label)
lst_recall.append(recall)
continue
lst_topK_prec.append(pos_num)
prec = precision_score(df_precision['fire_truth'], y_label)
lst_prec.append(prec)
lst_topK_recall.append(pos_num)
recall = recall_score(df_precision['fire_truth'], y_label)
lst_recall.append(recall)
ax1.plot(lst_topK_prec, lst_prec, marker='o')
ax2.plot(lst_topK_recall, lst_recall, marker='o')
lst_xtick_prec = [i*num for i in list(range(group)) if i!=0]
# ax1.set_xticks(lst_xtick_prec)
ax1.set_xlabel("Top K")
ax1.set_ylabel("Precision")
ax1.legend(lst_name, title='Time window ends at', ncol=3)
lst_xtick_recall = [i*num for i in list(range(group))]
# ax2.set_xticks(lst_xtick_recall)
ax2.set_xlabel("Top K")
ax2.set_ylabel("Recall")
ax2.legend(lst_name, title='Time window ends at', ncol=3)
ax1.set_title("Precision at top K (Commercial)", y=1.02)
ax2.set_title("Recall at top K (Commercial)", y=1.02)
plt.tight_layout()
plt.savefig("analysis_plotting/Precision_Recall at Top K Curve_Commercial.pdf")
# ### Jaccard score
# In[163]:
lst_f_res.sort()
lst_f_input.sort()
lst_df_pred = []
for filename in lst_f_res:
df_pred = pd.read_csv(filename)
lst_df_pred.append(df_pred)
lst_df_truth = []
for filename in lst_f_input:
df_truth = pd.read_csv(filename)
lst_df_truth.append(df_truth)
# In[164]:
lst_jcd = []
for i in range(len(lst_df_pred)):
df_pred, df_truth = lst_df_pred[i], lst_df_truth[i]
y_pred, y_true = df_pred['Fire'], df_truth['fire']
jcd_score = jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None)
lst_jcd.append(jcd_score)
lst_date = [fn.replace("result_assess/Results_", "").replace(".csv","") for fn in lst_f_res]
lst_date = [datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%m-%d-%y') for date in lst_date]
# In[165]:
fig, ax = plt.subplots(1,1,figsize=(15,5))
ax.plot(lst_date, lst_jcd, marker='o')
ax.set_xlabel('End date of time window')
ax.set_ylabel('Jaccard Score')
ax.set_xticks(np.arange(len(lst_date)))
ax.set_xticklabels(lst_date, rotation = -45)
ax.set_title("Jaccard Score(Commercial)", y=1.08)
plt.tight_layout()
plt.savefig("analysis_plotting/Jaccard Score Curve_Commercial.pdf")
# ### Feature Importance
# In[1478]:
lst_f_featureimportance_actual.sort()
lst_df_fi = []
for filename in lst_f_featureimportance_actual:
df_fi = pd.read_csv(filename,header=None)
lst_df_fi.append(df_fi)
lst_date = [fn.replace("log_assess/actual_model_performance/FeatureImportanceList_", "")[:10] for fn in lst_f_featureimportance_actual]
# lst_date = [datetime.datetime.strptime(i, "%Y-%m-%d").strftime("%m-%d-%y") for i in lst_date]
# In[1226]:
lst_date[0]
# In[1479]:
for idx, df_fi in enumerate(lst_df_fi):
if (lst_date[idx] < start_date):
continue
date_fi = datetime.datetime.strptime(lst_date[idx], "%Y-%m-%d").strftime("%m-%d-%y")
df_fi = df_fi.rename(columns={0:'feature', 1: date_fi})
break
for i in range(idx+1, len(lst_df_fi)):
df = lst_df_fi[i]
date = datetime.datetime.strptime(lst_date[i], "%Y-%m-%d").strftime("%m-%d-%y")
df = df.rename(columns={0:'feature', 1: date})
df_fi = df_fi.merge(df, on='feature')
df_fi['feature'] = df_fi['feature'].apply(lambda x: x.strip())
proportion = 0.4
idx = int(df_fi.shape[0] * proportion)
fig, ax = plt.subplots(1,1,figsize=(15,5))
df_fi = df_fi.set_index('feature')
sns.heatmap(df_fi.iloc[:idx,:], ax = ax, cmap = sns.cm.rocket_r)
ax.set_title("Feature Importance Heatmap(Commercial)")
ax.set_xlabel('End date of time window')
ax.set_ylabel('Feature importance')
plt.xticks(rotation=-45, ha='left')
plt.tight_layout()
plt.savefig("analysis_plotting/Feature Importance Over Time Heatmap_Commercial.pdf")
# In[1525]:
fi = df_fi.iloc[1,:].values
# In[1542]:
df_rank = df_fi.rank(ascending=False)
# In[1551]:
df_rank
# In[1570]:
lst_corrs = []
for idx, col in enumerate(df_rank):
# if (idx == 0):
# continue
curr_rank = df_rank[col].values
last_rank = df_rank.iloc[:,0].values
corr = pearsonr(curr_rank, last_rank)[0]
lst_corrs.append(corr)
# In[1573]:
pearsonr(lst_corrs, df_performance['f1'])[0]
# In[1577]:
plt.plot(lst_corrs)
# In[1569]:
plt.plot(df_performance['f1'])
# In[1529]:
f1 = df_performance['f1'].values
# In[1532]:
from scipy.stats.stats import pearsonr
pearsonr(fi, f1)
# In[1576]:
plt.plot(fi)
# In[1535]:
plt.plot(f1)
# In[1235]:
fig, ax = plt.subplots(1,1,figsize=(15,8))
lst_feature = ['']+list(df_fi.index)
df_fi.std(axis=1).plot(style='o', ax = ax, xticks=[])
ax.set_xticks(np.arange(-1, 35), minor=True)
ax.set_xticklabels(lst_feature, minor=True, rotation=-45, ha='left')
ax.set_title("Standard Deviation of Feature Importance over time(Commercial)", y=1.1)
ax.set_xlabel('Features')
ax.set_ylabel('Std of feature importance score')
plt.tight_layout()
plt.savefig("analysis_plotting/Std of Feature Importance Over Time_Commercial.pdf")
# ### Risk score over time
# In[9]:
lst_f_res.sort()
lst_f_input.sort()
lst_df_pred = []
for filename in lst_f_res:
df_pred = pd.read_csv(filename)
lst_df_pred.append(df_pred)
# In[10]:
high_risk = 3
medium_risk = 2
low_risk = 1
start_date = "2018-03-04"
lst_high_risk = []
lst_medium_risk = []
lst_low_risk = []
def groupRiskScore(x):
if (x <= 0.3):
return low_risk
elif (x <=0.6):
return medium_risk
else:
return high_risk
for df in lst_df_pred:
risk_group = df['RiskScore'].apply(lambda x: groupRiskScore(x))
risk_group_count = risk_group.groupby(risk_group).count()
risk_group_count = risk_group_count / risk_group_count.sum()
print(df.shape[0] * 0.0025,
df.shape[0] * (risk_group_count[medium_risk]),
df.shape[0] * (risk_group_count[high_risk]))
lst_high_risk.append(risk_group_count[high_risk])
lst_medium_risk.append(risk_group_count[medium_risk])
lst_low_risk.append(risk_group_count[low_risk])
lst_date = []
for i in range(len(lst_f_res)):
date = (lst_f_res[i])[42:52]
lst_date.append(date)
lst_name = ['Low Risk Group', 'Medium Risk Group', 'High Risk Group']
df_riskGroup = pd.DataFrame({lst_name[0]: lst_low_risk,
lst_name[1]: lst_medium_risk,
lst_name[2]: lst_high_risk,
"date": lst_date})
df_riskGroup = df_riskGroup[(df_riskGroup['date'] >= start_date)]
df_riskGroup = df_riskGroup.groupby('date').mean().reset_index(drop=False).sort_values('date')
df_riskGroup['date'] = pd.to_datetime(df_riskGroup['date'])
lst_date = [""] + df_riskGroup['date'].dt.strftime('%m-%d-%y').tolist() + [""]
# In[1485]:
test = lst_df_pred[0]
# In[1490]:
test.shape[0] * (1-0.9525)
# In[1483]:
fig, axes = plt.subplots(3,1, figsize=(15,10))
for idx, ax in enumerate(axes):
ax.plot(df_riskGroup['date'], df_riskGroup[lst_name[idx]], marker='o')
ax.set_title(lst_name[idx])
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=SU))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.xaxis.set_tick_params(rotation=-45)
ax.set_xticklabels(lst_date, ha='left')
ax.set_xlabel("Time")
ax.set_ylabel("Proportion of Group")
fig.suptitle("Proportion of Different Risk Groups Over Time", fontsize=15)
plt.tight_layout()
plt.subplots_adjust(top=0.92)
plt.savefig("analysis_plotting/Risk group distribution Over Time_Commercial.pdf")
# In[1431]:
fig, ax = plt.subplots(1,1, figsize=(20,8))
lns1 = ax.plot(df_riskGroup['date'], df_riskGroup[lst_name[1]], marker='o', color = 'r', label = "Low Risk Group")
lns2 = ax.plot(df_riskGroup['date'], df_riskGroup[lst_name[2]], marker='o', color = 'g', label = "Medium Risk Group")
ax_high = ax.twinx()
ax_high.grid(False)
lns3 = ax_high.plot(df_riskGroup['date'], df_riskGroup[lst_name[0]], marker='o', color = 'b', label = "High Risk Group")
ax_high.set_ylabel("Proportion of High Risk Group")
# ax_high.set_ylim([0.85, 0.98])
lns = lns1 + lns2 + lns3
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc="upper right")
ax.set_title("Proportion of Different Risk Groups Over Time")
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=SU))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.xaxis.set_tick_params(rotation=-45)
ax.set_xticklabels(lst_date, ha='left')
ax.set_xlabel("Time")
ax.set_ylabel("Risk score")
ax.set_ylabel("Proportion of Low/Medium Risk Group")
ax.set_ylim([0.01, 0.05])
plt.tight_layout()
plt.savefig("analysis_plotting/Risk group distribution Over Time_Commercial_OnePlot.pdf")
# In[515]:
lst_date = []
for i in range(len(lst_f_res)):
date = (lst_f_res[i])[47:52]
lst_date.append(date)
df_res_all = None
df_res = lst_df_pred[0]
date = lst_date[0]
df_res_noDup = df_res.drop_duplicates(subset='Address', keep=False)[['Address', 'RiskScore']]
df_res_all = df_res_noDup.rename(columns={'RiskScore': date})
for idx, df_res in enumerate(lst_df_pred):
if (idx == 0):
continue
df_res = lst_df_pred[idx]
date = lst_date[idx]
df_res_noDup = df_res.drop_duplicates(subset='Address', keep=False)[['Address', 'RiskScore']]
df_res_all = df_res_all.merge(df_res_noDup, how='inner', on='Address').rename(columns={'RiskScore': date})
# In[488]:
df_res_all = df_res_all.transpose()
df_res_all.columns = df_res_all.iloc[0]
df_res_all = df_res_all.drop(['Address'],axis=0)
df_res_all = df_res_all.rename(columns={'Address': 'date'})
df_res_all.head().columns.name = ""
df_res_all = df_res_all.reset_index().rename(columns={'index':'date'})
df_res_all = df_res_all[(df_res_all['date'] > '02-19') & (df_res_all['date'] <= '09-02')]
df_res_all = df_res_all.drop_duplicates(subset='date').reset_index(drop=True)
# In[490]:
lst_two_dates = []
lst_corr = []
for idx , row in df_res_all.iterrows():
if (idx==0):
continue
lst_two_dates.append(df_res_all.loc[idx-1,'date'] + " with " + row['date'])
last_risk = df_res_all.iloc[idx-1, df_res_all.columns != 'date'].astype('float64')
curr_risk = row.iloc[row.index != 'date'].astype('float64')
lst_corr.append(curr_risk.corr(last_risk))
# In[493]:
fig, ax = plt.subplots(1,1, figsize=(15,5))
ax.scatter(lst_two_dates, lst_corr)
ax.set_xticklabels(lst_two_dates, rotation=45, ha='right')
ax.set_ylabel('Pearson correlation')
plt.tight_layout()
plt.savefig("analysis_plotting/Risk score correlation Over Time_Commercial.pdf")
# ### Transition probability over time
# In[12]:
lst_f_actual_res = glob.glob("result_assess/actual_model_result/Results*.csv")
lst_f_actual_res.sort()
lst_f_input.sort()
lst_df_pred = []
lst_date = []
lst_df_truth = []
for filename in lst_f_actual_res:
df_pred = pd.read_csv(filename)
lst_df_pred.append(df_pred)
date = filename[47:52]
lst_date.append(date)
# In[13]:
high_risk = 3
medium_risk = 2
low_risk = 1
def groupRiskScore(x):
if (x <= 0.3):
return low_risk
elif (x <=0.6):
return medium_risk
else:
return high_risk
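# Hedged sanity check (illustrative values only, not part of the original analysis):
# groupRiskScore(0.30) -> 1 (low), groupRiskScore(0.45) -> 2 (medium), groupRiskScore(0.75) -> 3 (high)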
# In[14]:
df_trans_all = pd.DataFrame()
df = lst_df_pred[0]
df_trans = df.groupby('Address', as_index=False)['RiskScore'].max()
df_trans['group'] = df_trans['RiskScore'].apply(lambda x: groupRiskScore(x))
date = lst_date[0]
df_trans_all = df_trans[['Address', 'group']].rename(columns={'group': date})
lst_trans_name = ["Low to Low",
"Low to Medium",
"Low to High",
"Medium to Low",
"Medium to Medium",
"Medium to High",
"High to Low",
"High to Medium",
"High to High"]
l2h_addrs = set()
h2l_addrs = set()
for idx, df_res in enumerate(lst_df_pred):
if (idx == 0):
continue
df = lst_df_pred[idx]
df_trans = df.groupby('Address', as_index=False)['RiskScore'].max()
df_trans['group'] = df_trans['RiskScore'].apply(lambda x: groupRiskScore(x))
date = lst_date[idx]
df_trans_all = df_trans_all.merge(df_trans[['Address', 'group']], on='Address').rename(columns={'group': date})
df_trans_all = df_trans_all.set_index('Address')
df_trans_all = df_trans_all[[col for col in df_trans_all.columns if col > start_date.replace("2018-", "")]]
def get_transit_count(last, curr):
# Counters
low2low = 0
low2medium = 0
low2high = 0
medium2low = 0
medium2medium = 0
medium2high = 0
high2low = 0
high2medium = 0
high2high = 0
    for idx, value in last.items():
# Previous in low risk group
if (last[idx] == low_risk):
# Current in low risk group
if (curr[idx] == low_risk):
low2low += 1
# Current in medium risk group
elif (curr[idx] == medium_risk):
low2medium += 1
# Current in high risk group
elif (curr[idx] == high_risk):
low2high += 1
l2h_addrs.add(idx)
else:
print("error")
# Previous in medium risk group
elif (last[idx] == medium_risk):
# Current in low risk group
if (curr[idx] == low_risk):
medium2low += 1
# Current in medium risk group
elif (curr[idx] == medium_risk):
medium2medium += 1
# Current in high risk group
elif (curr[idx] == high_risk):
medium2high += 1
else:
print("error")
# Previous in high risk group
elif (last[idx] == high_risk):
# Current in low risk group
if (curr[idx] == low_risk):
high2low += 1
h2l_addrs.add(idx)
# Current in medium risk group
elif (curr[idx] == medium_risk):
high2medium += 1
# Current in high risk group
elif (curr[idx] == high_risk):
high2high += 1
else:
print("error")
else:
print("error")
    total = (low2low + low2medium + low2high + medium2low +
             medium2medium + medium2high + high2low + high2medium + high2high)
    if (total != last.shape[0]):
        print("Sum: {}".format(total))
        print("Shape: {}".format(last.shape[0]))
        print("error")
return (low2low,
low2medium,
low2high,
medium2low,
medium2medium,
medium2high,
high2low,
high2medium,
high2high)
lst_pair_dates = []
lst_trans_ll = []
lst_trans_lm = []
lst_trans_lh = []
lst_trans_ml = []
lst_trans_mm = []
lst_trans_mh = []
lst_trans_hl = []
lst_trans_hm = []
lst_trans_hh = []
for idx, col in enumerate(df_trans_all):
if (idx == 0):
continue
(low2low,
low2medium,
low2high,
medium2low,
medium2medium,
medium2high,
high2low,
high2medium,
high2high) = get_transit_count(df_trans_all.iloc[:, idx-1], df_trans_all[col])
lst_trans_ll.append(low2low)
lst_trans_lm.append(low2medium)
lst_trans_lh.append(low2high)
lst_trans_ml.append(medium2low)
lst_trans_mm.append(medium2medium)
lst_trans_mh.append(medium2high)
lst_trans_hl.append(high2low)
lst_trans_hm.append(high2medium)
lst_trans_hh.append(high2high)
pair_date = "{} with {}".format(df_trans_all.iloc[:, idx-1].name, df_trans_all[col].name)
lst_pair_dates.append(pair_date)
df_trans_counts = pd.DataFrame({'date': lst_pair_dates,
lst_trans_name[0]: lst_trans_ll,
lst_trans_name[1]: lst_trans_lm,
lst_trans_name[2]: lst_trans_lh,
lst_trans_name[3]: lst_trans_ml,
lst_trans_name[4]: lst_trans_mm,
lst_trans_name[5]: lst_trans_mh,
lst_trans_name[6]: lst_trans_hl,
lst_trans_name[7]: lst_trans_hm,
lst_trans_name[8]: lst_trans_hh})
df_trans_counts = df_trans_counts.set_index('date')
df_trans_counts = df_trans_counts.transpose()
df_trans_counts = df_trans_counts.astype('int')
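# Hedged sketch (not part of the original pipeline): convert the raw transition counts into
# per-source-group transition probabilities, so the three rows leaving each group sum to 1
# within every date pair (periods with no transitions out of a group yield NaN).
src_group = df_trans_counts.index.str.split(" to ").str[0]
df_trans_probs = df_trans_counts.div(df_trans_counts.groupby(src_group).transform("sum"))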
# In[18]:
# color = 'gist_heat'
def heatmap_scale_minmax(df, ax):
ax = sns.heatmap(df.sub(df.min(axis=1), axis=0).divide(df.max(axis=1)- df.min(axis=1), axis=0),
cmap = sns.cm.rocket_r,
ax=ax, cbar_kws={'label': 'MinMax scaled counts'}, )
def heatmap_unscale(df, ax, reset_cb_range=False):
if (reset_cb_range):
ticks=np.arange(df.values.min(), df.values.max()+1)
sns.heatmap(df, ax=ax, cbar_kws={'label': 'Raw counts', "ticks": ticks}, cmap = sns.cm.rocket_r)
else:
sns.heatmap(df, ax=ax, cbar_kws={'label': 'Raw counts'}, cmap = sns.cm.rocket_r)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4,1, figsize=(20,2*4))
df_trans_plot = df_trans_counts.loc[[lst_trans_name[0], lst_trans_name[4], lst_trans_name[8]], :]
heatmap_scale_minmax(df_trans_plot, ax1)
# sns.heatmap(df_trans_plot.sub(df_trans_plot.min(axis=1), axis=0).divide(df_trans_plot.max(axis=1)- df_trans_plot.min(axis=1), axis=0),
# ax=ax1)
df_trans_plot = df_trans_counts.loc[[lst_trans_name[1], lst_trans_name[3]], :]
heatmap_unscale(df_trans_plot, ax2)
# sns.heatmap(df_trans_plot.sub(df_trans_plot.min(axis=1), axis=0).divide(df_trans_plot.max(axis=1)- df_trans_plot.min(axis=1), axis=0),
# ax=ax2)
df_trans_plot = df_trans_counts.loc[[lst_trans_name[2], lst_trans_name[6]], :]
heatmap_unscale(df_trans_plot, ax3, True)
# sns.heatmap(df_trans_plot.sub(df_trans_plot.min(axis=1), axis=0).divide(df_trans_plot.max(axis=1)- df_trans_plot.min(axis=1), axis=0),
# ax=ax3)
df_trans_plot = df_trans_counts.loc[[lst_trans_name[5], lst_trans_name[7]], :]
heatmap_unscale(df_trans_plot, ax4)
# sns.heatmap(df_trans_plot.sub(df_trans_plot.min(axis=1), axis=0).divide(df_trans_plot.max(axis=1)- df_trans_plot.min(axis=1), axis=0),
# ax=ax4)
ax1.tick_params(labelbottom = False)
ax1.set_xlabel("")
ax1.set_ylabel("Transition type", fontsize=fontsize-10)
ax1.get_yaxis().set_label_coords(-0.12,0.5)
ax2.tick_params(labelbottom = False)
ax2.set_xlabel("")
ax2.set_ylabel("Transition type", fontsize=fontsize-10)
ax2.get_yaxis().set_label_coords(-0.12,0.5)
ax3.tick_params(labelbottom = False)
ax3.set_xlabel("")
ax3.set_ylabel("Transition type", fontsize=fontsize-10)
ax3.get_yaxis().set_label_coords(-0.12,0.5)
ax4.xaxis.label.set_size(fontsize)
ax4.set_xticklabels(lst_pair_dates, rotation = -45, ha='left')
ax4.set_ylabel("Transition type", fontsize=fontsize-10)
ax4.get_yaxis().set_label_coords(-0.12,0.5)
# ax4.set_xlabel("date", fontsize=fontsize)
fig.suptitle("Number of Transitions Between Different Risk Groups Over Time", fontsize=fontsize, y=0.97)
plt.tight_layout()
# plt.subplots_adjust(top=0.9)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig("analysis_plotting/RiskGroupTransition_Commercial.pdf")
# In[1466]:
lst_l2h_addrs = list(l2h_addrs)
lst_h2l_addrs = list(h2l_addrs)
# In[1473]:
with open('Low2High&High2Low_Addresses.txt', 'w') as f:
f.write("Low to High addresses({} in total):\n".format(len(l2h_addrs)))
for item in l2h_addrs:
f.write("%s\n" % item)
f.write("\nHigh to Low addresses({} in total):\n".format(len(h2l_addrs)))
for item in h2l_addrs:
f.write("%s\n" % item)
# In[812]:
df_trans_counts.transpose().plot(logy = True)
# In[767]:
fig, ax = plt.subplots(1,1, figsize=(20,5))
sns.heatmap(df_trans_counts.sub(df_trans_counts.min(axis=1), axis=0).divide(df_trans_counts.max(axis=1)- df_trans_counts.min(axis=1), axis=0),
ax=ax)
# ### Roc Curve
# In[120]:
fig, ax = plt.subplots(1,1,figsize=(10,5))
for i in range(len(lst_df_roc)):
df_roc = lst_df_roc[i].drop(['Unnamed: 0'], axis=1)
lst_date = [fn.replace("roc_assess/roc_", "").replace(".csv","") for fn in lst_f_roc]
date = lst_date[i]
df_roc.plot(x='fpr',y='tpr', ax=ax, label=date)
# In[123]:
roc_curve(y_true, y_pred, pos_label=1)
# ### Transferability
# We measure the transferability of the model signature from the very first time window to the very last time window.
# In[6]:
lst_f_res.sort()
lst_f_input.sort()
lst_df_pred = []
for filename in lst_f_res:
df_pred = pd.read_csv(filename)
lst_df_pred.append(df_pred)
# lst_df_truth = []
# for filename in lst_f_input:
# df_truth = pd.read_csv(filename)
# lst_df_truth.append(df_truth)
# In[17]:
df_ini = lst_df_pred[0]
df_pair = lst_df_pred[1]
# In[18]:
df_ini = df_ini.drop(['Unnamed: 0'], axis=1)
df_pair = df_pair.drop(['Unnamed: 0'], axis=1)
# In[26]:
key_lst = [i for i in df_ini.columns if i not in ['Fire', 'RiskScore']]
# In[35]:
(df_ini.merge(df_pair, on=key_lst)).shape
# In[31]:
df_ini.shape, df_pair.shape
# In[39]:
display(df_ini.head()), display(df_pair.head())
# In[54]:
df_ini.reset_index().merge(df_pair.reset_index(), on=key_lst).head()
# In[50]:
df2 = pd.DataFrame({'Key': ['b', 'b', 'a', 'c', 'a', 'a', 'b'], 'data2': range(7)})
df1 = pd.DataFrame({'Key': ['a', 'b', 'd'], 'data1': range(3)})
# In[51]:
display(df1,df2)
# In[52]:
pd.concat([df1, df2], axis=1)
# -*- python -*-
# -*- coding utf-8 -*-
#
# This file is part of GDSCTools software
#
# Copyright (c) 2015 - Wellcome Trust Sanger Institute
# All rights reserved
# Copyright (c) 2016 - Institut Pasteur
# All rights reserved
#
# File author(s): <NAME> <<EMAIL>>
# File author(s): <NAME> <<EMAIL>>
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""Look for IC50 vs genomic features associations using Regression methods"""
import itertools
import warnings
import pandas as pd
import pylab
import numpy as np
from easydev import Progress
from gdsctools.models import BaseModels
from gdsctools.boxswarm import BoxSwarm
from sklearn.linear_model import enet_path
from sklearn import preprocessing
from sklearn import model_selection
from sklearn import linear_model # must use the module rather than classes to
__all__ = ["Regression", 'GDSCRidge', "GDSCLasso", "GDSCElasticNet",
"RegressionCVResults"]
"""book keeping
from statsmodels.formula.api import OLS
if self.settings.regression_method == 'ElasticNet':
self.data_lm = OLS(odof.Y, df.values).fit_regularized(
alpha=self.settings.regression_alpha,
L1_wt=self.settings.regression_L1_wt)
elif self.settings.regression_method == 'OLS':
self.data_lm = OLS(odof.Y, df.values).fit()
elif self.settings.regression_method == 'Ridge':
self.data_lm = OLS(odof.Y, df.values).fit_regularized(
alpha=self.settings.regression_alpha, L1_wt=0)
elif self.settings.regression_method == 'Lasso':
self.data_lm = OLS(odof.Y, df.values).fit_regularized(
alpha=self.settings.regression_alpha, L1_wt=1)
"""
class RegressionCVResults(object):
"""Simple data structure to hold some results of the regression analysis
- :attr:`model`
- :attr:`kfold`: number of folds used
- :attr:`Rp`
- :attr:`alpha`: best alpha parameter
- :attr:`ln_alpha` best alpha parameter (log scale)
"""
def __init__(self, model, Rp, kfold=None):
self.model = model
self.Rp = Rp
self.kfold = kfold
def _get_alpha(self):
return self.model.alpha_
alpha = property(_get_alpha)
def _get_ln_alpha(self):
return pylab.log(self.alpha)
ln_alpha = property(_get_ln_alpha)
def _get_coefficients(self):
return self.model.coef_
coefficients = property(_get_coefficients)
def __str__(self):
txt = "Best alpha on %s folds: %s (%.2f in log scale); Rp=%s" %\
(self.kfold, self.alpha, self.ln_alpha, self.Rp)
return txt
class Regression(BaseModels):
"""Base class for all Regression analysis
In the :class:`gdsctools.anova.ANOVA` case, the regression is based on the
OLS method and is computed for a given drug and a given feature (:term:`ODOF`).
Then, the analysis is repeated across all features for a
given drug (:term:`ODAF`) and finally extended to all drugs (:term:`ADAF`).
So, there is one test for each combination of drug and feature.
Here, all features for a given drug are taken together to perform a
Regression analysis (:term:`ODAF`). The regression algorithm implemented so
far are:
- Ridge
- Lasso
- ElasticNet
- LassoLars
Based on tools from the scikit-learn library.
"""
def __init__(self, ic50, genomic_features=None,
verbose=False):
""".. rubric:: Constructor
:param ic50: an IC50 file
:param genomic_features: a genomic feature file
see :ref:`data` for help on the input data formats.
"""
super(Regression, self).__init__(ic50, genomic_features,
verbose=verbose, set_media_factor=False)
self.scale = False
def _get_one_drug_data(self, name, randomize_Y=False):
"""Returns X and Y for a given drug, dropping NAs
:param name: drug name
:param randomize_Y: randomize Y
- drops NA
- drops TISSUE_FACTOR
- drops MSI factor
"""
Y = self.ic50.df[name]
Y.dropna(inplace=True)
X = self.features.df.loc[Y.index].copy()
        # Drop the factor columns when present; ignore them if absent
        X = X.drop('TISSUE_FACTOR', axis=1, errors='ignore')
        X = X.drop('MSI_FACTOR', axis=1, errors='ignore')
if self.scale is True:
columns = X.columns
# cast is essential here otherwise ValueError is raised
X = preprocessing.scale(X.astype(float))
X = pd.DataFrame(X, columns=columns)
if randomize_Y:
Y = Y.copy()
pylab.shuffle(Y.values)
return X, Y
def _fit_model(self, drug_name, model):
"""call fit method of a model given a drug name
Save the current X, Y, model fitter in _X, _Y and _model attributes
"""
X, Y = self._get_one_drug_data(drug_name)
model.fit(X, Y)
return model
def plot_importance(self, drug_name, model=None, fontsize=11,
                        max_label_length=35, orientation="vertical"):
        """Plot the absolute weights found by a fitted model.
:param str drug_name:
:param model: a model
:param int fontsize: (defaults to 11)
:param max_label_length: 35 by default
:param orientation: orientation of the plot (vertical or horizontal)
:return: the dataframe with the weights (may be empty)
.. note:: if no weights are different from zeros, no plots are
created.
"""
X, Y = self._get_one_drug_data(drug_name)
if model is None:
model = self.get_best_model(drug_name)
model.fit(X, Y)
df = pd.DataFrame({'name': X.columns, 'weight': model.coef_})
df = df.set_index("name")
df = df[df['weight'] != 0]
if len(df):
barplot(df, "weight", orientation=orientation, max_label_length=max_label_length,
fontsize=fontsize)
# sometimes there is only a few weights close to zero. Set the ylim
# to 1 if it is below 0.1.
if pylab.ylim()[1] < 0.1:
pylab.ylim([0,1])
if len(df) < 5:
pylab.xlim(-5,5)
return df
def _print(self, txt):
if self.verbose:
print(txt)
def get_best_model(self, drug_name, kfolds=10, alphas=None, l1_ratio=0.5):
"""Return best model fitted using a CV
:param drug_name:
:param kfolds:
:param alphas:
:param l1_ratio:
"""
self._print("Running CV to estimate best alpha.")
results = self.runCV(drug_name, kfolds=kfolds, alphas=alphas,
l1_ratio=l1_ratio)
best_alpha = results.alpha
model = self.get_model(alpha=best_alpha)
self._print("Using alpha=%s." % model.alpha)
return model
def plot_weight(self, drug_name, model=None, fontsize=12,
figsize=(10,7), max_label_length=20, Nmax=40):
"""Plot the elastic net weights
:param drug_name: the drug identifier
:param alpha:
:param l1_ratio:
        Large alpha values have a more stringent effect on the
        weights and select only some of them, or maybe none. Conversely,
        setting alpha to zero keeps all weights.
.. plot::
:include-source:
from gdsctools import *
ic = IC50(gdsctools_data("IC50_v5.csv.gz"))
gf = GenomicFeatures(gdsctools_data("genomic_features_v5.csv.gz"))
en = GDSCElasticNet(ic, gf)
model = en.get_model(alpha=0.01)
en.plot_weight(1047, model=model)
"""
X, Y = self._get_one_drug_data(drug_name)
if model is None:
model = self.get_best_model(drug_name)
model.fit(X, Y)
df = pd.DataFrame({'name': X.columns, 'weight': model.coef_})
df = df.set_index("name").sort_values("weight")
df = df[df != 0].dropna()
# split the data keeping only 50 best weights at most
if len(df) > Nmax:
# figure out the threshold in absolute value so that the
# we keep the Nmax strongest weights irrespective of the sign
threshold = df.abs().sort_values(by="weight").values[-Nmax:][0,0]
df1 = df.query("weight<=0 and abs(weight)>=@threshold").copy()
df2 = df.query("weight>=0 and abs(weight)>=@threshold").copy()
else:
df1 = df[df.weight<0].copy()
df2 = df[df.weight>=0].copy()
df1.index = [this[0:max_label_length] for this in df1.index]
df2.index = [this[0:max_label_length] for this in df2.index]
# We also want some symmetry so as many red as blue so that the span
# of positive and negative is equivalent
N = len(df2) - len(df1)
if N > 0:
# more red LHS than blue RHS
for i in range(1, N+1):
label = "_dummy%s" % i
df1.loc[label, "weight"] = 0
df1.index = [x if not x.startswith("_dummy") else ""
for x in df1.index]
elif N < 0:
# more blue RHS than red LHS
for i in range(1, abs(N)+1):
label = "_dummy%s" % i
df2.loc[label, "weight"] = 0
df2.index = [x if not x.startswith("_dummy") else ""
for x in df2.index]
df2.sort_values(by="weight", ascending=True, inplace=True)
f, (ax, ax2) = pylab.subplots(1,2, sharey=True, figsize=(10,7))
ff = pylab.gcf()
ff.set_facecolor('white')
self.df1 = df1
self.df2 = df2
if len(df1):
df1.plot(y="weight", kind="bar", width=1, lw=1, ax=ax,
color="b", legend=False, fontsize=fontsize, figsize=figsize)
if len(df2):
df2.plot(y="weight", kind="bar", width=1, lw=1, ax=ax2,
color="r", sharey=True, legend=False, fontsize=fontsize,
figsize=figsize)
if len(df1) == 0 and len(df2) == 0:
pylab.xlim([-5,5])
pylab.ylim([-1,1])
pylab.grid(True)
return df
# hide the spines between ax and ax2
ax.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax.yaxis.tick_left()
ax2.yaxis.tick_right()
ax2.tick_params(labelleft='off')
d = 0.02 # diagonal lines
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((1 -d, 1 + d), (-d, +d), **kwargs)
ax.plot((1 -d, 1 + d), (1-d, 1+d), **kwargs)
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot(( -d, d), (1-d, 1+d), **kwargs)
ax2.plot(( -d, d), (-d, d), **kwargs)
ax.grid()
ax2.grid()
# x0, y0, width_x, width_y
ax.set_position([0.06,0.3,0.425,0.6])
ax2.set_position([0.50,0.3,0.425,0.6])
return df
def _get_rpearson(self, Y_pred, Y_test):
self.a = Y_pred
self.b = Y_test
if Y_pred.std() == 0:
Rp = 0
else:
Rp = np.corrcoef(Y_pred, Y_test)[0,1]
if abs(Rp) <1e-10:
Rp = 0
return Rp
def fit(self, drug_name, alpha=1, l1_ratio=0.5, kfolds=10,
show=True, tol=1e-3, normalize=False,
shuffle=False, perturbation=0.01, randomize_Y=False):
"""Run Elastic Net with a cross validation for one value of alpha
:param drug_name: the drug to analyse
        :param float alpha: note that this alpha parameter corresponds to the
            lambda parameter in the glmnet R package.
:param float l1_ratio: This is the lasso penalty parameter.
            Note that in scikit-learn, the l1_ratio corresponds
            to the alpha parameter in the glmnet R package. l1_ratio set to 0.5
            means that the Lasso and Ridge penalties are weighted equally.
:param int kfolds: defaults to 10
:param shuffle: shuffle the indices in the KFold
:return: kfolds scores for each fold. The score is the pearson
correlation.
        .. note:: l1_ratio < 0.01 is not reliable unless a sequence of
            alphas is provided.
        .. note:: alpha = 0 corresponds to an OLS analysis
"""
assert kfolds > 1, "kfolds must be larger than 1"
# Get the data for the requested drug
X, Y = self._get_one_drug_data(drug_name, randomize_Y=randomize_Y)
# Get a model
en = self.get_model(alpha=alpha, l1_ratio=l1_ratio,
normalize=normalize, tol=tol)
# Create a cross validation set of indices for training and testing
kfold = model_selection.KFold(kfolds, shuffle=shuffle)
# Store the results
scores = []
count = 1
if show is True:
pylab.clf()
for train_index, test_index in kfold.split(Y):
# Get X training and testing data set
X_train = X.iloc[train_index]
X_test = X.iloc[test_index]
# Get Y training and testing data set
Y_train = Y.iloc[train_index]
Y_test = Y.iloc[test_index]
# Fit model on the training set
en.fit(X_train, Y_train)
# now compare the prediction with Y_test. This is the coefficient
# of determination R^2. See scikit learn doc for details.
Y_pred = en.predict(X_test)
scores.append(self._get_rpearson(Y_pred, Y_test))
if show is True:
N = len(Y_pred)
import random
pylab.plot(Y_test, Y_pred + np.append(Y_pred[1:],
Y_pred[0]) * perturbation,
"ob", alpha=0.5)
pylab.xlabel("prediction")
pylab.ylabel("test values")
if kfolds == 1 and count == 1:
break
else:
count += 1
if show:
pylab.title("Prediction on test set (Pearson correlation=%.2f)" %
np.mean(scores))
pylab.xlabel("observed drug response")
pylab.ylabel("Predicted drug response")
pylab.grid(True)
self.en = en
self.X = X
self.Y = Y
return scores
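    # Hedged usage sketch (drug identifier 1047 is illustrative, as in the docstrings above):
    #     scores = en.fit(1047, alpha=0.01, kfolds=10, show=False)
    #     print(np.mean(scores))   # mean Pearson correlation across the folds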
def runCV(self, drug_name, l1_ratio=0.5, alphas=None, kfolds=10,
verbose=True, shuffle=True, randomize_Y=False, **kargs):
"""Perform the Cross validation to get the best alpha parameter.
:return: an instance of :class:`RegressionCVResults` that contains
alpha parameter and Pearson correlation value.
"""
# Get the data for the requested drug
X, Y = self._get_one_drug_data(drug_name, randomize_Y=randomize_Y)
if kfolds > len(X):
kfolds = len(X)
# Creates a model
kfold = model_selection.KFold(kfolds, shuffle=shuffle)
en = self._get_cv_model(l1_ratio=l1_ratio, alphas=alphas, kfold=kfold, **kargs)
# Fit the model
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = en.fit(X, Y)
prediction = model.predict(X)
Rp = self._get_rpearson(prediction, Y)
res = RegressionCVResults(model, Rp, kfolds)
if verbose:
print(res)
return res
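    # Hedged usage sketch (assumes a GDSCElasticNet instance ``en`` built as in the
    # docstring examples; the drug identifier is illustrative):
    #     res = en.runCV(1047, kfolds=8)
    #     model = en.get_model(alpha=res.alpha)
    #     en.plot_weight(1047, model=model)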
def tune_alpha(self, drug_name, alphas=None, N=80, l1_ratio=0.5,
kfolds=10, show=True, shuffle=False, alpha_range=[-2.8,0.1],
randomize_Y=False):
"""Interactive tuning of the model (alpha).
This is much faster than :meth:`plot_cindex` but much slower than
:meth:`runCV`.
.. plot::
:include-source:
from gdsctools import *
ic = IC50(gdsctools_data("IC50_v5.csv.gz"))
gf = GenomicFeatures(gdsctools_data("genomic_features_v5.csv.gz"))
en = GDSCElasticNet(ic, gf)
en.tune_alpha(1047, N=40, l1_ratio=0.1)
"""
if alphas is None:
            # logspace returns a vector uniformly spaced in log10 space.
            # The default range -2.8 to 0.1 means alpha from 1.58e-3 to 1.26,
            # which is equivalent to ln(1.58e-3) = -6.45 to ln(1.26) = 0.23 in
            # natural-log scale.
a1, a2 = alpha_range
alphas = pylab.logspace(a1, a2, N)
# Let us now do a CV across difference alphas
all_scores = []
for alpha in alphas:
scores = self.fit(drug_name, alpha, l1_ratio=l1_ratio,
kfolds=kfolds, shuffle=shuffle,
randomize_Y=randomize_Y)
all_scores.append(scores)
        # We can now plot the results, that is the mean scores + error envelope
        df = pd.DataFrame(all_scores)
import pickle
import os
import pandas as pd
import numpy as np
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras.applications import ResNet50
lis=np.array([ i.strip() for i in pd.read_csv('test.csv')['Image_id']])
liss=['Images/test/'+i for i in lis]
# lis =np.array(os.listdir('Images/val/antelope'))
# liss=['Images/val/antelope/'+i.strip() for i in lis]
import json
json_file=open('model_arch.json','r')
model_arc_data=json_file.read()
json_file.close()
from tensorflow.python.keras.models import model_from_json
my_new_model = model_from_json(model_arc_data)
my_new_model.load_weights('my_model_weights.h5')
my_new_model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
probs=np.zeros((len(lis),30))
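# probs collects one prediction row per test image; the 30 columns are assumed to match the
# number of animal classes output by the fine-tuned model (inferred from the array shape).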
jj=0
pic_in=open('animal_dict','rb')
anim_dic=pickle.load(pic_in)
pic_in.close()
lims=sorted(anim_dic.keys())
for i in liss:
imgs=load_img(i,target_size=(224,224))
img_arr=np.array([img_to_array(imgs)])
test_data=preprocess_input(img_arr)
preds=my_new_model.predict(test_data)
probs[jj]=preds[0]
# preds=my_new_model.predict_classes(test_data)
# print(lims[preds[0]],preds[0])
jj+=1
print(jj)
pic_in=open('probs','wb')
pickle.dump(probs,pic_in)
pic_in.close()
lis=lis.reshape(len(lis),1)
Final_set=np.concatenate((lis,probs),axis=1)
pic_in=open('animal_dict','rb')
anim_dic=pickle.load(pic_in)
pic_in.close()
lis=sorted(anim_dic.keys())
header=['image_id']
for i in lis:
header.append(i.strip())
print(header)
Frame = pd.DataFrame(Final_set, columns=header)
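# Assumption: the submission frame is presumably written out afterwards, e.g. Frame.to_csv('submission.csv', index=False)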
"""
By <NAME>
nickc1.github.io
Functions to query the NDBC (http://www.ndbc.noaa.gov/).
The realtime data for all of their buoys can be found at:
http://www.ndbc.noaa.gov/data/realtime2/
Info about all of noaa data can be found at:
http://www.ndbc.noaa.gov/docs/ndbc_web_data_guide.pdf
What all the values mean:
http://www.ndbc.noaa.gov/measdes.shtml
Each buoy has the data:
File Parameters
---- ----------
.data_spec Raw Spectral Wave Data
.ocean Oceanographic Data
.spec Spectral Wave Summary Data
.supl Supplemental Measurements Data
.swdir Spectral Wave Data (alpha1)
.swdir2 Spectral Wave Data (alpha2)
.swr1 Spectral Wave Data (r1)
.swr2 Spectral Wave Data (r2)
.txt Standard Meteorological Data
Example:
import buoypy as bp
# Get the last 45 days of data
rt = bp.realtime(41013) #frying pan shoals buoy
wave_data = rt.spec() #get spectral wave summary data
wave_data.head()
Out[7]:
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
2016-02-04 17:42:00 1.6 1.3 7.1 0.9 4.5 S S STEEP 5.3 169
2016-02-04 16:42:00 1.7 1.5 7.7 0.9 5.0 S S STEEP 5.4 174
2016-02-04 15:41:00 2.0 0.0 NaN 2.0 7.1 NaN S STEEP 5.3 174
2016-02-04 14:41:00 2.0 1.2 7.7 1.5 5.9 SSE SSE STEEP 5.5 167
2016-02-04 13:41:00 2.0 1.7 7.1 0.9 4.8 S SSE STEEP 5.7 175
TODO:
Make functions with except statements always spit out the same
column headings.
"""
import pandas as pd
import numpy as np
import datetime
class realtime:
def __init__(self, buoy):
self.link = 'http://www.ndbc.noaa.gov/data/realtime2/{}'.format(buoy)
def data_spec(self):
"""
Get the raw spectral wave data from the buoy. The seperation
frequency is dropped to keep the data clean.
Parameters
----------
buoy : string
Buoy number ex: '41013' is off wilmington, nc
Returns
-------
df : pandas dataframe (date, frequency)
data frame containing the raw spectral data. index is the date
and the columns are each of the frequencies
"""
link = "{}.{}".format(self.link, 'data_spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, skiprows=1, header=None,
parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,1::2]
freqs = df.iloc[0,2::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
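    # Hedged example (hypothetical buoy id; availability depends on the station):
    #     rt = realtime(41013)
    #     density = rt.data_spec()   # index: timestamps, columns: frequencies in Hz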
def ocean(self):
"""
Retrieve oceanic data. For the buoys explored,
O2%, O2PPM, CLCON, TURB, PH, EH were always NaNs
Returns
-------
df : pandas dataframe
Index is the date and columns are:
DEPTH m
OTMP degc
COND mS/cm
SAL PSU
O2% %
            O2PPM ppm
CLCON ug/l
TURB FTU
PH -
EH mv
"""
link = "{}.{}".format(self.link, 'ocean')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['DEPTH','OTMP','COND','SAL']
df[cols] = df[cols].astype(float)
return df
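    # Hedged example (continuing the sketch above):
    #     ocean_data = rt.ocean()    # DEPTH, OTMP, COND and SAL returned as floats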
def spec(self):
"""
Get the spectral wave data from the ndbc. Something is wrong with
the data for this parameter. The columns seem to change randomly.
Refreshing the data page will yield different column names from
minute to minute.
parameters
----------
buoy : string
Buoy number ex: '41013' is off wilmington, nc
Returns
-------
df : pandas dataframe
data frame containing the spectral data. index is the date
and the columns are:
            H0, SwH, SwP, WWH, WWP, SwD, WWD, STEEPNESS, AVP, MWD
OR
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
"""
link = "{}.{}".format(self.link, 'spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
try:
#units are in the second row drop them
#df.columns = df.columns + '('+ df.iloc[0] + ')'
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['WVHT','SwH','SwP','WWH','WWP','APD','MWD']
df[cols] = df[cols].astype(float)
except:
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['H0','SwH','SwP','WWH','WWP','AVP','MWD']
df[cols] = df[cols].astype(float)
return df
def supl(self):
"""
Get supplemental data
Returns
-------
data frame containing the spectral data. index is the date
and the columns are:
PRES hpa
PTIME hhmm
WSPD m/s
WDIR degT
WTIME hhmm
"""
link = "{}.{}".format(self.link, 'supl')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['PRES','PTIME','WSPD','WDIR','WTIME']
df[cols] = df[cols].astype(float)
return df
def swdir(self):
"""
Spectral wave data for alpha 1.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swdir')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,na_values=999,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swdir2(self):
"""
Spectral wave data for alpha 2.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swdir2')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swr1(self):
"""
Spectral wave data for r1.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swr1')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swr2(self):
"""
Spectral wave data for r2.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swr2')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
        df.index = pd.to_datetime(df.index, format="%Y %m %d %H %M")
"""
Logic for cleaning and processing zipcode data
"""
import pandas as pd
import string
import zipcodes
# Mapping of zipcode to Municipality for all zipcodes that are uniquely associated with one municipality, calculated in `zipcode_muni_mapping.ipynb`
ZIP_MUNI_MAP = {
'01001': 'Agawam',
'01003': 'Amherst',
'01004': 'Amherst',
'01005': 'Barre',
'01007': 'Belchertown',
'01008': 'Blandford',
'01009': 'Palmer',
'01010': 'Brimfield',
'01011': 'Chester',
'01012': 'Chesterfield',
'01013': 'Chicopee',
'01014': 'Chicopee',
'01020': 'Chicopee',
'01021': 'Chicopee',
'01022': 'Chicopee',
'01026': 'Cummington',
'01028': 'East Longmeadow',
'01029': 'Otis',
'01030': 'Agawam',
'01031': 'Hardwick',
'01032': 'Goshen',
'01033': 'Granby',
'01034': 'Granville',
'01035': 'Hadley',
'01036': 'Hampden',
'01037': 'Hardwick',
'01038': 'Hatfield',
'01039': 'Williamsburg',
'01040': 'Holyoke',
'01041': 'Holyoke',
'01050': 'Huntington',
'01053': 'Northampton',
'01054': 'Leverett',
'01056': 'Ludlow',
'01057': 'Monson',
'01059': 'Amherst',
'01060': 'Northampton',
'01061': 'Northampton',
'01062': 'Northampton',
'01063': 'Northampton',
'01066': 'Hatfield',
'01068': 'Oakham',
'01069': 'Palmer',
'01070': 'Plainfield',
'01071': 'Russell',
'01072': 'Shutesbury',
'01073': 'Southampton',
'01074': 'Barre',
'01075': 'South Hadley',
'01077': 'Southwick',
'01079': 'Palmer',
'01080': 'Palmer',
'01081': 'Wales',
'01082': 'Ware',
'01083': 'Warren',
'01084': 'Chesterfield',
'01086': 'Westfield',
'01088': 'Hatfield',
'01089': 'West Springfield',
'01090': 'West Springfield',
'01092': 'Warren',
'01093': 'Whately',
'01094': 'Hardwick',
'01095': 'Wilbraham',
'01096': 'Williamsburg',
'01097': 'Russell',
'01098': 'Worthington',
'01101': 'Springfield',
'01102': 'Springfield',
'01103': 'Springfield',
'01104': 'Springfield',
'01105': 'Springfield',
'01106': 'Longmeadow',
'01107': 'Springfield',
'01108': 'Springfield',
'01109': 'Springfield',
'01111': 'Springfield',
'01115': 'Springfield',
'01116': 'Longmeadow',
'01118': 'Springfield',
'01119': 'Springfield',
'01128': 'Springfield',
'01129': 'Springfield',
'01133': 'Springfield',
'01138': 'Springfield',
'01139': 'Springfield',
'01144': 'Springfield',
'01151': 'Springfield',
'01152': 'Springfield',
'01195': 'Springfield',
'01199': 'Springfield',
'01201': 'Pittsfield',
'01202': 'Pittsfield',
'01203': 'Pittsfield',
'01220': 'Adams',
'01222': 'Sheffield',
'01224': 'Lanesborough',
'01225': 'Cheshire',
'01227': 'Dalton',
'01229': 'Stockbridge',
'01230': 'Great Barrington',
'01235': 'Hinsdale',
'01236': 'West Stockbridge',
'01238': 'Lee',
'01240': 'Lenox',
'01242': 'Lenox',
'01243': 'Middlefield',
'01244': 'New Marlborough',
'01245': 'Monterey',
'01252': 'Egremont',
'01253': 'Otis',
'01254': 'Richmond',
'01255': 'Sandisfield',
'01256': 'Savoy',
'01257': 'Sheffield',
'01259': 'New Marlborough',
'01260': 'Lee',
'01262': 'Stockbridge',
'01263': 'Stockbridge',
'01264': 'Tyringham',
'01266': 'West Stockbridge',
'01267': 'Williamstown',
'01270': 'Windsor',
'01301': 'Greenfield',
'01302': 'Greenfield',
'01330': 'Ashfield',
'01338': 'Buckland',
'01340': 'Colrain',
'01341': 'Conway',
'01342': 'Deerfield',
'01343': 'Florida',
'01344': 'Erving',
'01346': 'Heath',
'01347': 'Montague',
'01349': 'Montague',
'01350': 'Monroe',
'01351': 'Montague',
'01354': 'Gill',
'01355': 'New Salem',
'01360': 'Northfield',
'01364': 'Orange',
'01366': 'Petersham',
'01367': 'Rowe',
'01368': 'Royalston',
'01370': 'Shelburne',
'01373': 'Deerfield',
'01375': 'Sunderland',
'01376': 'Montague',
'01378': 'Warwick',
'01379': 'Wendell',
'01380': 'Wendell',
'01420': 'Fitchburg',
'01430': 'Ashburnham',
'01431': 'Ashby',
'01432': 'Ayer',
'01434': 'Ayer',
'01436': 'Templeton',
'01438': 'Templeton',
'01440': 'Gardner',
'01441': 'Westminster',
'01450': 'Groton',
'01451': 'Harvard',
'01452': 'Hubbardston',
'01453': 'Leominster',
'01460': 'Littleton',
'01462': 'Lunenburg',
'01463': 'Pepperell',
'01464': 'Shirley',
'01467': 'Harvard',
'01468': 'Templeton',
'01469': 'Townsend',
'01470': 'Groton',
'01471': 'Groton',
'01472': 'Groton',
'01473': 'Westminster',
'01474': 'Townsend',
'01475': 'Winchendon',
'01477': 'Winchendon',
'01501': 'Auburn',
'01503': 'Berlin',
'01504': 'Blackstone',
'01505': 'Boylston',
'01506': 'Brookfield',
'01507': 'Charlton',
'01508': 'Charlton',
'01509': 'Charlton',
'01510': 'Clinton',
'01515': 'East Brookfield',
'01516': 'Douglas',
'01517': 'Princeton',
'01518': 'Sturbridge',
'01519': 'Grafton',
'01520': 'Holden',
'01521': 'Holland',
'01522': 'Holden',
'01523': 'Lancaster',
'01524': 'Leicester',
'01525': 'Northbridge',
'01526': 'Sutton',
'01527': 'Millbury',
'01529': 'Millville',
'01531': 'New Braintree',
'01532': 'Northborough',
'01534': 'Northbridge',
'01535': 'North Brookfield',
'01536': 'Grafton',
'01537': 'Oxford',
'01538': 'Uxbridge',
'01540': 'Oxford',
'01541': 'Princeton',
'01542': 'Leicester',
'01543': 'Rutland',
'01545': 'Shrewsbury',
'01546': 'Shrewsbury',
'01550': 'Southbridge',
'01560': 'Grafton',
'01561': 'Lancaster',
'01562': 'Spencer',
'01564': 'Sterling',
'01566': 'Sturbridge',
'01568': 'Upton',
'01569': 'Uxbridge',
'01570': 'Webster',
'01571': 'Dudley',
'01580': 'Westborough',
'01581': 'Westborough',
'01582': 'Westborough',
'01583': 'West Boylston',
'01585': 'West Brookfield',
'01586': 'Millbury',
'01588': 'Mansfield',
'01590': 'Sutton',
'01601': 'Worcester',
'01602': 'Worcester',
'01603': 'Worcester',
'01604': 'Worcester',
'01605': 'Worcester',
'01606': 'Worcester',
'01607': 'Worcester',
'01608': 'Worcester',
'01609': 'Worcester',
'01610': 'Worcester',
'01611': 'Leicester',
'01612': 'Paxton',
'01613': 'Worcester',
'01614': 'Worcester',
'01615': 'Worcester',
'01653': 'Worcester',
'01654': 'Worcester',
'01655': 'Worcester',
'01701': 'Framingham',
'01702': 'Framingham',
'01703': 'Framingham',
'01704': 'Framingham',
'01705': 'Framingham',
'01718': 'Acton',
'01719': 'Boxborough',
'01720': 'Acton',
'01721': 'Ashland',
'01730': 'Bedford',
'01731': 'Bedford',
'01740': 'Bolton',
'01741': 'Carlisle',
'01742': 'Concord',
'01745': 'Southborough',
'01746': 'Holliston',
'01747': 'Hopedale',
'01748': 'Hopkinton',
'01749': 'Hudson',
'01752': 'Marlborough',
'01754': 'Maynard',
'01756': 'Mendon',
'01757': 'Milford',
'01760': 'Natick',
'01770': 'Sherborn',
'01772': 'Southborough',
'01773': 'Lincoln',
'01775': 'Stow',
'01776': 'Sudbury',
'01778': 'Wayland',
'01784': 'Hopkinton',
'01801': 'Woburn',
'01803': 'Burlington',
'01805': 'Burlington',
'01806': 'Woburn',
'01807': 'Woburn',
'01808': 'Woburn',
'01810': 'Andover',
'01812': 'Andover',
'01813': 'Woburn',
'01815': 'Woburn',
'01821': 'Billerica',
'01822': 'Billerica',
'01824': 'Chelmsford',
'01826': 'Dracut',
'01827': 'Dunstable',
'01830': 'Haverhill',
'01831': 'Haverhill',
'01832': 'Haverhill',
'01833': 'Georgetown',
'01834': 'Groveland',
'01835': 'Haverhill',
'01840': 'Lawrence',
'01841': 'Lawrence',
'01842': 'Lawrence',
'01843': 'Lawrence',
'01844': 'Methuen',
'01845': 'North Andover',
'01850': 'Lowell',
'01851': 'Lowell',
'01852': 'Lowell',
'01853': 'Lowell',
'01854': 'Lowell',
'01860': 'Merrimac',
'01862': 'Billerica',
'01863': 'Chelmsford',
'01864': 'North Reading',
'01865': 'Billerica',
'01866': 'Billerica',
'01867': 'Reading',
'01876': 'Tewksbury',
'01879': 'Tyngsborough',
'01880': 'Wakefield',
'01885': 'Boxford',
'01886': 'Westford',
'01887': 'Wilmington',
'01888': 'Woburn',
'01889': 'North Reading',
'01890': 'Winchester',
'01899': 'Andover',
'01901': 'Lynn',
'01902': 'Lynn',
'01903': 'Lynn',
'01904': 'Lynn',
'01905': 'Lynn',
'01906': 'Saugus',
'01907': 'Swampscott',
'01908': 'Nahant',
'01910': 'Lynn',
'01913': 'Amesbury',
'01915': 'Beverly',
'01921': 'Boxford',
'01922': 'Newbury',
'01923': 'Danvers',
'01929': 'Essex',
'01930': 'Gloucester',
'01931': 'Gloucester',
'01936': 'Hamilton',
'01937': 'Hathorne',
'01938': 'Ipswich',
'01940': 'Lynnfield',
'01944': 'Manchester-by-the-Sea',
'01945': 'Marblehead',
'01949': 'Middleton',
'01950': 'Newburyport',
'01951': 'Newbury',
'01952': 'Salisbury',
'01960': 'Peabody',
'01961': 'Peabody',
'01965': 'Beverly',
'01966': 'Rockport',
'01969': 'Rowley',
'01970': 'Salem',
'01971': 'Salem',
'01982': 'Hamilton',
'01983': 'Topsfield',
'01984': 'Wenham',
'01985': 'West Newbury',
'02018': 'Norwell',
'02019': 'Bellingham',
'02020': 'Nantucket',
'02021': 'Canton',
'02025': 'Cohasset',
'02026': 'Dedham',
'02027': 'Dedham',
'02030': 'Dover',
'02031': 'Mansfield',
'02032': 'Walpole',
'02035': 'Foxborough',
'02038': 'Franklin',
'02040': 'Scituate',
'02041': 'Duxbury',
'02043': 'Hingham',
'02044': 'Hingham',
'02045': 'Hull',
'02047': 'Scituate',
'02048': 'Mansfield',
'02050': 'Marshfield',
'02051': 'Marshfield',
'02052': 'Medfield',
'02053': 'Medway',
'02054': 'Millis',
'02055': 'Scituate',
'02056': 'Norfolk',
'02059': 'Marshfield',
'02060': 'Scituate',
'02061': 'Norwell',
'02062': 'Norwood',
'02065': 'Marshfield',
'02066': 'Scituate',
'02067': 'Sharon',
'02070': 'Wrentham',
'02071': 'Walpole',
'02072': 'Stoughton',
'02081': 'Walpole',
'02090': 'Westwood',
'02093': 'Wrentham',
'02108': 'Boston',
'02109': 'Boston',
'02110': 'Boston',
'02111': 'Boston',
'02112': 'Boston',
'02113': 'Boston',
'02114': 'Boston',
'02115': 'Boston',
'02116': 'Boston',
'02117': 'Boston',
'02118': 'Boston',
'02119': 'Boston',
'02120': 'Boston',
'02121': 'Boston',
'02122': 'Boston',
'02123': 'Boston',
'02124': 'Boston',
'02125': 'Boston',
'02126': 'Boston',
'02127': 'Boston',
'02128': 'Boston',
'02129': 'Boston',
'02130': 'Boston',
'02131': 'Boston',
'02132': 'Boston',
'02133': 'Boston',
'02134': 'Boston',
'02135': 'Boston',
'02136': 'Boston',
'02137': 'Boston',
'02138': 'Cambridge',
'02139': 'Cambridge',
'02140': 'Cambridge',
'02141': 'Cambridge',
'02142': 'Cambridge',
'02143': 'Somerville',
'02144': 'Somerville',
'02145': 'Somerville',
'02148': 'Malden',
'02149': 'Everett',
'02150': 'Chelsea',
'02151': 'Revere',
'02152': 'Winthrop',
'02153': 'Medford',
'02155': 'Medford',
'02156': 'Medford',
'02163': 'Boston',
'02169': 'Quincy',
'02170': 'Quincy',
'02171': 'Quincy',
'02176': 'Melrose',
'02180': 'Stoneham',
'02184': 'Braintree',
'02185': 'Braintree',
'02186': 'Milton',
'02187': 'Milton',
'02188': 'Weymouth',
'02189': 'Weymouth',
'02190': 'Weymouth',
'02191': 'Weymouth',
'02196': 'Boston',
'02199': 'Boston',
'02201': 'Boston',
'02203': 'Boston',
'02204': 'Boston',
'02205': 'Boston',
'02206': 'Boston',
'02207': 'Boston',
'02210': 'Boston',
'02211': 'Boston',
'02212': 'Boston',
'02215': 'Boston',
'02216': 'Boston',
'02217': 'Boston',
'02222': 'Boston',
'02228': 'Boston',
'02238': 'Cambridge',
'02239': 'Cambridge',
'02241': 'Boston',
'02266': 'Boston',
'02269': 'Quincy',
'02283': 'Boston',
'02284': 'Boston',
'02293': 'Boston',
'02295': 'Boston',
'02297': 'Boston',
'02298': 'Boston',
'02301': 'Brockton',
'02302': 'Brockton',
'02303': 'Brockton',
'02304': 'Brockton',
'02305': 'Brockton',
'02322': 'Avon',
'02324': 'Bridgewater',
'02325': 'Bridgewater',
'02327': 'Pembroke',
'02330': 'Carver',
'02331': 'Duxbury',
'02332': 'Duxbury',
'02333': 'East Bridgewater',
'02334': 'Easton',
'02337': 'East Bridgewater',
'02338': 'Halifax',
'02339': 'Hanover',
'02340': 'Hanover',
'02341': 'Hanson',
'02343': 'Holbrook',
'02344': 'Middleborough',
'02345': 'Plymouth',
'02346': 'Middleborough',
'02347': 'Lakeville',
'02348': 'Lakeville',
'02349': 'Middleborough',
'02350': 'Hanson',
'02351': 'Abington',
'02355': 'Carver',
'02356': 'Easton',
'02357': 'Easton',
'02358': 'Pembroke',
'02359': 'Pembroke',
'02360': 'Plymouth',
'02361': 'Plymouth',
'02362': 'Plymouth',
'02364': 'Kingston',
'02366': 'Carver',
'02367': 'Plympton',
'02368': 'Randolph',
'02370': 'Rockland',
'02375': 'Easton',
'02379': 'West Bridgewater',
'02381': 'Plymouth',
'02382': 'Whitman',
'02420': 'Lexington',
'02421': 'Lexington',
'02445': 'Brookline',
'02446': 'Brookline',
'02447': 'Brookline',
'02451': 'Waltham',
'02452': 'Waltham',
'02453': 'Waltham',
'02454': 'Waltham',
'02455': 'Waltham',
'02456': 'Littleton',
'02457': 'Wellesley',
'02458': 'Newton',
'02459': 'Newton',
'02460': 'Newton',
'02461': 'Newton',
'02462': 'Newton',
'02464': 'Newton',
'02465': 'Newton',
'02466': 'Newton',
'02467': 'Newton',
'02468': 'Newton',
'02471': 'Watertown',
'02472': 'Watertown',
'02474': 'Arlington',
'02475': 'Arlington',
'02476': 'Arlington',
'02477': 'Watertown',
'02478': 'Belmont',
'02479': 'Belmont',
'02481': 'Wellesley',
'02482': 'Wellesley',
'02492': 'Needham',
'02493': 'Weston',
'02494': 'Needham',
'02495': 'Newton',
'02532': 'Bourne',
'02534': 'Bourne',
'02536': 'Falmouth',
'02537': 'Sandwich',
'02538': 'Wareham',
'02539': 'Edgartown',
'02540': 'Falmouth',
'02541': 'Falmouth',
'02542': 'Bourne',
'02543': 'Falmouth',
'02552': 'Chilmark',
'02553': 'Bourne',
'02554': 'Nantucket',
'02556': 'Falmouth',
'02557': 'Oak Bluffs',
'02558': 'Wareham',
'02559': 'Bourne',
'02561': 'Bourne',
'02562': 'Bourne',
'02563': 'Sandwich',
'02564': 'Nantucket',
'02565': 'Falmouth',
'02568': 'Tisbury',
'02571': 'Wareham',
'02573': 'Tisbury',
'02574': 'Falmouth',
'02575': 'West Tisbury',
'02576': 'Wareham',
'02584': 'Nantucket',
'02601': 'Barnstable',
'02630': 'Barnstable',
'02631': 'Brewster',
'02632': 'Barnstable',
'02633': 'Chatham',
'02634': 'Barnstable',
'02635': 'Barnstable',
'02636': 'Barnstable',
'02637': 'Barnstable',
'02638': 'Dennis',
'02639': 'Dennis',
'02641': 'Dennis',
'02642': 'Eastham',
'02643': 'Orleans',
'02644': 'Sandwich',
'02645': 'Harwich',
'02646': 'Harwich',
'02647': 'Barnstable',
'02648': 'Barnstable',
'02649': 'Mashpee',
'02650': 'Chatham',
'02651': 'Eastham',
'02652': 'Truro',
'02653': 'Orleans',
'02655': 'Barnstable',
'02657': 'Provincetown',
'02659': 'Chatham',
'02660': 'Dennis',
'02661': 'Harwich',
'02662': 'Orleans',
'02663': 'Wellfleet',
'02664': 'Yarmouth',
'02666': 'Truro',
'02667': 'Wellfleet',
'02668': 'Barnstable',
'02669': 'Chatham',
'02670': 'Dennis',
'02671': 'Harwich',
'02672': 'Barnstable',
'02673': 'Yarmouth',
'02675': 'Yarmouth',
'02702': 'Freetown',
'02703': 'Attleboro',
'02712': 'Norton',
'02713': 'Gosnold',
'02714': 'Dartmouth',
'02715': 'Dighton',
'02717': 'Freetown',
'02718': 'Taunton',
'02719': 'Fairhaven',
'02720': 'Fall River',
'02721': 'Fall River',
'02722': 'Fall River',
'02723': 'Fall River',
'02724': 'Fall River',
'02725': 'Somerset',
'02726': 'Somerset',
'02738': 'Marion',
'02739': 'Mattapoisett',
'02740': 'New Bedford',
'02741': 'New Bedford',
'02742': 'New Bedford',
'02743': 'Acushnet',
'02744': 'New Bedford',
'02745': 'New Bedford',
'02746': 'New Bedford',
'02747': 'Dartmouth',
'02748': 'Dartmouth',
'02760': 'North Attleboro',
'02761': 'North Attleboro',
'02762': 'Plainville',
'02763': 'Attleboro',
'02764': 'Dighton',
'02766': 'Norton',
'02767': 'Raynham',
'02768': 'Raynham',
'02769': 'Rehoboth',
'02770': 'Rochester',
'02771': 'Seekonk',
'02777': 'Swansea',
'02779': 'Berkley',
'02780': 'Taunton',
'02783': 'Taunton',
'02790': 'Westport',
'02791': 'Westport',
'05501': 'Andover',
'05544': 'Andover'
}
# Mappings of city name to Municipality name ONLY for cities that contain zipcodes that map to multiple municipalities.
# Used to disambiguate multi-muni zipcodes when City field is present.
CITY_MUNI_MAP = {
'amherst': 'Amherst',
'easthampton': 'Easthampton',
'westfield': 'Westfield',
'becket': 'Becket',
'dalton': 'Dalton',
'lanesboro': 'Lanesborough',
'north adams': 'North Adams',
'south egremont': 'Egremont',
'athol': 'Athol',
'bernardston': 'Bernardston',
'charlemont': 'Charlemont',
'chilmark': 'Chilmark',
'alford': 'Alford',
'aquinnah': 'Aquinnah',
'clarksburg': 'Clarksburg',
'hancock': 'Hancock',
'hawley': 'Hawley',
'leyden': 'Leyden',
'montgomery': 'Montgomery',
'mount washington': 'Mount Washington',
'new ashford': 'New Ashford',
'pelham': 'Pelham',
'phillipston': 'Phillipston',
'washington': 'Washington',
'westhampton': 'Westhampton'
}
def clean(zip_series: pd.Series) -> pd.DataFrame:
"""Standardize zipcode formatting in a pandas series
Pandas will likely load zipcodes from an excel file as an object series
mixing string and numeric types. This function will cast all entries to
string types, strip whitespace, left pad to a minimum of 5 characters with zeros, then
validate the entry contains a valid zipcode using a regular expression.
Return: pandas.DataFrame
- Pandas series of cleaned zipcodes: str
- Pandas series of cleaned zip4, or empty string if missing: str
- Pandas series of valid zipcode indicators: boolean
"""
# Valid zipcodes are 3-5 numeric digits, followed by an optional dash and 4 more digits, or an optional ".0" if the data has been cast as floats.
valid_zipcode_regex = r"^([0-9]{3,5})(?:[.]0)?(?:-([0-9]{4})|-)?$"
#The extract function will match this pattern, and extract the zip5 group into column 0 and the zip4 group into column 1. NaN if group is not present or pattern isn't matched.
res = zip_series.astype(str).str.strip().str.extract(valid_zipcode_regex)
cleaned_zipcode_series = res[0].str.zfill(5).fillna('')
cleaned_zip4_series = res[1].fillna('')
valid_zipcode_series = cleaned_zipcode_series.str.match(valid_zipcode_regex)
#Replace invalid value rows with original inputs
cleaned_zipcode_series.loc[~valid_zipcode_series] = zip_series[~valid_zipcode_series]
return pd.DataFrame(data={"zip_cleaned": cleaned_zipcode_series, "zip4_cleaned": cleaned_zip4_series, "zip_valid": valid_zipcode_series})
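# Hedged usage sketch (illustrative inputs only):
#     raw = pd.Series(["2139", " 01003-1234 ", "abcde"])
#     clean(raw)
#     # zip_cleaned -> ["02139", "01003", "abcde"]; zip4_cleaned -> ["", "1234", ""];
#     # zip_valid   -> [True, True, False]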
def validate_zip_town_row(row: dict, town_field: str, zip_field: str) -> pd.Series:
"""
Use zipcodes library to append town data from zipcodes and validate existing zip/town inputs
"""
INVALID_RESULT = pd.Series({
"town": "INVALID",
"municipality": "INVALID",
"zip_exists": False,
"town_valid": False,
})
muni = ZIP_MUNI_MAP.get(row[zip_field])
if not muni:
#Try looking up town_field in City/Muni map
standardized_raw_town_name = str(row[town_field]).lower().strip()
muni = CITY_MUNI_MAP.get(standardized_raw_town_name)
if not muni:
return INVALID_RESULT
# From here on, we were at least able to look up a muni. But we still want to determine whether the raw town data is valid
INVALID_RESULT = pd.Series({
"municipality": muni,
"town": string.capwords(str(row[town_field]).strip()),
"zip_exists": False,
"town_valid": False,
})
try:
zip_results = zipcodes.matching(row[zip_field])
except TypeError:
return INVALID_RESULT
except ValueError:
return INVALID_RESULT
if len(zip_results) > 1:
raise ValueError(f"Multimatch zipcode {row[zip_field]} encountered!")
elif len(zip_results) == 0:
return INVALID_RESULT
else: # len(zip_results)==1
zip_results = zip_results[0]
standardized_raw_town_name = str(row[town_field]).lower().strip()
standardized_town_name = zip_results["city"].lower().strip()
standardized_acceptable_towns = [x.lower().strip() for x in zip_results["acceptable_cities"]]
standardized_acceptable_towns.append(standardized_town_name)
#Strict exact matching for now
town_valid = standardized_raw_town_name in standardized_acceptable_towns
output = {
"municipality": muni,
"town": zip_results["city"],
"zip_exists": True,
"town_valid": town_valid
}
return pd.Series(output)
def validate_zip_town(df: pd.DataFrame, town_field: str, zip_field: str) -> pd.DataFrame:
"""
Use zipcodes library to append town data from zipcodes and validate existing zip/town inputs
Return: pandas.DataFrame
- Pandas series of town name from zipcodes library: str
- Pandas series indicating zipcode exists: boolean
- Pandas series indicating raw data town is valid given zipcode: boolean
"""
return df.merge(df.apply(
lambda row: validate_zip_town_row(row, town_field, zip_field), axis=1
),
left_index=True, right_index=True
)
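# Hedged usage sketch (column names are illustrative):
#     df = pd.DataFrame({"City": ["Cambridge", "Boston"], "Zip": ["02139", "02108"]})
#     validate_zip_town(df, town_field="City", zip_field="Zip")
#     # appends municipality/town columns plus zip_exists and town_valid flags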
def valid_town_row(row: dict, town_field: str) -> pd.Series:
standardized_town = string.capwords(row[town_field].strip())
zip_list = zipcodes.filter_by(city=standardized_town, state='MA')
town_valid = (len(zip_list)>0)
muni_list = [ZIP_MUNI_MAP.get(z['zip_code']) for z in zip_list]
unique_munis = list(pd.Series([m for m in muni_list if m is not None]).unique())
if len(unique_munis) == 1:
muni = unique_munis[0]
else:
muni = None
if not muni:
#Try looking up town_field in City/Muni map
standardized_raw_town_name = str(row[town_field]).lower().strip()
muni = CITY_MUNI_MAP.get(standardized_raw_town_name)
if not muni:
muni = "INVALID"
town_valid = False
output = {
"municipality": muni,
"town": standardized_town,
"zip_exists": False,
"zip_cleaned": "",
"zip4_cleaned": "",
"zip_valid": False,
"town_valid": town_valid
}
    return pd.Series(output)
# Read and import XML files, then export the data to an MSSQL database
#
# Import packages
import pandas as pd
import sqlalchemy
import pyodbc
import xml.etree.ElementTree as et
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askdirectory
from tkinter import messagebox
import os
import time
import glob
import math as m
#import re
#from concurrent.futures import ThreadPoolExecutor
class Leitor:
def __init__(self,master):
self.master = master
master.title('Leitor XML v0.7')
        # Helper variables
self.ns = {'aux' : 'http://www.portalfiscal.inf.br/nfe'}
self.tnome = 'NFe_Base'
self.listad = ['SQL Server Native Client 11.0','SQL Server Native Client 10.0','ODBC Driver 13 for SQL Server']
self.arquivos = 0
self.driver = tk.StringVar(master)
self.driver.set(self.listad[0])
self.statbar = tk.StringVar(master)
self.statbar.set('')
self.cb1 = tk.IntVar()
self.cb1.set(1)
self.cb2 = tk.IntVar()
self.cb2.set(0)
self.lcaminho = tk.Label(text='Insira o caminho dos arquivos XML:')
self.lcaminho.grid(row=0)
self.pathinsert = tk.Entry(root, width=80, borderwidth=2)
self.pathinsert.grid(row=1)
self.bpath = tk.Button(master, text='Pesquisar', command=self.get_cam, padx=5)
self.bpath.grid(row=1,column=1)
self.lserv = tk.Label(text='Insira o Servidor SQL:')
self.lserv.grid(row=3)
self.servinsert = tk.Entry(master, width=80, borderwidth=2)
self.servinsert.grid(row=4)
self.servinsert.insert(0,os.environ['COMPUTERNAME'])
self.checkbox2= tk.Checkbutton(master, text='SQL Express', variable=self.cb2,command=self.set_express)
self.checkbox2.grid(row=4,column=1)
self.lbase = tk.Label(text='Insira a base para inserir os dados:')
self.lbase.grid(row=5)
self.baseinsert = tk.Entry(master, width=80, borderwidth=2)
self.baseinsert.grid(row=6)
self.baseinsert.insert(0,'DB_XML')
self.checkbox1= tk.Checkbutton(master, text='Substituir?', variable=self.cb1)
self.checkbox1.grid(row=6,column=1)
self.dpadrao = tk.Label(text='Driver:')
self.dpadrao.grid(row=9)
self.Lista = ttk.Combobox(master,values=self.listad,textvariable=self.driver)
self.Lista.config(width=78)
self.Lista.grid(row=10)
self.barprog = ttk.Progressbar(master,orient='horizontal',length=490,mode='determinate')
self.barprog.grid(row=12,pady=10)
self.status = tk.Label(textvariable=self.statbar)
self.status.grid(row=13)
self.bconnect = tk.Button(master, text='Importar XML', command=self.sql_connect)
self.bconnect.grid(row=15,pady=10)
    # Open a Windows dialog so the user can choose the folder containing the XML files
def get_cam(self):
global directory
global path
global arquivos
path = askdirectory()
directory = os.fsencode(path)
arquivos = len(glob.glob1(path,'*.xml'))
self.pathinsert.delete(0,tk.END)
self.pathinsert.insert(0,path)
    # Append \sqlexpress to the end of the server name
def set_express(self):
if self.cb2.get() == 1:
servidorexp = self.servinsert.get()
self.servinsert.delete(0,tk.END)
self.servinsert.insert(0,servidorexp+str('\\sqlexpress'))
self.servinsert.update()
elif self.cb2.get() == 0:
servidornexp = self.servinsert.get()
servidornexp = servidornexp.replace('\\sqlexpress','')
self.servinsert.delete(0,tk.END)
self.servinsert.insert(0,servidornexp)
self.servinsert.update()
    # Remove cancelled NFe (invoices) found while reading the DataFrame
def remover_canc(self):
global all_xml
global canc_xml
global df_all_xml
global df_canc_xml
        df_all_xml = pd.DataFrame(all_xml)
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from keras.preprocessing.image import ImageDataGenerator
# Plot a 3D surface from meshgrid coordinates X, Y and heights Z
def plot3d(X,Y,Z):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, color='y')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
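# Quick usage sketch (hypothetical grid, not executed): plot the paraboloid z = x^2 + y^2.
#   xs = np.linspace(-2, 2, 50)
#   X, Y = np.meshgrid(xs, xs)
#   plot3d(X, Y, X**2 + Y**2)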
# Visualise the metrics from the model
def metrics(history):
df = | pd.DataFrame(history) | pandas.DataFrame |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
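# Usage sketch (not executed at import time): list every regression dataset class in this module.
#   for cls in all_datasets():
#       print(cls.__name__)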
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
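# Minimal usage sketch (assumes a writable local directory, e.g. 'datasets/'):
#   ds = Abalone('datasets', split=TRAIN, validation_size=0.2)
#   print(ds.x.shape, ds.y.shape)  # features and one-column target of the training split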
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep=r'\s+', names=["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
        self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, names=["symboling", "normalized-losses", "make", "fuel-type", "aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", "length", "width", "height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", "fuel-system", "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
        y_columns = ['price']  # assumption: the original left this blank; 'price' is the usual regression target for this dataset
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep=r'\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep=r'\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col='dteday', parse_dates=True)
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
class ForecastingOrders(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep=r'\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, making the number of pages less than the samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy in terms
of distributions of the features and targets. Training and validation splits are
also made to ensure that the same page is not in both splits. This makes the distributions
of features in training and validation splits vary to a relatively large extent, possible
because the number of pages are not that many, while the features are many.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class Facebookmetrics (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = | pd.read_csv(file_path, sep=';') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse, unquote, quote
from bs4 import BeautifulSoup
import pandas as pd
pd.set_option('display.max_colwidth', None)  # -1 is deprecated in newer pandas versions
import numpy as np
from w3lib.url import safe_url_string
import time
import re
# In[3]:
#List that contains all links
urlList = []
#finding all possible link in table
for pg in range(1,22):
if pg<10:
parse = "/salary-guide/2019/0000" + str(pg) + "/"
html = urlopen('https://adecco.co.th/salary-guide/2019/0000' + str(pg))
#print(html.getcode())
else:
parse = "/salary-guide/2019/000" + str(pg) + "/"
html = urlopen('https://adecco.co.th/salary-guide/2019/000' + str(pg))
#print(html.getcode())
bsObj = BeautifulSoup(html, "lxml")
tag_a = bsObj.find_all(["li"])
for i in tag_a[:]:
children = i.findChild("a" , recursive=False)
url = children.get('href')
#print(url)
#print(url,type(url))
try:
match = "^" + "(" + parse + ")" + "(.*)"
#print(match)
m = re.match(match ,url)
if m :
print(url)
urlList.append("https://adecco.co.th" +str(url))
except:
pass
#print("=====End of pg=====")
# In[4]:
len(urlList)
# In[56]:
urlList
# In[34]:
testurl = urlList[30]
# In[35]:
html = urlopen(testurl)
bsObj = BeautifulSoup(html, "lxml")
print(html.getcode())
# In[36]:
bsObj.find("h3").get_text()
# In[37]:
bsObj.find("p", {"class": "des-en"}).get_text()
# In[38]:
bsObj.find('table').find_all('td')
# In[10]:
tablelist = bsObj.find('table').find_all('td')
# In[16]:
first = (tablelist[5].get_text().strip())
second = (tablelist[6].get_text().strip())
third = (tablelist[7].get_text().strip())
print(first,second,third)
# In[41]:
counter = 0
urldict = dict()
jobdict = dict()
jddict = dict()
firstdict = dict()
seconddict = dict()
thirddict = dict()
# In[44]:
def splitstr(string):
if string == "":
return -1
elif "-" not in string:
value = int(string.replace(",",""))
return value
else:
lower, upper = string.replace(",","").split("-")
lower = int(lower)
upper = int(upper)
value = (lower+upper)/2
return value
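# Illustrative behaviour of splitstr on the three salary formats found in the table:
#   splitstr('')               -> -1        (missing value sentinel)
#   splitstr('25,000')         -> 25000     (single figure)
#   splitstr('20,000-30,000')  -> 25000.0   (midpoint of the range)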
import time
def findfrompg(pg):
global urldict, jobdict, jddict, firstdict, seconddict, thirddict
for url in urlList[pg:]:
t0 = time.time()
print(pg)
html = urlopen(url)
bsObj = BeautifulSoup(html, "lxml")
urldict[pg] = url
job = bsObj.find("h3").get_text().strip()
jobdict[pg] = job
print(job)
jd = bsObj.find("p", {"class": "des-en"}).get_text()
jddict[pg] = jd
print(jd)
tablist = bsObj.find('table').find_all('td')
first = tablist[5].get_text().strip()
second = tablist[6].get_text().strip()
third = tablist[7].get_text().strip()
firstdict[pg] = splitstr(first)
seconddict[pg] = splitstr(second)
thirddict[pg] = splitstr(third)
t1 = time.time()
print(t1-t0)
pg += 1
# In[45]:
findfrompg(0)
# In[53]:
data_list = [jobdict, jddict, firstdict, seconddict, thirddict, urldict]
df = pd.DataFrame.from_dict(data_list, orient='columns').T
df.columns = ["position", "jobdesc", "newgrad", "junior", "senior", "url"]
# In[55]:
df
# In[69]:
len(df[(df.junior > 0)])
# In[65]:
len(df[(df.junior < 0)& (df.newgrad <0)])
# In[66]:
len(df[(df.junior < 0)& (df.newgrad > 0) & (df.senior<0)])
# In[71]:
s1 = (df[(df.junior > 0)]).junior
s2 = (df[(df.junior < 0)& (df.newgrad <0)]).senior
s3 = (df[(df.junior < 0)& (df.newgrad > 0) & (df.senior<0)]).newgrad
# In[74]:
salary = s1.append(s2).append(s3)
# In[78]:
salary = salary.sort_index(axis=0)
# In[77]:
salary.sort_index(axis=0)
# In[80]:
sal_df = | pd.DataFrame(salary) | pandas.DataFrame |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
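# Example: Convert(['C', 1.0, 'kernel', 'rbf']) -> {'C': 1.0, 'kernel': 'rbf'}
# (pairs consecutive items of a flat list into key/value entries)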
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to the client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending the pre-computed data space results (projections, targets, instance influence) to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
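# Example (hypothetical column names): column_index(XData, ['age', 'chol']) returns the
# positional indices of those columns in XData, in the order they were requested.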
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)  # one entry per feature, hence range(M)
)
return out
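# Illustrative call (hypothetical fitted tree ensemble 'clf' exposing feature_importances_):
#   imp = class_feature_importance(XData.values, np.array(yData), clf.feature_importances_)
# The result maps each class label to {feature_index: signed, scaled importance}.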
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# Calculate the performance and other results for every algorithm and model
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
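# Example: Remove([1, 1, 2.0, float('nan'), 3]) -> [1, 2.0, 3]
# (drops duplicates and NaN entries while preserving the original order)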
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
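# The endpoint above expects a JSON body of the form
# {"algorithms": [...], "models": [...]}, with the two lists aligned by index
# so that models[i] is the model ID selected for algorithms[i].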
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
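# `factors` holds one weight per metric column; a value of 0 removes that
# metric before the per-model summaries and the MDS/t-SNE projections are
# recomputed for the overview panel.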
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
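# Each classifier's serialized results occupy nine consecutive slots in
# allParametersPerformancePerModel, so the per-classifier metric payloads read
# below sit at positions 6, 15, 24, ... (offset 6 within each block of nine).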
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = | pd.DataFrame.from_dict(dicSVC) | pandas.DataFrame.from_dict |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
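# SQL_STRINGS maps a logical statement name to per-flavor SQL text; test
# helpers pull the template for the active backend, e.g.
# SQL_STRINGS["create_iris"][self.flavor] in load_iris_data below.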
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
        # test index_col argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
        assert issubclass(df.BoolColWithNull.dtype.type, object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
        # the result index gains a name from the set_index via index_col, while
        # `expected` has no index name, so skip the name check below
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = | sql.read_sql_table("test_read_write", self.conn) | pandas.io.sql.read_sql_table |
#!/usr/bin/env python
import pandas as pd
import numpy as np
import scipy, sklearn, os, sys, string, fileinput, glob, re, math, itertools, functools
import copy, multiprocessing, traceback, logging, pickle, traceback
import scipy.stats, sklearn.decomposition, sklearn.preprocessing, sklearn.covariance
from scipy.stats import describe
from scipy import sparse
import os.path
import scipy.sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.preprocessing import normalize
from collections import defaultdict
from tqdm import tqdm
def fast_csv_read(filename, *args, **kwargs):
    # Read a small sample first so column dtypes can be inferred once and then
    # reused for the full read. The original body is truncated here; the two
    # lines after the sample read are a plausible completion of that pattern
    # (assumption), not the verified original code.
    small_chunk = pd.read_csv(filename, nrows=50)
    coltypes = {col: small_chunk[col].dtype.name for col in small_chunk.columns}
    return pd.read_csv(filename, *args, dtype=coltypes, **kwargs)
# fitting activation curves
import numpy as np
import pandas as pd
import math
from GeneralProcess.EphysInfoFilter import EphysInfoFiltering, FindPairedFiles
import lmfit
from scipy.signal import savgol_filter
from scipy.stats import spearmanr, pearsonr, sem
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.backends.backend_pdf import PdfPages
# cmap = plt.cm.get_cmap()
cmap = plt.cm.get_cmap("Set1")
plt.style.use("dark_background")
# plt.style.use("seaborn-colorblind")
def apply_savgol(df, k=3, w=100, show=True):
"""Apply savgol filter to `df` with degree `k` and window `w`"""
df_filt = df.copy()
print("Savgol filter with w = %d" % w)
# window for fitting polynomials in savgol filter
w = math.floor(df_filt.shape[0]/w)
# ensure window is an odd integer
w = w if (w % 2 > 0) else w + 1
for i in range(df.shape[1]):
df_filt.iloc[:, i] = savgol_filter(df_filt.iloc[:, i], w, k)
if show:
plt.plot(df, alpha=0.2, c="w")
plt.plot(df_filt, lw=2, c="r")
plt.title("Order = %d, Window = %d" % (k, w))
plt.show()
return df_filt
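# Example call (illustrative values): apply_savgol(df_tails, k=3, w=100)
# smooths every column with an order-3 polynomial over a window of roughly
# len(df_tails)//100 samples, forced to an odd integer.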
def apply_savgol_spearmanr(df, frac=0.40, k=2, w=8, show=False):
"""
Apply savgol filter to last fraction `frac` of `df`
Savgol parameters: window `w` and polynomial order `k`
`show` = plot filtered data on top of original data, coloured by spearman r
Returns Spearman coefficients for each trace in a N-sized array
"""
# select last `frac`-th of `df` for filtering
ind = int(frac*df.shape[0])
df_filt = df.iloc[-ind:, :].copy()
# window for fitting polynomials in savgol filter
w = math.floor(df_filt.shape[0]/w)
# ensure window is an odd integer
w = w if (w % 2 > 0) else w + 1
# apply savgol fitler
if show:
# plt.style.use("default")
plt.plot(df_filt, alpha=0.2)
cmap = plt.cm.get_cmap("seismic")
for i in range(df.shape[1]):
df_filt.iloc[:, i] = savgol_filter(df_filt.iloc[:, i], w, k)
r, _ = spearmanr(df_filt.index, df_filt.iloc[:, i])
if show:
plt.plot(df_filt.iloc[:, i], c=cmap((r+1)/2), lw=2)
df_filt.iat[0, i] = r
if show:
plt.show()
plt.close()
return df_filt.iloc[0, :].abs()
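# The values returned above are |Spearman r| per trace over the last `frac`
# of the record; values near 1 indicate a tail that is still drifting
# monotonically (not at steady state), which subtract_tail_baseline() uses
# below through its r > 0.55 checks.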
def Set_RC_Defaults(pub=False, dark=False):
"""
Set rcParams
`pub` = if True, sets defaults for publication standards; else, whatever is convenient
"""
if dark:
plt.style.use("dark_background")
else:
plt.style.use("default")
rcParams['axes.labelweight'] = 'bold'
rcParams['axes.titleweight'] = 'bold'
rcParams['axes.labelsize'] = 16
rcParams['axes.titlesize'] = 18
rcParams['legend.fontsize'] = 16
rcParams['font.family'] = 'sans-serif'
rcParams['font.weight'] = 'normal'
rcParams["axes.formatter.limits"] = (-3, 3)
rcParams["ytick.minor.size"] = 6
if pub:
rcParams['font.size'] = 14
rcParams['axes.linewidth'] = 2
rcParams['font.sans-serif'] = 'Arial'
rcParams['svg.fonttype'] = 'none'
rcParams['pdf.use14corefonts'] = True
else:
rcParams['font.size'] = 12
rcParams['axes.linewidth'] = 2
rcParams['font.sans-serif'] = 'Verdana'
def SetXAxisSpacing(ax, dx=30, xmin=-145, xmax=60):
xlo, xhi = ax.get_xlim()
dx2 = int(dx/2)
xlo = math.floor(xlo/dx2) * dx2
xlo = xlo if (xlo > xmin) else xmin
xhi = math.ceil(xhi/dx2) * dx2
xhi = xhi if (xhi < xmax) else xmax
ax.set_xticks(range(xlo, xhi+dx2, dx))
def SetAxProps(ax):
for spine in ["top", "right"]:
ax.spines[spine].set_visible(False)
try:
ax.locator_params(axis="y", nbins=6)
except:
pass
ax.tick_params(axis='both', length=8, width=2, labelsize=12)
class BoltzmannFunctions():
def __init__(self, func_name="boltz_a"):
"""
Select Boltzmann function corresponding to `func_name`
`func_name` options:
'boltz_a' = standard Boltzmann
'boltz_c' = variable maximum
'boltz_d' = variable minimum (maximum = 1)
'boltz_cd' = variable maximum and minimum
"""
self.func_name = func_name
self.descr = ""
def boltz_a(self, v, vh, s):
return 1/(1 + np.exp((v-vh)/s))
# variable Pmax
def boltz_c(self, v, vh, s, c):
return (c / (1 + np.exp((v - vh)/s)))
# variable Pmin
def boltz_d(self, v, vh, s, d):
return ((1-d)/(1 + np.exp((v-vh)/s))) + d
# variable Pmin and Pmax
def boltz_cd(self, v, vh, s, c, d):
return ((c-d) / (1 + np.exp((v - vh)/s))) + d
def select(self):
ind = self.func_name.split("_")[1]
if ind == "a":
self.descr = "Standard Boltzmann (max = 1, min = 0)"
return self.boltz_a
elif ind == "c":
self.descr = "Boltzmann with varying max (max = c > 0.5, min = 0)"
return self.boltz_c
elif ind == "d":
self.descr = "Boltzmann with varying min (max = 1, min = d < 0.5)"
return self.boltz_d
elif ind == "cd":
self.descr = "Boltzmann with varying min and max (max = c > 0.5, min = d < 0.5)"
return self.boltz_cd
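# The four variants above, written out (V = test voltage, Vh = half-maximal
# voltage, s = slope factor; c and d scale the maximum and minimum):
#   boltz_a : G(V) = 1 / (1 + exp((V - Vh)/s))
#   boltz_c : G(V) = c / (1 + exp((V - Vh)/s))
#   boltz_d : G(V) = (1 - d) / (1 + exp((V - Vh)/s)) + d
#   boltz_cd: G(V) = (c - d) / (1 + exp((V - Vh)/s)) + d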
def get_vhalf(vh, s, c=1, d=0):
return vh + s*math.log((c-0.5)/(0.5-d))
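# Derivation of the corrected half-activation voltage returned above: setting
# boltz_cd(V) = 0.5 gives (c - d)/(1 + exp((V - Vh)/s)) + d = 0.5, so
# exp((V - Vh)/s) = (c - 0.5)/(0.5 - d) and hence
# V1/2 = Vh + s*ln((c - 0.5)/(0.5 - d)), which reduces to Vh when c = 1, d = 0.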
def get_gv_leg_lab(vh, s, c=1, d=0):
if c < 0.5 or d > 0.5:
print(c, d)
raise Exception("c < 0.5 or d > 0.5")
else:
Vhalf = get_vhalf(vh, s, c=c, d=d)
if c > 0 and d > 0:
return "Fit \n $V_{1/2}$ = %.1f mV \n $s$=%.1f mV \n c=%.4f \n d=%.2e" % (Vhalf, s, c, d)
elif c > 0:
return "Fit \n $V_{1/2}$ = %.1f mV \n $s$=%.1f mV \n c=%.4f" % (Vhalf, s, c)
elif d > 0:
return "Fit \n $V_{1/2}$ = %.1f mV \n $s$=%.1f mV \n d=%.2e" % (Vhalf, s, c)
else:
return "Fit \n $V_{1/2}$ = %.1f mV \n $s$=%.1f mV" % (Vhalf, s)
class NoisyGV():
def __init__(self, df, khz):
self.df = df
self.khz = khz
def current_histogram(self, dt=500):
N = int(self.df.shape[1]/2)
dt = int(dt * self.khz)
f = plt.figure(constrained_layout=True)
gs = f.add_gridspec(N, 2)
# currents
axI = f.add_subplot(gs[:, 0])
for i in range(N):
clr = cmap((i+1)/N)
axI.plot(self.df.iloc[:, i], c=clr)
ax_i = f.add_subplot(gs[i, 1])
ax_i.hist(self.df.iloc[:dt, i],
histtype="stepfilled", bins="auto", color=clr)
ax_i.set_yticks([])
ax_i.set_yticklabels([])
plt.show()
class lmfit_boltzmann():
def __init__(self, func_name="boltz_a"):
"""
`v` = test voltages
`g` = normalized conductances
"""
# declare Boltzmann parameters
B_pars = lmfit.Parameters()
B_pars.add("Vh", value=-100, min=-200, max=0)
B_pars.add("s", value=10, min=3, max=50)
self.pars = B_pars
self.popt = None
# variant of boltzmann function for fitting
b = BoltzmannFunctions(func_name=func_name)
print(b.descr)
self.boltz = b.select()
def do_fit(self, v, g, func_name="boltz_c", c_max=1.1):
# add extra parameters if needed
if "c" in func_name:
# self.pars.add("c", value=1, min=0.51, max=c_max)
self.pars.add("c", value=1, min=1., max=c_max)
if "d" in func_name:
self.pars.add("d", value=0, min=-0, max=0.49)
# select boltzmann function
b = BoltzmannFunctions(func_name)
self.boltz = b.select()
print("Fitting with: \n %s\
Note that `lmfit` is used for fitting. \
\n Parameters are unpacked in the residual by flattening dict values to a list, which may cause loss of order. \
\n If errors are suspected, check if this is the case." % b.descr)
# define residual
def residual(pars, volts=v, data=g):
# unpack parameters: extract .value attribute for each parameter
parvals = list(pars.valuesdict().values())
return self.boltz(volts, *parvals) - data
res = lmfit.minimize(residual, self.pars, method='nelder')
res.params.pretty_print()
print(lmfit.fit_report(res))
# prameters in dict format
self.popt = res.params.valuesdict()
try:
pcov = res.covar
perr = np.sqrt(np.diag(pcov))
except:
perr = [np.nan] * len(self.popt.keys())
return self.popt, perr
def get_fit(self, vrange, p0=[-100, 5]):
popt = list(self.popt.values())
if len(popt) > 0:
return [self.boltz(v, *popt) for v in vrange]
else:
raise Exception("Fitting failed")
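# Minimal usage sketch for the class above (variable names are illustrative,
# not from the original script): given test voltages `volts` and normalized
# tail conductances `g_norm`,
#   fitter = lmfit_boltzmann(func_name="boltz_c")
#   popt, perr = fitter.do_fit(volts, g_norm, func_name="boltz_c", c_max=1.1)
#   curve = fitter.get_fit(np.arange(-150, 5, 5))
# `popt` is an ordered dict of fitted parameter values and `perr` holds their
# 1-sigma errors (NaN if the covariance matrix is unavailable).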
def subtract_tail_baseline(df, khz, window=50, filter_ma=10,
show=False, method='min'):
"""
Subtract average of last `window` ms from each trace.
This is justified when tail currents are expected to reach steady-state at the tail voltage.
`df` = dataframe containing extracted tail currents
`khz` = sampling rate
`window` = time window, beginning from the end of the pulse, where currents will be averaged and subtracted
`filter_ma` = if > 0, uses this as timerange to compute moving average
`method` = 'all' or 'min'
* if 'all', subtracts average current in `window` for each tail current;
* if 'min', subtracts average current in `window` for tail current with least standard deviation from all tail currents
* if 'auto', begins with 'all', but switches to 'min' if any Spearman R^2 has absolute value > 0.6 for last 40% of tail currents.
Returns
`df_sub` = subtracted dataframe
"""
if window > df.shape[0]/khz:
raise Exception("`window` cannot be larger than tail duration.")
if method not in ['all', 'min', 'auto']:
raise Exception(
"`method` for `subtract_tail_baseline` must be 'min', 'auto', or 'all'")
if filter_ma > 0:
print("Appling < %d ms > rolling average..." % filter_ma)
df = df.rolling(filter_ma*khz).mean().iloc[filter_ma*khz:, :]
# reset time index to start at 0
df.index -= filter_ma
# test linearity of last 40% of tail currents by Spearman R
print("Spearman coeff. (R) for last 40% of tail currents")
r = apply_savgol_spearmanr(df)
print("Absolute Spearman r: \n", r)
print("Average: ", r.mean())
    # convert `window` to sample units, padding by 10 ms so the averaged region ends 10 ms before the end of the trace (avoids capacitive artefacts)
window = (window + 10)*khz
# slice `df` by `window` (+ 10ms offset) with 5ms rolling avg
i_ss = df.iloc[-window:-10*khz, :]
# skip subtraction if average current in `window` is less than 1pA
if (i_ss.rolling(5*khz).mean().mean(axis=0).abs() < 1).all():
print("Mean current in `window` of tail currents: \n", i_ss.mean(axis=0))
print("All mean currents < 1pA. Skipping baseline subtraction.")
print(i_ss.mean(axis=0))
return df
# check that currents are relatively unchanging in `i_ss` by computing (max - min)
def max_minus_min(x): return x.max(axis=0) - x.min(axis=0)
# current drop across `window` and the entire range `whole`
drops = [max_minus_min(y.rolling(2*khz).mean().abs()) for y in [i_ss, df]]
drops = 100 * (drops[0] / drops[1].max())
# range_ss = max_minus_min(i_ss.rolling(2*khz).mean().abs())
# range_whole = max_minus_min(df.rolling(2*khz).mean().abs()).max()
# ideally, the change in current over the subtracted region is
# <= 1% the total current drop
print("Current drop in `window`as % of total current drop. \n", drops)
# try to identify noisy traces where current drop in `window` is
# substantial compared to total current drop throughout tail current
if (drops >= 3).all():
print("All traces have >3% current drop in `window`. Applying savgol filter.")
# degree down to 60
w = max([50, 400 - 40*int(drops.max())])
df = apply_savgol(df, w=w, show=True)
if method == 'min':
# use standard deviation to find tail current with
# least change in current amplitude
# idx = df.iloc[window:-window, :].abs().std(axis=0).argmin()
# i_ss = i_ss.iloc[:, idx]
if df.iat[0, 0] > df.iat[0, -1]:
i_ss = i_ss.iloc[:, -1]
else:
i_ss = i_ss.iloc[:, 0]
# compute baseline
# if method = 'min', this is scalar
# else, 1 x N
i_ss = i_ss.mean(axis=0)
# test for steady-state using Spearman coefficient
if (r > 0.55).any():
print("Spearman coefficients > 0.55 found.")
if method == "auto":
print("For traces with `r > 0.55`, replace with mean of other traces")
inds = r[r > 0.55].index.values
i_ss.iloc[inds] = i_ss.iloc[~inds].min()
else:
print(
"If baseline subtraction should be modified, consider setting `method = 'auto'`")
if show:
fig, axs = plt.subplots(1, 2, figsize=(9, 5), constrained_layout=True)
axs[0].set_title("Subtraction of baseline")
axs[1].set_title("Division by maximum")
axs[0].plot(df.iloc[::10, :], c='gray', lw=1, alpha=0.5)
# subtract baseline
df = df - i_ss
if show:
# decrement in alpha
da = 1/df.shape[1]
# plot subtracted currents and (crudely) normalized currents
imax = df.max(axis=0).max()
for i in range(df.shape[1]):
alpha = 1 - da*i
axs[0].plot(df.iloc[::10, i], c='r', lw=2, alpha=alpha)
# plot a crude normalization, i.e. divide by maximum
axs[1].plot(
(df.iloc[::10, i] / imax),
c="purple", lw=2, alpha=alpha
)
# demarcate window for subtraction
axs[0].axvline(df.index[-window], c='r', ls='--', lw=2, alpha=0.5)
axs[0].axvline(df.index[-10*khz], c='r', ls='--', lw=2, alpha=0.5)
fig.suptitle("Average range_ss/range_whole = %.2f %%" % drops.mean())
plt.show()
plt.close()
return df
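# Hedged usage sketch: a typical call for an activation protocol, letting the
# routine pick the least-varying trace as the common baseline; `tails` and
# `khz` stand for a real leak-subtracted tail-current dataframe and its
# sampling rate.
def _example_subtract_tail_baseline(tails, khz):
    return subtract_tail_baseline(tails, khz, window=50, filter_ma=10,
                                  show=False, method='min')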
class activation_curve():
def __init__(self, test_voltages, tails, khz,
boltz_func="boltz_cd",
subtract_baseline_method="all",
base_method="zero_negative_norm",
post_tails=None, post_tail_voltage=0,
fit_only=False, show_pmin=False, plot_results=False,
show=True, pdf=None):
"""
Compute activation curve/Pmin from leak-subtracted data
INPUTS
`test_voltages` = list of test voltages
`tails` = dataframe of tail currents
        `subtract_baseline_method` = method argument passed to `subtract_tail_baseline()`
`base_method` = how to set values of baseline. Unlike `subtract_baseline_method`, which is applied prior to any processing, this is intended to handle stray values *after* initial normalization.
Default is 'leak', which uses the leak-subtracted current. Other options:
        * 'zero_negative_norm' = subtract the most negative value (shifting the minimum to zero), then re-normalize. No changes if all values are greater or equal to 0.
        * 'zero_negative' = subtract the most negative value without re-normalization. Again, no changes if all values are greater or equal to 0.
        * 'pmin_threshold=#', where # is a number; all values < # will be set to 0, without re-normalization.
        * 'pmin_threshold_norm=#', set values < # to 0, then renormalize.
See docstring of `apply_base_method` for more details.
`nparam` = determines number of free parameters in boltzmann fit
'boltz_a' for standard boltzmann, 'boltz_c' for floating maximum, 'boltz_d' for floating minimum, or 'boltz_cd' for floating maximum + minimum
`post_tails` = dataframe of post-tail currents, if available
`post_tail_voltage` = voltage used to elicit post_tails
`show_pmin` = compute Pmin for tail and/or post_tail currents
For deactivation, calling the class will simply return Pmin estimates from tail currents, and, when available, post-tail currents
For activation, calling the class returns normalized Po and Boltzmann parameters
"""
# initialize class variables as None
# if they change, this informs downstream methods about the protocol used
self.norm_tails = None
self.tail_mins = None
self.post_peaks = None
# general
self.test_voltages = None
self.popt = None
self.nparam = boltz_func
if fit_only:
self.test_voltages = test_voltages
self.norm_tails = tails
else:
# correct baseline for activation protocols (GV)
# subtract baseline current from tail currents if average current in last 50ms is greater than 10
if post_tails is None:
tails = subtract_tail_baseline(
tails, khz, window=200, show=show,
method=subtract_baseline_method
)
# manually truncate a problematic trace
# if abs(tails.iloc[:,2].sum() + 66710) < 5:
# tails.iloc[:,2] = tails.iloc[:1850*khz,2]
# print("Warning: Manual truncation of 3rd sweep's tail current during `activation_curve`.")
# compute peak amplitude of tail currents
# peak current from 4ms moving avg over first 100ms,
# with 5ms offset to avoid residual capacitance
tail_peaks = tails.iloc[5*khz:105*khz,
:].rolling(5*khz).mean().abs().max().values
# deactivation, i.e. post_tails isn't None
if post_tails is not None:
# for deactivation, compute pmin from tails, as well as post_tail pmin, if available
norm_tails = pd.concat(
[tails.iloc[:, i].abs()/tail_peaks[i]
for i in range(tails.shape[1])],
axis=1
)
# Pmin from tail currents
# avg of 5ms moving avg of last 50ms, offset by -2ms to help avoid capacitive currents
tail_mins = norm_tails.dropna(
).iloc[-52*khz:-2*khz, :].rolling(5*khz).mean().mean()
self.tail_mins = tail_mins
# check if post_tail_voltage is in test_voltages; if not, ignore post_tails
if post_tail_voltage in test_voltages:
# index of sweep with test pulse voltage corresponding to post_tail_voltage
j = test_voltages.index(post_tail_voltage)
# divide post_tail currents by peak current from (test) tail current at the same voltage
norm_post = post_tails / tail_peaks[j]
# normalized peak from post_tails
# this measures residual activation after deactivating test pulses, hence `Pmin`
post_peaks = norm_post.iloc[:100 *
khz, :].rolling(4*khz).mean().max()
self.post_peaks = post_peaks
# sort all data according to voltages
if self.post_peaks is None:
self.test_voltages, self.tail_mins = multi_sort(
zip(test_voltages, tail_mins))
else:
self.test_voltages, self.tail_mins, self.post_peaks = multi_sort(
zip(test_voltages, tail_mins, post_peaks))
# `base_method` != 'leak' would normally modify Pmin values for deactivation, but \
# because a Popen is unavailable at the duration used for deactivating tail currents,
# this is probably unwise.
# if base_method != "leak":
# self.tail_mins, self.tail_peaks, self.post_peaks = self.apply_base_method(base_method)
if show_pmin:
self.plotter(mode="deactivation", pdf=pdf)
# activation
else:
# ignore tail currents following activating pulses below -150mV or above 100mV
# indices out of the range [-150, 100]
V_InRange = [i for (i, v) in enumerate(
test_voltages) if abs(v + 25) <= 125]
# only keep test voltages that are in range
if len(V_InRange) > 0:
test_voltages = [t for i, t in enumerate(
test_voltages) if i in V_InRange]
tail_peaks = [t for i, t in enumerate(
tail_peaks) if i in V_InRange]
# dataframe of tail currents
tails = tails.iloc[:, V_InRange]
else:
raise Exception(
" No voltages were found in range [-150, 100].")
exit()
# normalize all tail currents by maximum of the entire data
M = np.max(tail_peaks)
norm_tails = tails / M
# find minima of normalized tails as avg of 5ms moving avg of last 50ms, offset by 2ms to help avoid capacitance
tail_mins = norm_tails.iloc[-52*khz:-2 *
khz, :].rolling(5*khz).mean().mean(axis=0)
# normalize peaks by maximum peak
tail_peaks = [t/M for t in tail_peaks]
# sort all data according to voltages
test_voltages, tail_peaks, tail_mins = multi_sort(
zip(test_voltages, tail_peaks, tail_mins))
self.test_voltages = test_voltages
self.norm_tails = tail_peaks
self.tail_mins = tail_mins
# apply baseline method
# variables that are None will remain as such, eg self.post_peaks for activation
if base_method != "leak":
self.tail_mins, self.norm_tails, self.post_peaks = self.apply_base_method(
base_method)
# vrange, y, popt = sim
sim = self.fit_boltz(return_sim=True)
self.popt = sim[2]
if show:
self.plotter(mode="activation", sim=sim, pdf=pdf)
def apply_base_method(self, base_method):
"""
`base_method` = how to set values of baseline. Default is 'leak', which simply uses the leak-subtracted current. Other options:
        * 'zero_negative' = subtract the most negative value (shifting the minimum to zero), without re-normalization
        * 'zero_negative_norm' = subtract the most negative value, then re-normalize
        * 'pmin_threshold=#', where # is a number; all values < # will be set to 0, without re-normalization.
        * 'pmin_threshold_norm=#', set values < # to 0, then renormalize.
* 'v_threshold=#', where # is a voltage; values for voltages > # will be set to 0
"""
# list of relevant quantities for steady-state activation
P = [self.tail_mins, self.norm_tails, self.post_peaks]
if all(p is None for p in P):
raise Exception(
" All of `tail_mins`, `tail_peaks` and `post_peaks` are None.")
else:
print(" Treating baseline with method < %s >" % base_method)
for i, p in enumerate(P):
# skip if None, e.g. post_peaks in activation
if p is None:
continue
elif base_method in ["zero_negative", "zero_negative_norm", "pmin_threshold", "pmin_threshold_norm", "leak"]:
if base_method == "leak":
continue
elif "zero_negative" in base_method:
if any(x < 0 for x in p):
pmin = np.nanmin(p)
P[i] = [x - pmin for x in p]
elif "pmin_threshold" in base_method:
# threshold as float
pmin_t = float(base_method.split("=")[1])
# find values < pmin_t
if any(x < pmin_t for x in p):
for j in range(len(p)):
if p[j] < pmin_t:
P[i][j] = 0
elif 'v_threshold' in base_method:
# threshold voltage as float
v_t = float(base_method.split("=")[1])
# check if any voltages above v_t
if any(x > v_t for x in self.test_voltages):
# assume data are sorted according to voltage (negative to positive)
# index of first element in `v` that satisfies v[i] > v_t
idx = next(i for i, x in enumerate(
self.test_voltages) if x > v_t)
for j in range(len(p) - idx):
P[i][idx+j] = 0
# re-normalize
if "norm" in base_method:
p = P[i][:]
# select `pmax` from `self.norm_tails` if available
# i.e. to avoid normalizing Pmin to Pmin and getting a Pmin of 1
if self.norm_tails is None:
print(
" `self.norm_tails` is None, but `base_method` with `norm` implies re-normalization. \n Re-normalization will therefore produce a [0, 1] scale for the given dataset.")
pmax = np.nanmax(p)
else:
pmax = np.nanmax(self.norm_tails)
P[i] = [x/pmax for x in p]
else:
raise Exception(
" `base_method` %s not understood." % base_method)
return P
def plotter(self, mode="activation", sim=None, show=True, pdf=None):
"""
Plot maximum and minimum values from normalized tail currents (`mode=activation`) or normalized minimum values from tail currents and normalized maximum values from post-tail currents (`mode=deactivation`)
`sim` = output from `self.fit_boltz(return_sim=True)`
"""
volts = self.test_voltages
if mode == "deactivation":
f, ax = plt.subplots(figsize=(9, 6), constrained_layout=True)
ax.plot(volts, self.tail_mins, marker='o', lw=0.5, label="Tail")
if self.post_peaks is not None:
ax.plot(volts, self.post_peaks, marker='s',
lw=0.5, label="Post-tail")
ax.legend()
ax.set_xlabel("Voltage (mV)")
ax.set_ylabel(r"$P_{\mathregular{min}}$")
ax.set_title(
r"Average $P_{\mathregular{min}}$ = %.3f" % np.mean(self.tail_mins))
elif mode == "activation":
if sim is None:
raise Exception(
" No information from Boltzmann fitting provided.")
else:
vrange, y, popt = sim
popt = popt.values()
f, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].plot(volts, self.norm_tails,
marker='o', lw=0.5, label="Data")
# create legend label with fit parameters
n = len(popt)
# set defaults
c, d = [0, 0]
# unpack fit parameters
if n == 4:
vh, s, c, d = popt
elif n == 3:
if "d" in self.nparam:
vh, s, d = popt
else:
vh, s, c = popt
else:
vh, s = popt
# create legend label
lab = get_gv_leg_lab(vh, s, c=c, d=d)
ax[0].plot(vrange, y, ls='--', label=lab)
ax[1].plot(volts, self.tail_mins, marker='o', lw=0.5)
# location of legend label
if y[0] > 0.5:
ax[0].legend(loc='lower left', fontsize=10,
framealpha=0.5, edgecolor="none")
else:
ax[0].legend(loc='upper right', fontsize=10,
framealpha=0.5, edgecolor="none")
ax[0].set_title("Normalized Conductance")
ax[1].set_title(r"$P_{min}$")
ax[0].set_ylabel(r"$I/I_{max}$")
for i in range(2):
ax[i].set_xlabel("Voltage (mV)")
# yticks
ax[1].ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))
if pdf is not None:
pdf.savefig()
if show:
plt.show()
plt.close()
def fit_boltz(
self, return_sim=False, vrange=list(range(-200, 10, 5)), fit_kw={}
):
"""
Perform fit with Boltzmann function (standard if `self.nparam = 2`, modified if `self.nparam = 3`).
`c_max` = upper bound for `c` in Boltzmann fit
`vrange` = voltages to evaluate Boltzmann at, only used if `return_sim=True`
If `return_sim = True`, return
voltages `vrange` = list
Boltzmann output `y` = list
Boltzmann fit parameters `popt` = OrderedDict
Else, return Boltzmann fit parameters `popt`.
"""
# sort voltages and normalized tail peaks
try:
v, g = zip(*sorted(zip(self.test_voltages, self.norm_tails)))
except:
v, g = self.test_voltages, self.norm_tails
# fit boltzmann
LM = lmfit_boltzmann()
popt, _ = LM.do_fit(v, g, func_name=self.nparam, **fit_kw)
self.popt = popt
if return_sim:
y = LM.get_fit(vrange)
return vrange, y, popt
else:
return popt
def do_fit(self):
"""
Return normalized peak tail amplitudes and fit parameters
"""
if self.popt is None:
raise Exception(
"\n No fit parameters available to return. Fit wasn't done first, or fit failed.")
else:
return self.norm_tails, self.popt
def tail_pmins(self):
"""
Return normalized minimum tail currents and, if available, normalized post tail peaks (for deactivation protocol).
"""
if self.post_peaks is None:
return self.tail_mins
else:
return self.tail_mins, self.post_peaks
def return_test_voltages(self):
"""
Return test voltages used in recording protocol (not necessarily the same as voltages used for simulating Boltzmann output).
"""
return self.test_voltages
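# Hedged usage sketch: build a steady-state activation curve from tail
# currents and return normalized conductances plus Boltzmann parameters;
# `test_voltages`, `tails` and `khz` stand for real protocol data.
def _example_activation_curve(test_voltages, tails, khz):
    gv = activation_curve(test_voltages, tails, khz,
                          boltz_func="boltz_cd", show=False)
    return gv.do_fit()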
class Summarize_GV():
def __init__(self, fname, dv=3.178, paired_dic={},
individual=False, vsim=range(-200, 10, 5),
path=r"./output/Processing/Pooled_Analyses/CSV_output/"):
"""
`fname` is the filename of a .csv file in the `CSV_output` folder containing voltages and normalized conductances. `fname` can also be a list of Strings. If so, all dataframes will be concatenated.
`dv` = liquid junction potential, subtracted from test_voltages. Calculated using LJPCalc
https://swharden.com/software/LJPcalc/theory/#ljpcalc-calculation-method
`paired_dic` = dictionary of {parent : child} filenames. Otherwise, will retrieve automatically.
`individual` = whether to treat List of Strings in `fname` as individual dataframes; if False, then the data are concatenated and analyzed together
`vsim` = voltages to simulate Boltzmann
Computes statistics and make figure.
"""
plt.style.use("dark_background")
Set_RC_Defaults()
        # initialize self.df and self.df_pars, which will hold normalized conductances and Boltzmann fit parameters, respectively
self.df = None
self.df_pars = None
# read dataframe of normalized conductances vs voltage;
# paths may specify single String or List of Strings
if isinstance(fname, list):
# concatenate all dataframes column-wise
df_list = [pd.read_csv(path + f, header=0, index_col=0)
for f in fname]
# find parameters csv, which should have same filename except for suffix "boltz_params"
pre = [f.split("__")[:-1] for f in fname]
for i, f in enumerate(pre):
f.append("boltz_params.csv")
pre[i] = "__".join(f)
df_pars_list = [pd.read_csv(
path + f, header=0, index_col=0) for f in pre]
# if 'c' is missing from columns, add it and set it to all 0
for i, d in enumerate(df_pars_list):
if d.shape[1] == 3:
df_pars_list[i].columns = ["Vh", "s", "c"]
elif d.shape[1] == 2:
d["c"] = np.zeros(d.shape[0])
d.columns = ["Vh", "s", "c"]
df_pars_list[i] = d
if individual:
self.df = df_list
self.df_pars = df_pars_list
else:
# concatenate dataframes
df = pd.concat(df_list, axis=1)
# remove duplicate columns (filenames)
self.df = df.loc[:, ~df.columns.duplicated()]
# concatenate along rows
df_pars = pd.concat(df_pars_list, axis=0)
# remove duplicate rows (filenames)
self.df_pars = df_pars.loc[~df_pars.index.duplicated(), :]
else:
self.df = pd.read_csv(path + fname, header=0, index_col=0)
if (self.df is None) or (self.df_pars is None):
raise Exception(
"Failed to parse data as`df` and `df_pars` from `fname`.")
self.fname = fname
self.dv = dv
self.paired_dic = paired_dic
self.vsim = vsim
self.outpath = path
self.refit_params = None
def do_stats(self, fname, df):
"""
Returns mean and error of G-V data in a single dataframe `df` with fname `fname`
First finds paired recordings to identify technical replicates.
Technical replicates are averaged together, then averaged with remaining biological replicates.
Errors are given as stdev if technical replicates are present. Else, uses sem.
If technical replicates are present, variances between these and biological replicates are averaged, then sqrt to find stdev.
"""
# find paired files
F = []
if len(self.paired_dic.keys()) < 1:
try:
F = FindPairedFiles(fname).Find()
# flatten dictionary, since we don't need the parent/child distinction
F = list(F.values())
except:
print(
" Finding paired files failed. \n Continuing, assuming all replicates are biological replicates.")
else:
# check if parent filename is in dictionary value; if so, we can just flatten the dictionary
            k = list(self.paired_dic.keys())[0]
v = self.paired_dic[k]
if k in v:
F = list(self.paired_dic.values())
# otherwise, add parent filename to values, then flatten dictionary afterwards
else:
for k, v in self.paired_dic.items():
if k in v:
continue
else:
v.append(k)
F = list(self.paired_dic.values())
# if technical replicates are present, then `F` is not empty
paired_mu = []
paired_err = []
if len(F) > 0:
for f in F:
# columns of dataframe that correspond to cell `f`, one biological replicate
df_f = df.loc[:, df.columns.isin(f)].dropna(how='all')
if df_f.shape[0] < 2:
continue
else:
# compute mean and std over columns
paired_mu.append(df_f.mean(axis=1))
paired_err.append(df_f.std(axis=1))
# drop the paired files from original dataframe
df.drop(df_f.columns, axis=1, inplace=True)
# mean and sem over biological replicates (if no technical replicates, then this is performed over entirety of `df`)
mu = df.mean(axis=1)
# if no technical replicates, use sem instead of std
if len(paired_err) < 1:
err = df.sem(axis=1)
else:
err = df.std(axis=1)
        # if technical replicates are present, average them with the means and variances of the unpaired biological replicates
if len(paired_mu) > 0:
# concatenate statistics of paired files
paired_mu = pd.concat(paired_mu, axis=1, ignore_index=False)
paired_err = pd.concat(paired_err, axis=1, ignore_index=False)
# compute total averages and sem
mu = pd.concat([mu, paired_mu], axis=1,
ignore_index=False).mean(axis=1)
# std**2 -> variance -> average variance -> sqrt(variance) = std
err = pd.concat([err, paired_err], axis=1, ignore_index=False).pow(
2).mean(axis=1).pow(0.5)
return mu, err
def boltzfit(self, df, mu, LJPcorrection=True):
"""
Use lmfit to fit Boltzmann function to G-V data, with voltages in index of `df` and conductances in `mu` \\
`df` = dataframe of voltage x normalized conductance
`mu` = average of conductances in `df`
`LJPcorrection` = whether to apply offset due to liquid junction potential \\
Returns `yfit`, `popt`, `perr`
"""
# adjust voltage, V_cell = V_measured - V_LJP
if LJPcorrection:
volts = df.index - self.dv
else:
volts = df.index.values
# fit boltzmann with lmfit
LM = lmfit_boltzmann()
popt, perr = LM.do_fit(volts, mu, func_name="boltz_cd")
yfit = LM.get_fit(self.vsim)
return yfit, volts, popt, perr
def MeanBoltzFit(self, df, LJPcorrection=True, median=False):
"""
Average fit parameters in `df`, then compute a Boltzmann curve
"""
df_pars = df.copy()
if (LJPcorrection == True) and (self.dv > 0):
df_pars.loc[:, "Vh"] -= self.dv
# compute SEM
err = df_pars.sem(axis=0)
# compute median or mean
if median:
mu = df_pars.median(axis=0)
else:
mu = df_pars.mean(axis=0)
if len(mu.shape) > 1:
            mu = pd.Series(mu)
import numpy as np
import pandas as pd
from scipy import signal as ssig
from scipy import stats as spst
import os
import re
import string
from salishsea_tools import geo_tools
import netCDF4 as nc
import gsw
# list CTD cnv files associated with cast numbers
cnvlist19={1:'fraser2017101.cnv',
2:'fraser2017102.cnv',
3:'fraser2017103.cnv',
4:'fraser2017104.cnv',
5:'fraser2017105.cnv',
6:'fraser2017106.cnv',
7:'fraser2017107.cnv',
8:'fraser2017108.cnv',
9:'fraser2017109.cnv',
10:'fraser2017110.cnv',
11:'fraser2017111.cnv',
12:'fraser2017112.cnv',
13:'fraser2017113.cnv',
14.1:'fraser2017114.cnv',
14.2:'fraser2017114.cnv',
15:'fraser2017115.cnv',
16:'fraser2017116.cnv',
17:'fraser2017117.cnv',
18:'fraser2017118.cnv',
19:'fraser2017119.cnv',
20:'fraser2017120.cnv',
21:'fraser2017121.cnv',
22:'fraser2017122.cnv',
23:'fraser2017123.cnv',
24:'fraser2017124.cnv'}
cnvlist25={1:'fraser2017001.cnv',
2:'fraser2017002.cnv',
3:'fraser2017003.cnv',
4:'fraser2017004.cnv',
5:'fraser2017005.cnv',
6:'fraser2017006.cnv',
7:'fraser2017007.cnv',
8:'fraser2017008.cnv',
9:'fraser2017009.cnv',
10:'fraser2017010.cnv',
11:'fraser2017011.cnv',
12:'fraser2017012.cnv',
13:'fraser2017013.cnv',
14.1:'fraser2017014.cnv',
14.2:'fraser2017014.cnv',
15:'fraser2017015.cnv',
16:'fraser2017016.cnv',
17:'fraser2017017.cnv',
18:'fraser2017018.cnv',
19:'fraser2017019.cnv',
20:'fraser2017020.cnv',
21:'fraser2017021.cnv',
22:'fraser2017022.cnv',
23:'fraser2017023.cnv',
24:'fraser2017024.cnv'}
class Cast:
def __init__(self,fpath):
mSta,mLat,mLon,df=readcnv(fpath)
self.sta=mSta
self.lat=mLat
self.lon=mLon
self.df=df
self.source=fpath
class zCast:
def __init__(self,updf,downdf):
self.uCast=updf
self.dCast=downdf
class rawCast:
def __init__(self):
self.uCast=dict()
self.dCast=dict()
class dataPair:
def __init__(self,zval,varval):
self.z=zval
self.val=varval
def fmtVarName(strx):
""" transform string into one that meets python naming conventions"""
vName=re.sub('[^a-zA-Z0-9_\-\s/]','',strx.strip())
vName=re.sub('[\s/]','_',vName)
vName=re.sub('-','_',vName)
if re.match('[0-9]',vName):
vName='_'+vName
return vName
#def rolling_window(a, window):
# # source: http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html
# # use example: np.mean(rolling_window(x, 3), -1)
# shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
# strides = a.strides + (a.strides[-1],)
# return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
#
#def rolling_window_padded(a,window):
# # extend rolling window to be same lenth as input array by duplicating first and last values
# # even values not symmetric
# test=rolling_window(a,window)
# while window>1:
# if window%2==0:
# test=np.concatenate(([test[0,:]],test),axis=0)
# else:
# test=np.concatenate((test,[test[-1,:]]),axis=0)
# window+=-1
# return test
def slidingWindowEval(x,func,window,axis=0):
# x is input array
# func is function to carry out over window
# window is window size
# axis is axis to act along, in case of multiple
# if window is even, results will be shifted left by 1/2 unit
x1=np.lib.stride_tricks.sliding_window_view(x, window, axis)
b=func(x1,-1)
# the rest of the code pads the front and back to return an array of the same shape as the original
nfront=np.floor((window-1)/2)
nback=np.floor((window-1)/2)+(window-1)%2
inxf=[slice(None)]*np.ndim(b)
inxf[axis]=slice(0,1,1)
inxb=[slice(None)]*np.ndim(b)
inxb[axis]=slice(np.shape(b)[axis]-1,np.shape(b)[axis],1)
repsf=np.ones(np.ndim(b),dtype=int)
repsf[axis]=int(nfront)
repsb=np.ones(np.ndim(b),dtype=int)
repsb[axis]=int(nback)
x2=np.concatenate((np.tile(b[tuple(inxf)],repsf),b,np.tile(b[tuple(inxb)],repsb)),axis=axis)
return x2
def amp(var,dim=0):
return np.nanmax(var,dim)-np.nanmin(var,dim)
def turbQC(x):
# turbidity sensor produced erroneous zero readings interspersed with real data when too close to surface
# remove suspect values from analysis
# - median filter alone was not enough
# remove a point if the max-min of the surrounding 5 point window
# is greater than 1/3 the maximum turbidity value of the cast
# (remove data within 5 points of a large jump)
#ii1=amp(rolling_window_padded(x,5),-1)>.33*np.nanmax(x)
ii1=slidingWindowEval(x,amp,5)>.33*np.nanmax(x) # was .5
# remove data within 5 points of a near-zero turbidity value
#ii2=np.nanmin(rolling_window_padded(x,5),-1)<.3
ii2=slidingWindowEval(x,np.nanmin,5)<.3
y=np.copy(x)
y[np.logical_or(ii1,ii2,)]=np.nan
y=ssig.medfilt(y,3)
return y
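# Hedged sketch of the QC heuristic on synthetic data: a smooth ramp with two
# spurious near-zero turbidity readings; samples within the 5-point window of
# the dropouts come back as NaN after the median filter.
def _example_turbQC():
    x = np.linspace(1.0, 5.0, 50)
    x[[10, 11]] = 0.0
    return turbQC(x)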
def readcnv(fpath):
alphnumlist=list(string.ascii_letters)+list(string.digits)
# define regexes for reading headers:
reSta=re.compile('(?<=\*\*\sStation:)\s?([0-9])+\s?') # assumes numeric station identifiers
reLat=re.compile('(?<=\*\*\sLatitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([NS])')
reLon=re.compile('(?<=\*\*\sLongitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([EW])')
# start_time = May 08 2002 09:39:10
reST=re.compile('(?<=\#\sstart_time\s=).*')
#reTZ=re.compile('(?<=\*\*\s...\s\(Time\)\s=).*')
#reCr=re.compile('(?<=\*\*\sCruise:).*')
reNam=re.compile('(?<=\#\sname\s)([0-9]+)\s=\s(.*)\:\s?(.*)\s?')
    # define regex for detecting data rows (lines that start with digits):
spStart=re.compile('^\s*[0-9]') # starts with space characters followed by digit
headers=list()
#lineno=0
mSta=None
mLat=None
mLon=None
with open(fpath, 'rt', encoding="ISO-8859-1") as f:
for fline in f:
if fline.startswith('**'):
if reSta.search(fline):
mSta=reSta.search(fline).groups()
if reLat.search(fline):
mLat=reLat.search(fline).groups()
if reLon.search(fline):
mLon=reLon.search(fline).groups()
elif reNam.search(fline):
headers.append(fmtVarName(reNam.search(fline).groups(1)[1]))
elif fline.startswith('*END*'):
break
#lineno+=1
#still in with file open
df=pd.read_csv(f,delim_whitespace=True,names=headers)
# file closed
return mSta,mLat,mLon,df
def bindepth(inP,inV,edges,targets=[],prebin=False):
# calculate depth-associated variables
# 1st calculate bin averages of depth and variable
# then use np interp to estimate at-grid-point values
# edges must be monotonically increasing
if prebin==True:
newP,newV=bindepth(inP,inV,np.arange(edges[0],edges[-1],.05),prebin=False)
inP=newP
inV=newV
inP=inP[~np.isnan(inV)]
inV=inV[~np.isnan(inV)]
binned=np.digitize(inP,edges)
Pa=np.empty(len(edges)-1)
Va=np.empty(len(edges)-1)
if len(targets) == 0:
Pi=.5*(edges[:-1]+edges[1:])
else:
Pi=targets[:(len(edges)-1)]
Vi=np.empty(len(edges)-1)
for jj in range(1,len(edges)):
ll=(binned==jj) #&(~np.isnan(inV))
if np.sum(ll)>0:
Pa[jj-1]=np.mean(inP[ll])
Va[jj-1]=np.mean(inV[ll])
else:
Pa[jj-1]=np.nan
Va[jj-1]=np.nan
# linearly extrapolate some values, but not beyond range of original data
pnew=Pa[0]-(Pa[1]-Pa[0])
vnew=Va[0]-(Va[1]-Va[0])
Pa=np.concatenate(([pnew],Pa))
Va=np.concatenate(([vnew],Va))
Vi=np.interp(Pi,Pa[~np.isnan(Va)],Va[~np.isnan(Va)],right=np.nan,left=np.nan)
Vi[Pi>np.max(inP)]=np.nan
Vi[Pi<np.min(inP)]=np.nan
return Pi, Vi
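# Hedged sketch: bin a profile that is linear in depth onto 1 m bin centres;
# the interpolated values should track 2*depth closely.
def _example_bindepth():
    inP = np.linspace(0.0, 10.0, 101)   # pressure/depth samples
    inV = 2.0 * inP                     # variable that varies linearly with depth
    edges = np.arange(0.0, 11.0, 1.0)
    return bindepth(inP, inV, edges)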
def cXfromX(X):
X=np.array(X)
X[np.isnan(X)]=-5
Y=np.nan*X
iii=(X>0)&(X<100)
Y[iii]=-np.log(X[iii]/100.0)/.25
return Y
def turbReg(m,Cx,fl):
return np.maximum(0.0,m[0]*Cx-m[1]*fl-m[2])
def turbFit(df0):
# calculate conversion factor for sb19 ctd turbidity to ALS bottle turbidity
# force through (0,0)
x=df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['sb19Turb_uncorrected'].values
x=x[:,np.newaxis]
y=df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['ALS_Turb_NTU']
tinv=np.linalg.lstsq(x,y,rcond=None)[0]
tcor=1.0/tinv
return tcor
def loadDataFRP_init(exp='all'):
if exp not in {'exp1', 'exp2', 'exp3', 'all'}:
print('option exp='+exp+' is not defined.')
raise
with open('/ocean/shared/SalishSeaCastData/FRPlume/stationsDigitizedFinal.csv','r') as fa:
df0_a=pd.read_csv(fa,header=0,na_values='None')
with open('/ocean/shared/SalishSeaCastData/FRPlume/util/stationsDigitized_ancillary.csv','r') as fb:
df0_b=pd.read_csv(fb,header=0,na_values='None')
    df0=pd.merge(df0_a,df0_b,how='left',on=['Station','Date'])
import numpy as np
import pandas as pd
import pickle
import time
import itertools
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error as MSE
from operator import itemgetter
import operator
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sqlalchemy import create_engine
from mpl_toolkits.mplot3d import Axes3D
import warnings; warnings.simplefilter('ignore')
from decimal import *
import random
from itertools import combinations
import re
def match_mp(df, covs, covs_max_list,
treatment_indicator_col='treated', match_indicator_col='matched'):
''' Input:
df : a dataframe,
covs : a set of covariates to match on,
covs_max_list :
treatment_indicator_col : the treatment indicator column
match_indicator : the matched indicator column.
Output :
match_indicator : array indicating whether each unit is matched
indices : a list of indices for the matched units
'''
# truncate the matrix with the covariates columns
arr_slice_wo_t = df[covs].values # the covariates values as a matrix
# truncate the matrix with the covariate and treatment indicator columns
arr_slice_w_t = df[ covs + [treatment_indicator_col] ].values
# matrix multiplication: get a unique number for each unit
lidx_wo_t = np.dot( arr_slice_wo_t,
np.array([covs_max_list[i]**(len(covs_max_list)-1-i)
for i in range(len(covs_max_list))]
) )
# get a unique number for each unit with treatment indicator
lidx_w_t = np.dot( arr_slice_w_t,
np.array([covs_max_list[i]**(len(covs_max_list)-i)
for i in range(len(covs_max_list))] + [1]
) )
# count how many times each number appears
_, unqtags_wo_t, counts_wo_t = np.unique(lidx_wo_t, return_inverse=True,
return_counts=True)
# count how many times each number appears (with treatment indicator)
_, unqtags_w_t, counts_w_t = np.unique(lidx_w_t, return_inverse=True,
return_counts=True)
# a unit is matched if and only if the counts don't agree
match_indicator = ~(counts_w_t[unqtags_w_t] == counts_wo_t[unqtags_wo_t])
return match_indicator, lidx_wo_t[match_indicator]
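# Hedged sketch of the bit-vector matching step: two units (one treated, one
# control) share identical binary covariates and are flagged as matched, while
# the third unit has no exact counterpart.
def _example_match_mp():
    df = pd.DataFrame({'x0': [1, 1, 0],
                       'x1': [0, 0, 1],
                       'treated': [1, 0, 1]})
    match_indicator, indices = match_mp(df, ['x0', 'x1'], [2, 2])
    return match_indicator, indices   # [True, True, False], codes of the matched units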
# In[5]:
# function for computing the prediction error
def prediction_error_mp(holdout, covs_subset, ridge_reg = 1):
    ''' Input :
            holdout : the training data matrix
            covs_subset : the list of covariates to match on
        Output :
            PE : the prediction error
            time_PE : time to compute the regression
            mse_t, mse_c : cross-validated MSE for the treated and control fits
    '''
# below is the regression part for PE
s = time.time()
# Ridge : solves a regression model where the loss function is
# the linear least squares function and
# regularization is given by the l2-norm
ridge_c = Ridge(alpha=ridge_reg)
ridge_t = Ridge(alpha=ridge_reg)
mse_t = np.mean(cross_val_score(ridge_t,
holdout[holdout['treated']==1][covs_subset],
holdout[holdout['treated']==1]['outcome'],
scoring = 'neg_mean_squared_error' ) )
mse_c = np.mean(cross_val_score(ridge_c,
holdout[holdout['treated']==0][covs_subset],
holdout[holdout['treated']==0]['outcome'],
scoring = 'neg_mean_squared_error' ) )
num_t = holdout[holdout['treated'] == 1]['outcome'].shape[0]
num_c = holdout[holdout['treated'] == 0]['outcome'].shape[0]
PE = mse_t + mse_c
time_PE = time.time() - s
# -- above is the regression part for PE
# -- below is the level-wise MQ
return (PE, time_PE, mse_t, mse_c)
# function to compute the balancing factor
def balancing_factor_mp(df, match_indicator, tradeoff = 0.000):
''' Input :
df : the data matrix
match_indicator : the matched indicator column
Output :
balancing_factor : the balancing factor
time_BF : time to compute the balancing factor
'''
s = time.time()
# how many control units are unmatched
# recall matched units are removed from the data frame
num_control = len(df[df['treated']==0])
# how many treated units that are unmatched
# recall matched units are removed from the data frame
num_treated = len(df[df['treated']==1])
# how many control units are matched at this level
num_control_matched = np.sum((match_indicator) & (df['treated']==0))
# how many treated units are matched at this level
num_treated_matched = np.sum((match_indicator) & (df['treated']==1))
BF = tradeoff * ( float(num_control_matched)/num_control +
float(num_treated_matched)/num_treated )
time_BF = time.time() - s
# -- below is the level-wise MQ
return (BF , time_BF )
# match_quality, the larger the better
def match_quality_mp(BF, PE):
    ''' Input :
            BF : the balancing factor
            PE : the prediction error
        Output :
            match_quality : the matched quality (the larger the better)
    '''
return (BF + PE)
def get_CATE_bit_mp(df, match_indicator, index):
d = df[ match_indicator ]
# when index == None, nothing is matched
if index is None:
return None
# we do a groupby to get the statistics
d.loc[:,'grp_id'] = index
res = d.groupby(['grp_id'])
res_list = []
for key, item in res:
df = res.get_group(key)
index_list = df['index0'].tolist()
df_t = df[df['treated']==1]
df_c = df[df['treated']==0]
mean_c = df_c['outcome'].mean()
mean_t = df_t['outcome'].mean()
mean = mean_t - mean_c
res_list.append([Decimal(mean),index_list])
return res_list
def recover_covs_mp(d, covs, covs_max_list, binary = True):
ind = d.index.get_level_values(0)
ind = [ num2vec_mp(ind[i], covs_max_list)
for i in range(len(ind)) if i%2==0]
df = pd.DataFrame(ind, columns=covs ).astype(int)
mean_list = list(d['mean'])
size_list = list(d['size'])
effect_list = [mean_list[2*i+1] - mean_list[2*i]
for i in range(len(mean_list)//2) ]
df.loc[:,'effect'] = effect_list
df.loc[:,'size'] = [size_list[2*i+1] + size_list[2*i]
for i in range(len(size_list)//2) ]
return df
def cleanup_result_mp(res_all):
res = []
for i in range(len(res_all)):
r = res_all[i]
if not r[1] is None:
res.append(recover_covs_mp( r[1], r[0][0], r[0][1] ) )
return res
def num2vec_mp(num, covs_max_list):
res = []
for i in range(len(covs_max_list)):
        num_i = num // covs_max_list[i]**(len(covs_max_list)-1-i)
res.append(num_i)
if (num_i == 0) & (num%covs_max_list[i]**(len(covs_max_list)-1-i) == 0):
res = res + [0]*(len(covs_max_list)-1-i)
break
num = num - num_i* covs_max_list[i]**(len(covs_max_list)-1-i)
return res
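# Hedged sketch of the mixed-radix encoding used to pool units: with covariate
# maxima [2, 2, 2], the unit [1, 0, 1] encodes to 1*4 + 0*2 + 1*1 = 5, and
# num2vec_mp inverts that encoding.
def _example_num2vec():
    covs_max_list = [2, 2, 2]
    weights = [covs_max_list[i]**(len(covs_max_list)-1-i)
               for i in range(len(covs_max_list))]
    code = int(np.dot([1, 0, 1], weights))
    return code, num2vec_mp(code, covs_max_list)   # 5, [1, 0, 1]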
class PredictionE_mp:
"""Class to define the set of Prediction Error for sets of size k :
PE^k characterized by:
- k = size of the sets
- sets: pred_e : a set and the corresponding prediction error
"""
def __init__(self, size, sets, cur_set, pred_e):
self.size = size
self.sets = {cur_set : pred_e}
def add(self, new_set, new_pred_error):
""" this method adds the new set to the sets and
the corresponding prediction error"""
self.sets[new_set] = new_pred_error
class DroppedSets_mp:
"""Class to define the set of dropped sets of size k :
D^k characterized by:
- min_support : the size of the itemsets in the set
- dropped : set of the dropped sets
- support : list of the current support of each item in the dropped set
- min_support_items : set of items that have minimum support """
# We can create the D^k by specifying k=min_support,
# In the context of FLAME, all the D^k are initialized by:
# min_support = k, k=1..n with n = number of covariates
# dropped = []
# support = [0]*n since we have n covariates
# min_support_items = []
def __init__(self, min_sup, dropped, support, min_sup_item):
self.min_support = min_sup
self.dropped = dropped
self.support = support
self.min_support_item = min_sup_item
def add(self, new_set):
""" this method adds the new set to the dropped set and
updates the support for the current items and
the items with enough support"""
# update the set of dropped sets
self.dropped.append(new_set)
self.dropped = sorted(self.dropped)
# update the support of the items in the new_set
for item in new_set:
self.support[item] += 1
# update the list of items with enough support
if self.support[item] >= self.min_support:
self.min_support_item.append(item)
self.min_support_item = sorted(self.min_support_item)
def generate_active_sets(self, new_set):
""" this method generates the new active sets from
the current dropped set"""
new_active_sets = []
new_candidate = []
rem = []
# start by verifying if all the items in new_set have min support :
# if no, there is no new active set to generate
# if yes, create a new active set by joining the set
# with the items of min support
if set(new_set).issubset(set(self.min_support_item)) :
aux = sorted(set(self.min_support_item) - set(new_set))
for element in aux:
new_candidate = sorted(set(new_set).union(set([element])))
new_active_sets.append(new_candidate)
# now we can test if each candidate can be dropped
for c in new_active_sets:
# generates the subsets needed to have already been dropped
prefix = combinations(c,self.min_support)
for c_p in set(prefix):
if sorted(c_p) not in self.dropped :
# if a prefix of 'c' has not been dropped yet,
# remove 'c' from candidates
rem.append(c)
break # no need to check if the others
# prefixes have been dropped
for r in rem:
new_active_sets.remove(r)
# new_active_sets contains the sets to add to possible_drops
return new_active_sets
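# Hedged sketch of the apriori-style bookkeeping: once the singletons [0] and
# [1] have both been dropped (min_support=1), the pair [0, 1] becomes the next
# active candidate generated from [0].
def _example_dropped_sets():
    d1 = DroppedSets_mp(1, [], [0, 0, 0], [])
    d1.add([0])
    d1.add([1])
    return d1.generate_active_sets([0])   # [[0, 1]]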
def process_data():
    df = pd.read_csv('data.csv', index_col=0, parse_dates=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 10:09:11 2018
@author: sitaram
"""
import pandas as pd
import numpy as np
from collections import Counter
from itertools import combinations
import sys
#Arguments passed are filename,instance number,source location,destination location
filename=sys.argv[1]
destination_file=sys.argv[2]
def combinations_function(x):
# print(x[0])
if len(x)==1:
return [(x[0],x[0])]
else:
return list(combinations(x,2))
def generate_obs_freq(df,number):
print('calculating combinations and frequencies')
#Group by source_id and collect all reference_issn to store as list
df=df.groupby(['source_id'])['reference_issn'].apply(list).values
#Calculating the journal pairs for each publication
df=list(map(combinations_function, df))
#Flattening the list and storing it as dataframe
print('Flattening the list and creating journal_pairs')
df=pd.DataFrame([z for x in df for z in x],columns=['A','B'])
df['journal_pairs']=df['A']+','+df['B']
#Getting the aggregated count of each journal pair
print('Calculating value cout')
df=df['journal_pairs'].value_counts()
    df=pd.DataFrame({'journal_pairs':df.index,'frequency':df.values})
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
def logisticRegression():
dataset = pd.read_csv('mifem.csv')
#replacing y|n|nk by 1|0|None
mapping={"n":0,"y":1,"nk":None}
dataset['stroke']=dataset['stroke'].map(mapping)
Y=dataset.iloc[:,1].values
X=dataset.iloc[:,[2,-1]].values
#filling missing data
#missing data of stroke cases
imputer=Imputer(missing_values='NaN',strategy='most_frequent',axis=0)
imputer.fit(X[:,[-1]])
X[:,[-1]]=imputer.transform(X[:,[-1]])
    num = pd.DataFrame(X)
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
        with tm.assert_raises_regex(ValueError, msg):
from pandas import read_csv as csv, DataFrame as df, merge
from allel import read_vcf as vcf
snp = csv("data/RiceToolkit/app-master/data/X.csv")
# Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""."""
import argparse
from os import listdir
from os import makedirs
from os.path import isdir
from os.path import isfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas
def directory_type(directory):
"""
Check whether <directory> is an exists. Create it if it does not.
:param directory: The directory path.
:return: The directory path without ending /.
"""
if directory.endswith('/'):
directory = directory[:-1]
if not isdir(directory):
makedirs(directory)
return directory
def plot_history(
data_frame,
save_directory,
column,
experiment_type,
print_summary=False
):
"""
Create a history plot for a given check with one data series per execution.
:param data_frame: A Pandas DataFrame containing all the different
executions data. The executions are marked with the 'Execution' column.
data_frame is expected to contain columns: 'Bytes', <column>, and
'Execution'.
:param save_directory: The directory to place the plot.
:param column: The column to plot.
:param experiment_type: The type of experiment (used for the figure title)
:param print_summary: Whether or not to print the data_frame.
"""
# Validate input types
assert(isinstance(data_frame, pandas.DataFrame))
assert(isinstance(save_directory, str))
assert(isinstance(column, str))
assert(isinstance(print_summary, bool))
# Verify that necessary columns exist
if column not in data_frame:
print('Dataframe does not contain column "{}"'.format(column))
return False
if 'Execution' not in data_frame:
print('Dataframe does not contain column "Execution"')
return False
if 'Bytes' not in data_frame:
print('Dataframe does not contain column "Bytes"')
return False
# Print summary if needed
if print_summary is True:
print(data_frame)
# History plot
fig, ax = plt.subplots()
for key, grp in data_frame.groupby(['Execution']):
ax = grp.plot(
ax=ax,
style='.-',
x='Bytes',
y=column,
label=key,
)
ax.set_xticks(range(len(grp['Bytes'])))
plt.xlabel('Payload [Bytes]')
plt.ylabel('Latency [us]')
plt.legend(loc='best')
plt.grid()
plt.title('History {} {}'.format(experiment_type, column))
if not isdir(save_directory):
makedirs(save_directory)
plt.savefig(
'{}/history_{}.png'.format(
save_directory,
column.replace('%', '')
),
bbox_inches='tight'
)
plt.close(fig)
return True
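# Hedged usage sketch with made-up latency numbers: two executions and two
# payload sizes, plotting the history of the 'Median' check.
def _example_plot_history(save_directory='./example_plots'):
    example_df = pandas.DataFrame({
        'Bytes': [16, 32, 16, 32],
        'Median': [55.0, 60.0, 54.0, 61.0],
        'Execution': ['build_1', 'build_1', 'build_2', 'build_2'],
    })
    return plot_history(example_df, save_directory, 'Median',
                        'intraprocess best effort')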
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""
Script to create history plots based on experiments results
summaries as output by 'latency_process_results.py', and a
requirements CSV file as output by
'latency_determine_requirements.py'. The script creates one history
plot for each experiment type, payload and check (latency median,
maximum, and 99 percentile), with the different executions in the
X-axis, and the latency in the Y-axis. These plots contain the data
series (in blue), and a red dotted line for the requirement set for
that specific experiment type, payload, and check. Furthermore, for
each experiment type and check, the script creates a plot with the
payload in the X-axis, the latency in the Y-axis, and one data
series for each execution.
"""
)
parser.add_argument(
'-p',
'--plots_directory',
type=directory_type,
help='The directory to store the plots',
required=True
)
parser.add_argument(
'-r',
'--requirements',
help='The requirements CSV file',
required=True
)
parser.add_argument(
'-e',
'--experiments_results',
help='The directory containing the results of all the experiments',
required=True
)
args = parser.parse_args()
plots_directory = args.plots_directory
requirements = args.requirements
experiments = args.experiments_results
# Validate arguments
assert(isfile(requirements))
assert(isdir(experiments))
# Get requirements
reqs_data = pandas.read_csv(requirements)
# Get list of experiment types for which there are requirements
supported_exp_types = reqs_data['Experiment type'].tolist()
# Get the list of results directories
results_dirs = sorted(listdir(experiments))
# Create a dict with paths to results files
# data_for_plots = {
# experiment_type_1: {
# build_1: file,
# build_2: file,
# },
# experiment_type_2: {
# build_1: file,
# build_2: file,
# }
# }
data_for_plots = {}
for res in results_dirs:
full_dir = '{}/{}'.format(experiments, res)
result_files = [f for f in listdir(full_dir) if 'summary' in f]
for r in result_files:
experiment_type = r.split('/')[-1].split('.')[-2].split('_')[1:-1]
experiment_type = '_'.join(experiment_type)
if experiment_type not in supported_exp_types:
print('No reference for {}. Skipping'.format(experiment_type))
continue
if experiment_type not in data_for_plots:
data_for_plots[experiment_type] = {}
data_for_plots[experiment_type][res] = '{}/{}/{}'.format(
experiments,
res,
r
)
columns_history_plots = [
'Min',
'Median',
'Max',
'99%',
]
columns_refs_plots = columns_history_plots[1:]
# Create a set of history plots for each experiment type
for experiment_type in data_for_plots:
reqs = reqs_data[reqs_data['Experiment type'] == experiment_type]
# Build a table with all the data for a given experiment type
        summaries_data = pandas.DataFrame()
from pandas import read_csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from pandas import DataFrame,Series
from federatedPCA import merge,privateSAPCA,SMSULQ,SAPCA
import scipy
alfaMat=.5
d=50
n=10000
S=np.random.normal(0,1,(d,d))
S=scipy.linalg.orth(S)
lamb=np.zeros((d,d))
for i in range(d):
lamb[i,i]=np.power(i+1,-alfaMat)
cov=S.T.dot(lamb).dot(S)
X=np.random.multivariate_normal(np.zeros((d)),cov).reshape(d,1)
for i in range(1,n):
X=np.append(X,np.random.multivariate_normal(np.zeros((d)),cov).reshape(d,1),axis=1)
X=X.T
DataFrame(X)
"""
Prepare sample split
Created on 04/10/2020
@author: RH
"""
import os
import pandas as pd
import numpy as np
def set_sep(path, cut=0.3):
trlist = []
telist = []
valist = []
pos = pd.read_csv('../COVID-CT-MetaInfo.csv', header=0, usecols=['image', 'patient'])
neg = pd.read_csv('../NonCOVID-CT-MetaInfo.csv', header=0, usecols=['image', 'patient'])
pos['label'] = 1
neg['label'] = 0
pos['path'] = '../images/CT_COVID/' + pos['image']
neg['path'] = '../images/CT_nonCOVID/' + neg['image']
pos = pos.drop(['image'], axis=1)
neg = neg.drop(['image'], axis=1)
unqp = list(pos.patient.unique())
unqn = list(neg.patient.unique())
np.random.shuffle(unqp)
np.random.shuffle(unqn)
validation = unqp[:int(len(unqp) * cut / 2)]
valist.append(pos[pos['patient'].isin(validation)])
test = unqp[int(len(unqp) * cut / 2):int(len(unqp) * cut)]
telist.append(pos[pos['patient'].isin(test)])
train = unqp[int(len(unqp) * cut):]
trlist.append(pos[pos['patient'].isin(train)])
validation = unqn[:int(len(unqn) * cut / 2)]
valist.append(neg[neg['patient'].isin(validation)])
test = unqn[int(len(unqn) * cut / 2):int(len(unqn) * cut)]
telist.append(neg[neg['patient'].isin(test)])
train = unqn[int(len(unqn) * cut):]
trlist.append(neg[neg['patient'].isin(train)])
    test = pd.concat(telist)
import sys
import pandas as pd
import numpy as np
import json
import os
from datetime import date
from scipy.stats import linregress
import yaml
from momentum_data import cfg
DIR = os.path.dirname(os.path.realpath(__file__))
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
try:
with open('config.yaml', 'r') as stream:
config = yaml.safe_load(stream)
except FileNotFoundError:
config = None
except yaml.YAMLError as exc:
print(exc)
PRICE_DATA = os.path.join(DIR, "data", "price_history.json")
ACCOUNT_VALUE = cfg("CASH")
RISK_FACTOR_CFG = cfg("RISK_FACTOR")
RISK_FACTOR = RISK_FACTOR_CFG or 0.002
MAX_STOCKS = cfg("STOCKS_COUNT_OUTPUT")
SLOPE_DAYS = cfg("MOMENTUM_CALCULATION_PAST_DAYS")
POS_COUNT_TARGET = cfg("POSITIONS_COUNT_TARGET")
MAX_GAP = cfg("EXCLUDE_MAX_GAP_PCT")
EXCLUDE_MA_CROSSES = cfg("EXCLUDE_ALL_MA_CROSSES")
TITLE_RANK = "Rank"
TITLE_TICKER = "Ticker"
TITLE_SECTOR = "Sector"
TITLE_UNIVERSE = "Universe"
TITLE_PERCENTILE = "Percentile"
TITLE_MOMENTUM = "Momentum (%)"
TITLE_RISK = "ATR20d"
TITLE_PRICE = "Price"
TITLE_SHARES = "Shares"
TITLE_POS_SIZE = "Position ($)"
TITLE_SUM = "Sum ($)"
if not os.path.exists('output'):
os.makedirs('output')
def read_json(json_file):
with open(json_file, "r") as fp:
return json.load(fp)
def momentum(closes):
"""Calculates slope of exp. regression normalized by rsquared"""
returns = np.log(closes)
indices = np.arange(len(returns))
slope, _, r, _, _ = linregress(indices, returns)
# return ((1 + slope) ** 253) * (r**2)
return (((np.exp(slope) ** 252) - 1) * 100) * (r**2)
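# Worked example for the momentum score above (illustrative, not from the original
# script): a perfectly exponential series has a linear log-price, so r**2 == 1 and
# the score reduces to the annualized slope:
#   momentum(pd.Series(100 * np.exp(0.001 * np.arange(90))))  # ~28.7 (% per year)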
def atr_20(candles):
"""Calculates last 20d ATR"""
daily_atrs = []
for idx, candle in enumerate(candles):
high = candle["high"]
low = candle["low"]
prev_close = 0
if idx > 0:
prev_close = candles[idx - 1]["close"]
daily_atr = max(high-low, np.abs(high - prev_close), np.abs(low - prev_close))
daily_atrs.append(daily_atr)
return pd.Series(daily_atrs).rolling(20).mean().tail(1).item()
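# The daily true range above is max(high - low, |high - prev_close|, |low - prev_close|);
# e.g. with prev_close=100, high=103, low=99 the true range is 4, and atr_20 returns the
# 20-day simple moving average of those values (NaN while fewer than 20 candles exist).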
def calc_stocks_amount(account_value, risk_factor, risk_input):
return (np.floor(account_value * risk_factor / risk_input)).astype(int)
def calc_pos_size(amount, price):
return np.round(amount * price, 2)
def calc_sums(account_value, pos_size):
sums = []
sum = 0
stocks_count = 0
for position in list(pos_size):
sum = sum + position
sums.append(sum)
if sum < account_value:
stocks_count = stocks_count + 1
return (sums, stocks_count)
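# Example of the sizing helpers above (numbers are made up): with an account value of
# 10000, RISK_FACTOR=0.002 and ATR20=2.5, calc_stocks_amount(10000, 0.002, 2.5) -> 8
# shares, calc_pos_size(8, price) gives the dollar position, and calc_sums counts how
# many top-ranked positions fit inside the account value.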
def positions():
"""Returns a dataframe doubly sorted by momentum factor, with atr and position size"""
json = read_json(PRICE_DATA)
momentums = {}
ranks = []
for ticker in json:
try:
closes = list(map(lambda candle: candle["close"], json[ticker]["candles"]))
if closes and len(closes) >= 250:
closes_series = pd.Series(closes)
slope_series = closes_series.tail(SLOPE_DAYS[0])
mas = closes_series.rolling(100).mean().tail(SLOPE_DAYS[0])
ma_is_crossed = False
if (EXCLUDE_MA_CROSSES):
ma_crosses = slope_series < mas
ma_crosses = ma_crosses.where(ma_crosses == True).dropna()
ma_is_crossed = ma_crosses.size > 0
                    # calculate price gaps over the momentum window (the last SLOPE_DAYS[0] days)
diffs = np.abs(slope_series.pct_change().diff()).dropna()
gaps = diffs[diffs > MAX_GAP / 100.0]
ma = mas.tail(1).item()
if ma > closes[-1] or ma_is_crossed:
print("%s was below it's 100d moving average." % ticker)
elif len(gaps):
print(f'{ticker} has a gap > {MAX_GAP}%')
else:
ranks.append(len(ranks)+1)
for idx, slope_days in enumerate(SLOPE_DAYS):
if not slope_days in momentums:
momentums[slope_days] = []
                        mmntm = momentum(pd.Series(closes[-slope_days:]))
# Description: Download tract-level census variables from the API
import requests
import numpy as np
import pandas as pd
def get_census_variable_descriptions(dataset, year, variables):
"""
Download descriptions of census variables from the API
"""
url_template = "https://api.census.gov/data/{year}/{dataset}/profile/variables/{variable}.json"
variable_descriptions = {}
for variable in variables:
url = url_template.format(year=year, dataset=dataset, variable=variable)
response = requests.get(url)
data = response.json()
variable_descriptions[variable] = {
"concept": data["concept"],
"label": data["label"],
}
return variable_descriptions
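# Hypothetical usage sketch (the dataset name and variable code below are assumptions,
# not taken from this file):
#   get_census_variable_descriptions("acs/acs5", 2019, ["DP05_0001E"])
#   # -> {"DP05_0001E": {"concept": ..., "label": ...}}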
def get_census_tracts_data(
tract_fips, api_key, dataset, year, variables, max_tracts=1000, clean=False
):
"""
Download census variables (given some year and dataset) for a series of tracts
limit the max number tracts to download data for in a single api request
"""
# convert vars to string to send to api
variables_str = ",".join(variables)
# census dataframe called cd
cd = pd.DataFrame()
states_counties_tracts = get_states_counties_tracts(tract_fips=tract_fips)
for state in states_counties_tracts:
for county in states_counties_tracts[state]:
tracts = states_counties_tracts[state][county]
# if we pass it too many tracts at once, the census api chokes, so
# break up counties with > max_tracts number of tracts into chunks
for tracts_chunk in chunks(tracts, max_tracts):
# convert tracts to string to send to api
tracts_str = ",".join(tracts_chunk)
print(
"Downloading {} census vars in {}{} for {} tracts.".format(
len(variables), state, county, len(tracts_chunk)
)
)
# get census vars for these tracts and append them to df
df_tmp = get_tracts_census_vars(
api_key=api_key,
dataset=dataset,
variables=variables_str,
state=state,
county=county,
tracts=tracts_str,
year=year,
clean=clean,
)
df_tmp["state"] = state
df_tmp["county"] = county
cd = cd.append(df_tmp)
return cd
def get_states_counties_tracts(tract_fips):
"""
turn a list of tract fips codes into a nested dict keyed by state,
then keyed by county, finally with tract as the value
"""
if not isinstance(tract_fips, pd.Series):
raise TypeError("tract_fips must be a pandas series")
df = pd.DataFrame()
df["state"] = tract_fips.str.slice(0, 2)
df["county"] = tract_fips.str.slice(2, 5)
df["tract"] = tract_fips.str.slice(5)
grouped = df[["state", "county", "tract"]].groupby(["state", "county"])
states_counties_tracts = {}
for (state, county), group in grouped:
if state not in states_counties_tracts:
states_counties_tracts[state] = {}
states_counties_tracts[state][county] = group["tract"].tolist()
return states_counties_tracts
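# Example of the FIPS decomposition used above (the code shown is illustrative): an
# 11-digit tract fips "06037101110" splits into state "06", county "037", tract "101110", so
#   get_states_counties_tracts(pd.Series(["06037101110"]))
#   # -> {"06": {"037": ["101110"]}}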
def parse_tract_fips(tract_fips):
"""
turn a full tract fips code into a tuple of state, county, tract
"""
return tract_fips[:2], tract_fips[2:5], tract_fips[5:]
def get_tract_ids(fips_codes):
"""
convert a list of full tract fips codes into just tract fips only
"""
tracts = []
for fips_code in fips_codes:
_, _, tract_fips = parse_tract_fips(fips_code)
tracts.append(tract_fips)
return tracts
def get_tracts_census_vars(
api_key, dataset, variables, state, county, tracts, year, clean
):
"""
download a set of census variables for a state + county + tracts
"""
url_template = (
"https://api.census.gov/data/{year}/{dataset}/profile?"
"get={variables}&for=tract:{tracts}&key={api_key}&in=state:{state}+county:{county}"
)
url = url_template.format(
api_key=api_key,
dataset=dataset,
variables=variables,
state=state,
county=county,
tracts=tracts,
year=year,
)
try:
response = requests.get(url, timeout=30)
json_data = response.json()
except Exception as e:
print(e, response.status_code, response.text, response.url)
# load as dataframe and index by geoid (state+county+tract)
    df = pd.DataFrame(json_data)
#!/usr/bin/env python3
# mix_data.py
import sys, os, csv, random
import numpy as np
import pandas as pd
meta = pd.read_csv('partitionmeta/part1.csv', index_col = 'docid')
detectives = meta.index[meta['tags'] == 'detective'].tolist()
masterfantasy = meta.index[meta['tags'] == 'fantasy'].tolist()
randmeta = meta[meta.tags == 'random']
moreneeded = 100 - len(detectives)
for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
fantasy = random.sample(masterfantasy, len(detectives))
counter = 0
if not os.path.exists('mix/' + str(ratio)):
os.makedirs('mix/' + str(ratio))
rows = []
rownames = []
for f, d in zip(fantasy, detectives):
fraction = ratio / 100
fantdf = pd.read_csv('../data/' + f + '.tsv', sep = '\t', index_col = 'feature')
detectdf = pd.read_csv('../data/' + d + '.tsv', sep = '\t', index_col = 'feature')
wordsinfantasy = set(fantdf.index)
wordsindetective = set(detectdf.index)
inboth = wordsinfantasy.intersection(wordsindetective)
interdf = fantdf.loc[inboth]
numtochange = int(len(inboth) * fraction)
tochange = random.sample(inboth, numtochange)
for i in tochange:
if type(i) != str:
continue
else:
dval = float(detectdf.loc[i, 'frequency'])
interdf.loc[i, 'frequency'] = dval
onlyfantasy = wordsinfantasy - inboth
numtotake = int(len(onlyfantasy) * (1 - fraction))
fantasywords = random.sample(onlyfantasy, numtotake)
uniquefant = fantdf.loc[fantasywords]
onlydetective = wordsindetective - inboth
numtotake = int(len(onlydetective) * fraction)
detectivewords = random.sample(onlydetective, numtotake)
uniquedetect = detectdf.loc[detectivewords]
thisfile = pd.concat([interdf, uniquefant, uniquedetect])
filename = 'mix/' + str(ratio) + '/mixed_' + str(counter) + '.tsv'
thisfile.to_csv(filename, sep = '\t')
counter += 1
if ratio < 50:
row = meta.loc[f]
elif ratio > 50:
row = meta.loc[d]
else:
whim = random.choice([d, f])
row = meta.loc[whim]
rowname = 'mixed_' + str(counter)
rownames.append(rowname)
rows.append(row)
outmeta = pd.DataFrame(rows, index = rownames)
    outmeta = pd.concat([outmeta, randmeta])
#! /usr/bin/env python3
import urllib.request
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_validate
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
def download_glass_file(filename):
print('Downloading \'{}\'...'.format(filename), file=sys.stderr)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/'
response = urllib.request.urlopen(url + filename)
data = response.read().decode('utf-8')
print('Download complete!', file=sys.stderr)
return data
def download_glass_dataset():
glass_files = ['glass.data', 'glass.names', 'glass.tag']
for file_ in glass_files:
filepath = os.path.join(os.getcwd() + '/glass', file_)
if not os.path.exists(filepath):
data = download_glass_file(file_)
with open(filepath, 'w') as w_file:
w_file.write(data)
else:
print('File \'{}\' already exists.'.format(file_), file=sys.stderr)
def read_glass():
attribute_names = ['Id', 'RI', 'Na', 'Mg', # From glass.names
'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type']
df = pd.read_csv(os.getcwd() + '/glass/glass.data',
header=None,
names=attribute_names)
return df
def run_glass_experiments(data):
glass_X = data.drop(['Id', 'Type'], axis=1)
glass_y = data.loc[:, 'Type']
mlp = MLPClassifier(hidden_layer_sizes=(10, ),
activation='logistic',
solver='adam',
max_iter=10000)
svm_rbf = SVC(kernel='rbf', gamma='auto')
svm_sigmoid = SVC(kernel='sigmoid', gamma='auto')
svm_linear = SVC(kernel='linear', gamma='auto')
svm_linear_ovo = SVC(kernel='linear', gamma='auto', decision_function_shape='ovo')
perceptron = Perceptron(max_iter=1000,
tol=1e-3)
methods = {'MLP': mlp,
'SVM - RBF': svm_rbf,
'SVM - Sigmoid': svm_sigmoid,
'SVM - Linear - OAA': svm_linear,
'SVM - Linear - AAA': svm_linear_ovo,
'Perceptron': perceptron}
results = list()
for method in methods:
results_model = cross_validate(methods[method],
glass_X, y=glass_y, cv=5,
scoring=['accuracy'],
return_train_score=True)
results_model['method'] = method
results_model['fold'] = np.arange(1, 6)
results.append(pd.DataFrame(results_model))
return pd.concat(results)
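# Sketch of how the returned frame might be summarised (not part of the original file):
#   results = run_glass_experiments(read_glass())
#   results.groupby("method")["test_accuracy"].mean()
# cross_validate(..., scoring=["accuracy"]) names the per-fold column "test_accuracy".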
def read_iris():
iris = load_iris()
return pd.DataFrame(data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
def run_iris_experiments(data):
iris_X = data.drop('target', axis=1)
iris_y = data.loc[:, 'target']
mlp = MLPClassifier(hidden_layer_sizes=(20, ),
activation='logistic',
solver='adam',
max_iter=2000)
svm_rbf = SVC(kernel='rbf', gamma='auto')
svm_sigmoid = SVC(kernel='sigmoid', gamma='auto')
svm_linear = SVC(kernel='linear', gamma='auto')
svm_linear_ovo = SVC(kernel='linear', gamma='auto', decision_function_shape='ovo')
perceptron = Perceptron(max_iter=1000,
tol=1e-3)
methods = {'MLP': mlp,
'SVM - RBF': svm_rbf,
'SVM - Sigmoid': svm_sigmoid,
'SVM - Linear - OAA': svm_linear,
'SVM - Linear - AAA': svm_linear_ovo,
'Perceptron': perceptron}
results = list()
for method in methods:
results_model = cross_validate(methods[method],
iris_X, y=iris_y, cv=5,
scoring=['accuracy'],
return_train_score=True)
results_model['method'] = method
results_model['fold'] = np.arange(1, 6)
        results.append(pd.DataFrame(results_model))
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
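# For example, tz_to_dtype(None) returns np.dtype("M8[ns]"), while passing a tzinfo
# such as pytz.UTC returns DatetimeTZDtype(tz=pytz.UTC); callers use this to choose
# the dtype for tz-naive vs tz-aware DatetimeArrays.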
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
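# _field_accessor is a small property factory: each call below, e.g.
#   year = _field_accessor("year", "Y", "<docstring>")
# builds a read-only property whose getter runs the helpers in the `fields` module
# (imported above) over the array's local, tz-adjusted i8 timestamps.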
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
# tz info from start and day to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
        nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases, when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date")
def isocalendar(self) -> DataFrame:
"""
Returns a DataFrame with the year, week, and day calculated according to
the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
with columns year, week and day
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasnans:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
@property
def weekofyear(self):
"""
The week ordinal of the year.
.. deprecated:: 1.1.0
weekofyear and week have been deprecated.
Please use DatetimeIndex.isocalendar().week instead.
"""
warnings.warn(
"weekofyear and week have been deprecated, please use "
"DatetimeIndex.isocalendar().week instead, which returns "
"a Series. To exactly reproduce the behavior of week and "
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
)
week_series = self.isocalendar().week
if week_series.hasnans:
return week_series.to_numpy(dtype="float64", na_value=np.nan)
return week_series.to_numpy(dtype="int64")
week = weekofyear
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int64
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int64
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int64
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int64
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int64
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int64
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int64
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int64
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
        is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> np.ndarray:
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
+ self.microsecond / 3600 / 10 ** 6
+ self.nanosecond / 3600 / 10 ** 9
)
/ 24
)
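# Illustrative check (a sketch, not part of the original source): noon on 2000-01-01 UTC
# is the J2000.0 epoch, so its Julian date should come out as exactly 2451545.0.
# >>> pd.DatetimeIndex(["2000-01-01 12:00"]).to_julian_date()[0]
# 2451545.0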
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
tda = TimedeltaArray(self._ndarray.view("i8"))
return tda.std(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna
)
# -------------------------------------------------------------------
# Constructor Helpers
@overload
def sequence_to_datetimes(
data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
) -> DatetimeArray:
...
@overload
def sequence_to_datetimes(
data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
) -> np.ndarray | DatetimeArray:
...
def sequence_to_datetimes(
data, allow_object: bool = False, require_iso8601: bool = False
) -> np.ndarray | DatetimeArray:
"""
Parse/convert the passed data to either DatetimeArray or np.ndarray[object].
"""
result, tz, freq = sequence_to_dt64ns(
data,
allow_object=allow_object,
allow_mixed=True,
require_iso8601=require_iso8601,
)
if result.dtype == object:
return result
dtype = tz_to_dtype(tz)
dta = DatetimeArray._simple_new(result, freq=freq, dtype=dtype)
return dta
def sequence_to_dt64ns(
data,
dtype=None,
copy=False,
tz=None,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
*,
allow_object: bool = False,
allow_mixed: bool = False,
require_iso8601: bool = False,
):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
allow_object : bool, default False
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
require_iso8601 : bool, default False
Only consider ISO-8601 formats when parsing strings.
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
TypeError : PeriodDtype data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
tz = timezones.maybe_get_tz(tz)
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCMultiIndex):
raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
else:
data = extract_array(data, extract_numpy=True)
if isinstance(data, IntegerArray):
data = data.to_numpy("int64", na_value=iNaT)
elif not isinstance(data, (np.ndarray, ExtensionArray)):
# GH#24539 e.g. xarray, dask object
data = np.asarray(data)
if isinstance(data, DatetimeArray):
inferred_freq = data.freq
# By this point we are assured to have either a numpy array or Index
data, copy = maybe_convert_dtype(data, copy)
data_dtype = getattr(data, "dtype", None)
if (
is_object_dtype(data_dtype)
or is_string_dtype(data_dtype)
or is_sparse(data_dtype)
):
# TODO: We do not have tests specific to string-dtypes,
# also complex or categorical or other extension
copy = False
if lib.infer_dtype(data, skipna=False) == "integer":
data = data.astype(np.int64)
else:
# data comes back here as either i8 to denote UTC timestamps
# or M8[ns] to denote wall times
data, inferred_tz = objects_to_datetime64ns(
data,
dayfirst=dayfirst,
yearfirst=yearfirst,
allow_object=allow_object,
allow_mixed=allow_mixed,
require_iso8601=require_iso8601,
)
if tz and inferred_tz:
# two timezones: convert to intended from base UTC repr
data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
data = data.view(DT64NS_DTYPE)
elif inferred_tz:
tz = inferred_tz
elif allow_object and data.dtype == object:
# We encountered mixed-timezones.
return data, None, None
data_dtype = data.dtype
# `data` may have originally been a Categorical[datetime64[ns, tz]],
# so we need to handle these types.
if is_datetime64tz_dtype(data_dtype):
# DatetimeArray -> ndarray
tz = _maybe_infer_tz(tz, data.tz)
result = data._ndarray
elif is_datetime64_dtype(data_dtype):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_ndarray", data)
if data.dtype != DT64NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
copy = False
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = tzconversion.tz_localize_to_utc(
data.view("i8"), tz, ambiguous=ambiguous
)
data = data.view(DT64NS_DTYPE)
assert data.dtype == DT64NS_DTYPE, data.dtype
result = data
else:
# must be integer dtype otherwise
# assume this data are epoch timestamps
if tz:
tz = timezones.maybe_get_tz(tz)
if data.dtype != INT64_DTYPE:
data = data.astype(np.int64, copy=False)
result = data.view(DT64NS_DTYPE)
if copy:
result = result.copy()
assert isinstance(result, np.ndarray), type(result)
assert result.dtype == "M8[ns]", result.dtype
# We have to call this again after possibly inferring a tz above
validate_tz_from_dtype(dtype, tz)
return result, tz, inferred_freq
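# Minimal usage sketch (assumes the internals above are importable in this namespace):
# a plain list of ISO strings comes back as tz-naive datetime64[ns] with no inferred tz
# or frequency.
# >>> result, tz, freq = sequence_to_dt64ns(["2021-01-01", "2021-01-02"])
# >>> result.dtype, tz, freq
# (dtype('<M8[ns]'), None, None)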
def objects_to_datetime64ns(
data: np.ndarray,
dayfirst,
yearfirst,
utc=False,
errors="raise",
require_iso8601: bool = False,
allow_object: bool = False,
allow_mixed: bool = False,
):
"""
Convert data to array of timestamps.
Parameters
----------
data : np.ndarray[object]
dayfirst : bool
yearfirst : bool
utc : bool, default False
Whether to convert timezone-aware timestamps to UTC.
errors : {'raise', 'ignore', 'coerce'}
require_iso8601 : bool, default False
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
Returns
-------
result : ndarray
np.int64 dtype if returned values represent UTC timestamps
np.datetime64[ns] if returned values represent wall times
object if mixed timezones
inferred_tz : tzinfo or None
Raises
------
ValueError : if data cannot be converted to datetimes
"""
assert errors in ["raise", "ignore", "coerce"]
# if str-dtype, convert
data = np.array(data, copy=False, dtype=np.object_)
flags = data.flags
order: Literal["F", "C"] = "F" if flags.f_contiguous else "C"
try:
result, tz_parsed = tslib.array_to_datetime(
data.ravel("K"),
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601,
allow_mixed=allow_mixed,
)
result = result.reshape(data.shape, order=order)
except ValueError as err:
try:
values, tz_parsed = conversion.datetime_to_datetime64(data.ravel("K"))
# If tzaware, these values represent unix timestamps, so we
# return them as i8 to distinguish from wall times
values = values.reshape(data.shape, order=order)
return values.view("i8"), tz_parsed
except (ValueError, TypeError):
raise err
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
# Return i8 values to denote unix timestamps
return result.view("i8"), tz_parsed
elif is_datetime64_dtype(result):
# returning M8[ns] denotes wall-times; since tz is None
# the distinction is a thin one
return result, tz_parsed
elif is_object_dtype(result):
# GH#23675 when called via `pd.to_datetime`, returning an object-dtype
# array is allowed. When called via `pd.DatetimeIndex`, we can
# only accept datetime64 dtype, so raise TypeError if object-dtype
# is returned, as that indicates the values can be recognized as
# datetimes but they have conflicting timezones/awareness
if allow_object:
return result, tz_parsed
raise TypeError(result)
else: # pragma: no cover
# GH#23675 this TypeError should never be hit, whereas the TypeError
# in the object-dtype branch above is reachable.
raise TypeError(result)
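# Behaviour sketch: tz-naive datetime objects come back as M8[ns] wall times with
# tz_parsed None, whereas tz-aware inputs come back as i8 UTC timestamps plus the
# parsed tzinfo.
# >>> import datetime as _dt
# >>> arr = np.array([_dt.datetime(2021, 1, 1), _dt.datetime(2021, 1, 2)], dtype=object)
# >>> res, tz = objects_to_datetime64ns(arr, dayfirst=False, yearfirst=False)
# >>> res.dtype, tz
# (dtype('<M8[ns]'), None)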
def maybe_convert_dtype(data, copy: bool):
"""
Convert data based on dtype conventions, issuing deprecation warnings
or errors where appropriate.
Parameters
----------
data : np.ndarray or pd.Index
copy : bool
Returns
-------
data : np.ndarray or pd.Index
copy : bool
Raises
------
TypeError : PeriodDtype data is passed
"""
if not hasattr(data, "dtype"):
# e.g. collections.deque
return data, copy
if is_float_dtype(data.dtype):
# Note: we must cast to datetime64[ns] here in order to treat these
# as wall-times instead of UTC timestamps.
data = data.astype(DT64NS_DTYPE)
copy = False
# TODO: deprecate this behavior to instead treat symmetrically
# with integer dtypes. See discussion in GH#23675
elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):
# GH#29794 enforcing deprecation introduced in GH#23539
raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
"""Integration tests for the HyperTransformer."""
import re
from copy import deepcopy
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from rdt import HyperTransformer
from rdt.errors import Error, NotFittedError
from rdt.transformers import (
DEFAULT_TRANSFORMERS, BaseTransformer, BinaryEncoder, FloatFormatter, FrequencyEncoder,
OneHotEncoder, UnixTimestampEncoder, get_default_transformer, get_default_transformers)
class DummyTransformerNumerical(BaseTransformer):
INPUT_SDTYPE = 'categorical'
OUTPUT_SDTYPES = {
'value': 'float'
}
def _fit(self, data):
pass
def _transform(self, data):
return data.astype(float)
def _reverse_transform(self, data):
return data.astype(str)
class DummyTransformerNotMLReady(BaseTransformer):
INPUT_SDTYPE = 'datetime'
OUTPUT_SDTYPES = {
'value': 'categorical',
}
def _fit(self, data):
pass
def _transform(self, data):
# Stringify input data
return data.astype(str)
def _reverse_transform(self, data):
return data.astype('datetime64')
TEST_DATA_INDEX = [4, 6, 3, 8, 'a', 1.0, 2.0, 3.0]
def get_input_data():
datetimes = pd.to_datetime([
'2010-02-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'a'],
'bool': [False, False, False, True, False, False, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
return data
def get_transformed_data():
datetimes = [
1.264982e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
return pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, .8125, 0.8125, 0.3125, 0.8125, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
def get_reversed_data():
data = get_input_data()
data['bool'] = data['bool'].astype('object')
return data
DETERMINISTIC_DEFAULT_TRANSFORMERS = deepcopy(DEFAULT_TRANSFORMERS)
DETERMINISTIC_DEFAULT_TRANSFORMERS['categorical'] = FrequencyEncoder
@patch('rdt.transformers.DEFAULT_TRANSFORMERS', DETERMINISTIC_DEFAULT_TRANSFORMERS)
def test_hypertransformer_default_inputs():
"""Test the HyperTransformer with default parameters.
This tests that if default parameters are provided to the HyperTransformer,
the ``default_transformers`` method will be used to determine which
transformers to use for each field.
Setup:
- Patch the ``DEFAULT_TRANSFORMERS`` to use the ``FrequencyEncoder``
for categorical sdtypes, so that the output is predictable.
Input:
- A dataframe with every sdtype.
- A fixed random seed to guarantee the same values are null.
Expected behavior:
- The transformed data should contain all the ML ready data.
- The reverse transformed data should be the same as the input.
"""
# Setup
datetimes = pd.to_datetime([
np.nan,
'2010-02-01',
'2010-01-01',
'2010-01-01',
'2010-01-01',
'2010-02-01',
'2010-01-01',
'2010-01-01',
])
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1, 4, 2, 3],
'float': [0.1, 0.2, 0.1, np.nan, 0.1, 0.4, np.nan, 0.3],
'categorical': ['a', 'a', np.nan, 'b', 'a', 'b', 'a', 'a'],
'bool': [False, np.nan, False, True, False, np.nan, True, False],
'datetime': datetimes,
'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon', 'Sansa', 'Jon', 'Jon'],
}, index=TEST_DATA_INDEX)
# Run
ht = HyperTransformer()
ht.detect_initial_config(data)
ht.fit(data)
transformed = ht.transform(data)
reverse_transformed = ht.reverse_transform(transformed)
# Assert
expected_datetimes = [
1.263069e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18,
1.262304e+18,
1.264982e+18,
1.262304e+18,
1.262304e+18
]
expected_transformed = pd.DataFrame({
'integer.value': [1, 2, 1, 3, 1, 4, 2, 3],
'float.value': [0.1, 0.2, 0.1, 0.2, 0.1, 0.4, 0.2, 0.3],
'categorical.value': [0.3125, 0.3125, 0.9375, 0.75, 0.3125, 0.75, 0.3125, 0.3125],
'bool.value': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
'datetime.value': expected_datetimes,
'names.value': [0.3125, 0.75, 0.75, 0.3125, 0.3125, 0.9375, 0.3125, 0.3125]
}, index=TEST_DATA_INDEX)
pd.testing.assert_frame_equal(transformed, expected_transformed)
import pandas as pd
import numpy as np
from etl_ml.utils.ftps_client import Ftps_client
import hashlib
import os
from configparser import ConfigParser
import ast
import traceback
import logging
import paramiko
#import smtplib
import time
import threading
logger = logging.getLogger(
name=__name__,
)
class ETL_Label:
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Initialization: reads the file and applies a first round of basic cleaning
def __init__(self,conf_sec='section_etl_excel_label',config_file='conf/etl.conf',header=0,is_csv=False,csv_sep='\t',raw_header_loc=()):
#raw_header_loc_char_dict,sense_code,date_filed,phone_filed,sheet_name='Sheet1',client_nmbr="AA00",batch="p0" header=0,encoding='gbk',
# Standard order of column names for loading into the database, e.g. section_etl_excel_label_AA100_p1
self.std_filed_list=("gid","realname","certid","mobile","card","apply_time","y_label","apply_amount","apply_period","overdus_day","sense_code")
col_index=(f for f in range(0,11))
# Dictionary mapping column index to standard database column name
self.std_filed_dict=dict(zip(col_index,self.std_filed_list))
# Default letters corresponding to the column index order of an Excel sheet
self.sheet_char_index=('A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T')
# Configuration file to read
self.config_file=config_file
# Configuration file parser
self.config_parser=ConfigParser()
self.config_parser.read(self.config_file,encoding='utf-8-sig')
# Section of the configuration file to read
self.config_etl_label_section=conf_sec
# Relative path of the file to be processed
self.file_path =self.config_parser.get(conf_sec,'file_path')
# Name of the Excel sheet containing the samples
self.sheet_name = self.config_parser.get(conf_sec,'sheet_name')
# client_nmbr
self.client_nmbr=self.config_parser.get(conf_sec,'client_nmbr')
self.batch=self.config_parser.get(conf_sec,'batch')
#self.hive_host =self.config_parser.get(conf_sec, 'hive_host')
# Scenario (sense) code
self.sense_code=self.config_parser.get(conf_sec,'sense_code')
# Original Excel business column name corresponding to the standard column apply_time
self.date_filed=self.config_parser.get(conf_sec,'date_filed')
# Original Excel business column name corresponding to the standard column mobile
self.phone_filed=self.config_parser.get(conf_sec,'phone_filed')
self.csv_sep=csv_sep
l_c_dict_str=self.config_parser.get(conf_sec, 'raw_header_loc_char_dict')
# Mapping between standard database column names and the default Excel column letters; missing columns are marked with an asterisk *
self.raw_header_loc_char_dict = ast.literal_eval(l_c_dict_str)
# Header row index used when reading the Excel file
self.header=header
# Encoding of the file to read
self.encoding = self.config_parser.get(conf_sec,'encoding')
# Mapping between the raw Excel file's original column indexes and the standard database column names
self.raw_header_loc = raw_header_loc
self.Ftps=None
# Get the current working directory
dir=os.getcwd()
# Name and path of the cleaned txt/csv file to export
self.export_txtfile_path=dir+'/data/%s_%s_new_etl.txt'%(self.client_nmbr,self.batch)
# Name and path of the cleaned Excel file to export
self.export_etl_xlsx_file=dir+'/data/%s_%s_new_etl.xlsx'%(self.client_nmbr,self.batch)
# Load the Excel file to be read and cleaned (reads Excel by default)
if is_csv == False:
self._rawdata=pd.read_excel(self.file_path,sheet_name=self.sheet_name,header=self.header,encoding=self.encoding,parse_dates=[self.date_filed],dtype={self.phone_filed:str})
else:
self._rawdata = pd.read_csv(self.file_path, sep=self.csv_sep, header=self.header, encoding=self.encoding,parse_dates=[self.date_filed], dtype={self.phone_filed: str})
try:
logger.info(msg="parse date column")
# Preprocess the apply_time column
self._rawdata["apply_time"]=pd.to_datetime(self._rawdata[self.date_filed],format='%Y-%m-%d',errors='ignore')
except Exception:
logger.info(msg="parse date error, please check")
# Preprocess the application scenario (sense_code) column
self._rawdata["sense_code"] = self.sense_code
# Generate gid: an md5 hash of the five key identity features
def md5_gid(self,five_features,encoding='utf-8'):
md=hashlib.md5()
txt_fix=str(five_features)
#print(txt_fix)
md.update(txt_fix.encode(encoding))
res_md5 = md.hexdigest()
# print(res_md5)
return res_md5
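# Usage sketch (values are placeholders, not real customer data; constructing ETL_Label
# requires a valid conf/etl.conf on disk): the digest is always a 32-character hex string.
# >>> etl = ETL_Label()
# >>> gid = etl.md5_gid(("name", "certid", "mobile", "card", "2020-01-01"))
# >>> len(gid)
# 32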
# Append the gid to the final output dataframe (referenced elsewhere)
def md5_gid_df(self,df):
df["apply_time"]=df["apply_time"].astype(str).apply(lambda x:str(x)[:10])
new_df=df[["realname","certid","mobile","card","apply_time"]]
new_col = pd.DataFrame()
import pandas as pd
def readThreeColumnTruth(fn, suffix=""):
df = pd.read_csv(fn, sep=' ', skiprows=1,
names=['Name', 'Gene{}'.format(suffix),
'TPM{}'.format(suffix)], engine='c')
df.set_index("Name", inplace=True)
pd.to_numeric(df["TPM{}".format(suffix)], errors='ignore')
return df
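# Usage sketch (assumes a whitespace-separated truth file with Name/Gene/TPM columns;
# "truth.sf" is a hypothetical path):
# >>> truth = readThreeColumnTruth("truth.sf", suffix="_truth")
# >>> truth.columns.tolist()
# ['Gene_truth', 'TPM_truth']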
def readRSEMTruth(fn, suffix=""):
df = pd.read_csv(fn, sep='\t', skiprows=1,
names=['Name', 'Gene{}'.format(suffix),
'Length{}'.format(suffix),
'EffectiveLength{}'.format(suffix),
'NumReads{}'.format(suffix),
'TPM{}'.format(suffix),
'FPKM{}'.format(suffix),
'IsoPct{}'.format(suffix)], engine='c')
for col in ["TPM", "Length", "EffectiveLength", "NumReads"]:
pd.to_numeric(df["{}{}".format(col, suffix)], errors='ignore')
df.set_index("Name", inplace=True)
return df
def readStringTie(fn, suffix=""):
"""
Not yet tested
"""
df = pd.read_csv(fn, sep="\t", skiprows=1,
names=["tid{}".format(suffix),
"chr{}".format(suffix),
"strand{}".format(suffix),
"start{}".format(suffix),
"end{}".format(suffix),
"Name",
"num_exons{}".format(suffix),
"Length{}".format(suffix),
"gene_id{}".format(suffix),
"gene_name{}".format(suffix),
"cov{}".format(suffix),
"FPKM{}".format(suffix)])
df.set_index('Name', inplace=True)
df = df.apply(pd.to_numeric, errors='ignore')
return df
def readExpress(fn, suffix=""):
df = pd.read_csv(fn, sep="\t", skiprows=1,
names=["bundle_id{}".format(suffix),
"Name",
"Length{}".format(suffix),
"EffectiveLength{}".format(suffix),
"tot_counts{}".format(suffix),
"uniq_counts{}".format(suffix),
"NumReads{}".format(suffix),
"NumReadsEffective{}".format(suffix),
"ambig_distr_alpha{}".format(suffix),
"ambig_distr_beta{}".format(suffix),
"fpkm{}".format(suffix),
"fpkm_conf_low{}".format(suffix),
"fpkm_conf_high{}".format(suffix),
"solvable{}".format(suffix),
"TPM{}".format(suffix)])
df = df.apply(pd.to_numeric, errors='ignore')  # DataFrame.convert_objects was removed from pandas
df.set_index('Name', inplace=True)
return df
def readSailfish(fn, suffix=""):
df = pd.read_table(fn, engine='c')
"""
dateConv is a centralized script that contains a variety of functions useful for quickly and rapidly converting
datetimes. This is a common issue to a large number of datasets. The functions in here are used for analyses across
the entire root folder.
"""
from numba import njit
import numpy as np
import pandas as pd
def toYearFraction(date):
"""
:param date: This function takes in a singular datetime value
:return: returns a decimal year float value
"""
from datetime import datetime as dt
import time
# returns seconds since epoch
def sinceEpoch(datetime):
return time.mktime(datetime.timetuple())
s = sinceEpoch
year = date.year
startOfThisYear = dt(year=year, month=1, day=1)
startOfNextYear = dt(year=year+1, month=1, day=1)
yearElapsed = s(date) - s(startOfThisYear)
yearDuration = s(startOfNextYear) - s(startOfThisYear)
fraction = yearElapsed/yearDuration
return date.year + fraction
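# Worked example (approximate, local-time mktime may shift by an hour around DST):
# mid-2019 maps to roughly 2019.5.
# >>> from datetime import datetime as dt
# >>> round(toYearFraction(dt(2019, 7, 2, 12)), 2)
# 2019.5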
def isleapyear(yr):
"""
:param yr: an integer year value (i.e: 2019)
:return: boolean, True if a leap year, False if not a leap year
"""
import pandas as pd
# Month and Day do not matter, just required. Converts to dataframe
placeholder = pd.DataFrame({'year': [yr], 'month': [1], 'day': [1]})
# Converts to the datetime format
date = pd.to_datetime(placeholder)
# Pandas function to tell if leap year
leap = bool(date.dt.is_leap_year.iloc[0])
return leap
def decToDatetime(arr):
"""
An approach to convert decyear values into datetime values with numpy vectorization to improve efficiency
:param arr: a numpy array of decyear values
:return: a numpy array of datetime values
"""
import datetime as dt
import calendar
datetimes = []
for i in range(len(arr)):
year = int(arr[i]) # get the year
start = dt.datetime(year - 1, 12, 31) # create starting datetime
numdays = (arr[i] - year) * (365 + calendar.isleap(year)) # calc number of days to current date
result = start + dt.timedelta(days=numdays) # add timedelta of those days
datetimes.append(result) # append results
return datetimes
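# Round-trip sketch: 2019.5 lands mid-year (the exact day differs slightly from
# toYearFraction's convention because the offset is counted from Dec 31).
# >>> [d.strftime("%Y-%m-%d") for d in decToDatetime(np.array([2019.5]))]
# ['2019-07-01']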
def noaaDateConv(dataframe):
"""
This function takes a dataframe with datetime values and converts it into a format that the NOAA ccg tool can
easily read
:param dataframe: A dataframe that has to have a column labeled 'datetime' which contains dt.datetime formatted
items
:return: the same dataframe with the datetime column replaced by year, month, day, hour, and minute
"""
import pandas as pd
year, month, day, hour, minute, cpd = [], [], [], [], [], [] # preallocate lists
cpd_name = dataframe.columns[1] # get the cpd name
# iterate through rows and append lists, seperating components of the datetime
for index, value in dataframe.iterrows():
year.append(value.datetime.year)
month.append(value.datetime.month)
day.append(value.datetime.day)
hour.append(value.datetime.hour)
minute.append(value.datetime.minute)
cpd.append(value[cpd_name])
# drop previous columns
dataframe.drop(['datetime', cpd_name], axis=1, inplace=True)
dataframe = dataframe.reset_index()
dataframe.drop('index', axis=1, inplace=True)
# append each list to the new dataframe in appropriate order
for item in [year, month, day, hour, minute, cpd]:
item = pd.Series(item)
dataframe = dataframe.merge(item.to_frame(), left_index=True, right_index=True, how='inner')
# rename columns
dataframe.columns = ['year', 'month', 'day', 'hour', 'minute', cpd_name]
return dataframe
@njit
def convDatetime(yr, mo, dy, hr):
"""
convDatetime takes values (likely from an array) and quickly converts them to decimal year format. Unfortunately
it does not account for leap years but if a level of accuracy that high is not required using this function with
numba's @njit provides nanosecond for looping of massive arrays.
:param yr: year, numpy array
:param mo: month, numpy array
:param dy: day, numpy array
:param hr: hour, numpy array
:return: the decimal year, numpy array
"""
date = np.empty(yr.shape) # preallocate date
for i in range(len(yr)): # iterate through all values
date[i] = ((yr[i]) + # year +
(mo[i] / 12) + # month rem
(dy[i] / 365 / 12) + # day rem
(hr[i] / 24 / 365 / 12)) # hr rem
return date
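# Worked example of the approximation (numba compiles on first call): note that the
# month/12 term alone makes January 1st contribute ~0.083, so values are offset
# relative to toYearFraction above.
# >>> convDatetime(np.array([2019]), np.array([1]), np.array([1]), np.array([0]))
# array([2019.08356164])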
def createDatetime(yr, mo, dy, hr):
"""
Same thing as above function but converts to datetime format instead of decimal year
"""
import datetime as dt
datetime = []
for i in range(len(yr)):
time = dt.datetime(yr[i], mo[i], dy[i], hr[i])
datetime.append(time)
return datetime
def visitToDatetime(date, arrival, depart):
"""
Converts arrival and departure times from the TAWO visit log into a start and end datetime
:return: datetime column with before, after tuple
Warning: The fact that this function works is honestly beyond me. Only Jesus can understand it at this point. It
is an amalgamation of frustration and defeat with Python datetimes. Good luck.
"""
import calendar
import datetime as dt
# replace NaT values with previous date
mask = np.isnat(date) # boolean array of where NaT values are
idx = np.flatnonzero(mask) # indexes of NaT
nidx = np.flatnonzero(~mask) # other indexs
date[mask] = pd.Timestamp('1980-01-01') # replace NaT with a 1980 placeholder date
date = [pd.Timestamp(x) for x in date] # convert n64dt to timestamp
date = [calendar.timegm(x.timetuple()) + 86400 for x in date] # convert timestamp to unix time value
date = np.array(date) # convert back to numpy array
nonnats = date[~mask] # get actual non NaT values
date[mask] = np.interp(idx, nidx, nonnats) - 86400 # interp unix date values
date = [dt.datetime.fromtimestamp(x) for x in date] # convert unix timestamp to datetime
s, d = [], []
for i in range(len(arrival)):
base = dt.datetime(date[i].year, # collect the base remove hour
date[i].month,
date[i].day)
# get start hour information
shour = int(str(arrival[i])[:2])
sminute = int(str(arrival[i])[2:])
start = base + pd.Timedelta(f'{shour} hours') + pd.Timedelta(f'{sminute} minutes')
# departing hour information
dhour = int(str(depart[i])[:2])
dminute = int(str(depart[i])[2:])
end = base + pd.Timedelta(f'{dhour} hours') + pd.Timedelta(f'{dminute} minutes')
import pandas as pd
def price_function(periods):
data = pd.DataFrame(columns=['Start hour', 'End hour', 'Price multiplier', 'Additional charge multiplier'])
for period in periods:
start, end, length = period
file_name = f"{start}-{end}.csv"
sim_data = pd.read_csv(f"Simulation results/1_day_simulations/{length}hr_periods/{file_name}")
import numpy as np
import pandas as pd
round_1_sp = pd.read_stata('data/NHATS_Round_1_SP_File.dta')
# Exclusions
# dementia
# (hc1disescn9) - 1 - YES, 2 - NO, -1 Inapplicable, -8 DK, -9 Missing
# filter hc1disescn9 == 2
# n post filter = 7146
round_1_cohort = round_1_sp[round_1_sp.hc1disescn9 == ' 2 NO']
# missing grip strength
# n post filter = 5969
# 2 measures, gr1grp1rdng and gr1grp2rdng
# remove those with na in both measurements
both_grip_na = [pd.isna(g1) and pd.isna(g2) for g1, g2 in zip(
round_1_cohort.gr1grp1rdng, round_1_cohort.gr1grp2rdng)]
round_1_cohort = round_1_cohort[[not x for x in both_grip_na]]
# missing height or weight data
# weight in pounds (hw1currweigh), height in feet (hw1howtallft), height in inches (hw1howtallin)
# n post filter = 5822
missing_height_or_weight = [any([pd.isna(weight), pd.isna(height_f), pd.isna(height_in)]) for weight, height_f, height_in in zip(
round_1_cohort.hw1currweigh, round_1_cohort.hw1howtallft, round_1_cohort.hw1howtallin)]
round_1_cohort = round_1_cohort[[not x for x in missing_height_or_weight]]
# Derived measures
# max grip_strength
# max of both grip readings (if applicable)
# appended as max_grip
round_1_cohort['max_grip'] = round_1_cohort.apply(
lambda x: np.nanmax([x.gr1grp1rdng, x.gr1grp2rdng]), axis=1)
# BMI
# defined as self-reported baseline weight in kg divided by height in meters-squared
# appended as weight_kg, height_m, and BMI respectively
round_1_cohort['weight_kg'] = round_1_cohort.hw1currweigh.astype(
'float') / 2.2046
round_1_cohort['height_m'] = (round_1_cohort.hw1howtallft.astype(
'int') * 12 + round_1_cohort.hw1howtallin.astype('int')) * 0.0254
round_1_cohort['bmi'] = round_1_cohort.weight_kg / round_1_cohort.height_m**2
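# Worked example of the derivation above: 150 lb and 5'6" give roughly
# 150 / 2.2046 = 68.0 kg and (5*12 + 6) * 0.0254 = 1.676 m, so BMI ~ 68.0 / 1.676**2 ~ 24.2.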
# High waist circumference
# waist measure in inches (wc1wstmsrinc)
# indicator for high waist circumference, >= 102 cm in males, >= 88 cm in females
# appended as high_wc
# 104 missing wc measure
def high_wc(x):
if pd.isna(x.r1dgender) or pd.isna(x.wc1wstmsrinc):
return np.nan
wc = x.wc1wstmsrinc * 2.54
if x.r1dgender == '1 MALE':
return True if wc >= 102 else False
elif x.r1dgender == '2 FEMALE':
return True if wc >= 88 else False
else:
raise Exception
round_1_cohort['high_wc'] = round_1_cohort.apply(high_wc, axis=1)
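# Worked example of the cutoff logic: a 41-inch waist is 41 * 2.54 = 104.1 cm, which
# exceeds both the male (>= 102 cm) and female (>= 88 cm) thresholds, so high_wc is
# True for either gender.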
# Sarcopenia (defined by grip strength)
# grip strength < 35.5 kg in males, <20 kg in females
# appended as sarcopenia
# no na due to exclusion criteria
def sarcopenia(x):
if pd.isna(x.max_grip) or pd.isna(x.r1dgender):
return np.nan
if x.r1dgender == '1 MALE':
return True if x.max_grip < 35.5 else False
elif x.r1dgender == '2 FEMALE':
return True if x.max_grip < 20 else False
else:
raise Exception
round_1_cohort['sarcopenia'] = round_1_cohort.apply(sarcopenia, axis=1)
def sarcopenia_cutoff2(x):
if pd.isna(x.max_grip) or pd.isna(x.r1dgender):
return np.nan
if x.r1dgender == '1 MALE':
return True if x.max_grip < 26 else False
elif x.r1dgender == '2 FEMALE':
return True if x.max_grip < 16 else False
else:
raise Exception
round_1_cohort['sarcopenia_cutoff2'] = round_1_cohort.apply(
sarcopenia_cutoff2, axis=1)
# SDOC Sarcopenia (defined by grip strength/BMI ratio)
# grip strength/BMI < 1.05 in males, < 0.79 in females
# appended as sdoc_sarcopenia
# no na due to exclusion criteria
def sdoc_sarcopenia(x):
if any([pd.isna(m) for m in [x.max_grip, x.bmi, x.r1dgender]]):
return np.nan
ratio = x.max_grip / x.bmi
if x.r1dgender == '1 MALE':
return True if ratio < 1.05 else False
elif x.r1dgender == '2 FEMALE':
return True if ratio < 0.79 else False
else:
raise Exception
round_1_cohort['sdoc_sarcopenia'] = round_1_cohort.apply(
sdoc_sarcopenia, axis=1)
# Gender
# r1dgender
round_1_cohort['gender'] = round_1_cohort.r1dgender
# Race
# rl1dracehisp, recode values below in dictionary
# no na
# appended as race
def race(x):
d = {' 1 White, non-hispanic': 'White', ' 2 Black, non-hispanic': 'Black',
' 3 Other (Am Indian/Asian/Native Hawaiian/Pacific Islander/other specify), non-Hispanic': 'Other', ' 4 Hispanic': 'Hispanic', ' 5 more than one DKRF primary': 'Other', ' 6 DKRF': 'DKRF'}
return d.get(x.rl1dracehisp, np.nan)
round_1_cohort['race'] = round_1_cohort.apply(race, axis=1)
# Smoking status
# Current - sd1smokedreg == 1 (smoked regularly) & sd1smokesnow == 1 (smokes now)
# Former smoker - sd1smokedreg == 1 & sd1smokesnow == 2 or sd1smokesnow is na
# Never - sd1smokedreg == 2 & sd1smokesnow == 2
# appended as smoking_status
# 1 overall na
def smoking_status(x):
if pd.isna(x.sd1smokedreg) and pd.isna(x.sd1smokesnow): # only 1
return np.nan
elif pd.isna(x.sd1smokedreg) and pd.notna(x.sd1smokesnow): # never
raise Exception
elif pd.notna(x.sd1smokedreg) and pd.isna(x.sd1smokesnow): # 2818
if x.sd1smokedreg == ' 1 YES':
return 'Former, maybe current'
elif x.sd1smokedreg == ' 2 NO':
return 'Never'
else: # both exist
if x.sd1smokedreg == ' 1 YES' and x.sd1smokesnow == ' 1 YES':
return 'Current'
elif x.sd1smokedreg == ' 1 YES' and x.sd1smokesnow == ' 2 NO':
return 'Former'
else:
return 'Never'
round_1_cohort['smoking_status'] = round_1_cohort.apply(smoking_status, axis=1)
# Education
# el1higstschl
# Less than high school: 1 - no schooling,
# 2 - 1st to 8th grade,
# 3 - 9th to 12th grade, no diploma
# High school to some college: 4 - high school graduate (diploma or equivalent)
# 5 - vocational, technical, business or trade school certificate
# beyond high school
# 6 - some college but no degree
# College degree: 7 - associate's degree
# 8 - bachelor's degree
# Graduate degree: 9 - master's, professional, or doctoral
# appended as education
# 4 na
def education(x):
d = {' 1 NO SCHOOLING COMPLETED': 'Less than high school',
' 2 1ST-8TH GRADE': 'Less than high school',
' 3 9TH-12TH GRADE (NO DIPLOMA)': 'Less than high school',
' 4 HIGH SCHOOL GRADUATE (HIGH SCHOOL DIPLOMA OR EQUIVALENT)': 'High school to some college',
' 6 SOME COLLEGE BUT NO DEGREE': 'High school to some college',
" 8 BACHELOR'S DEGREE": 'College degree',
" 9 MASTER'S, PROFESSIONAL, OR DOCTORAL DEGREE": 'Graduate degree',
' 5 VOCATIONAL, TECHNICAL, BUSINESS, OR TRADE SCHOOL CERTIFICATE OR DIPLOMA (BEYOND HIGH SCHOOL LEVEL)': 'High school to some college',
" 7 ASSOCIATE'S DEGREE": 'College degree'
}
return d.get(x.el1higstschl, np.nan)
round_1_cohort['education'] = round_1_cohort.apply(education, axis=1)
# Physical activity proxy
# pa1evrgowalk
# appended as ever_walk
# no na
round_1_cohort['ever_walk'] = round_1_cohort.apply(
lambda x: True if x.pa1evrgowalk == ' 1 YES' else False, axis=1)
# Comorbidities
# heart_disease
# hc1disescn1 - had heart attack
# hc1disescn2 - has heart disease
# no na
def heart_disease(x):
if x.hc1disescn1 == ' 1 YES' or x.hc1disescn2 == ' 1 YES':
return True
elif x.hc1disescn1 == ' 2 NO' or x.hc1disescn2 == ' 2 NO':
return False
else:
return np.nan
round_1_cohort['heart_disease'] = round_1_cohort.apply(heart_disease, axis=1)
# hypertension
# hc1disescn3
# 7 na
round_1_cohort['hypertension'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn3 == ' 1 YES' else False if x.hc1disescn3 == ' 2 NO' else np.nan, axis=1)
# arthritis
# hc1disescn4
# 12 na
round_1_cohort['arthritis'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn4 == ' 1 YES' else False if x.hc1disescn4 == ' 2 NO' else np.nan, axis=1)
# diabetes
# hc1disescn6
# 2 na
round_1_cohort['diabetes'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn6 == ' 1 YES' else False if x.hc1disescn6 == ' 2 NO' else np.nan, axis=1)
# lung_disease
# hc1disescn7
# 4 na
round_1_cohort['lung_disease'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn7 == ' 1 YES' else False if x.hc1disescn7 == ' 2 NO' else np.nan, axis=1)
# stroke
# hc1disescn8
# 5 na
round_1_cohort['stroke'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn8 == ' 1 YES' else False if x.hc1disescn8 == ' 2 NO' else np.nan, axis=1)
# cancer
# hc1disescn10
# 2 na
round_1_cohort['cancer'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn10 == ' 1 YES' else False if x.hc1disescn10 == ' 2 NO' else np.nan, axis=1)
# Age category
# r1d2intvrage
# no na
# appended as age_category
def age_category(x):
d = {
'1 - 65-69': '65-69',
'2 - 70-74': '70-74',
'3 - 75-79': '75-79',
'4 - 80-84': '80-84',
'5 - 85-89': '85+',
'6 - 90 +': '85+'
}
return d.get(x.r1d2intvrage, np.nan)
round_1_cohort['age_category'] = round_1_cohort.apply(age_category, axis=1)
# Obesity (defined by BMI)
# BMI >= 30 kg/m^2
# appended as Obesity
# no na due to exclusion criteria
round_1_cohort['obesity'] = round_1_cohort.apply(
lambda x: True if x.bmi >= 30 else False, axis=1)
# Sarcopenic obesity definitions
# Grouping 1: sarcopenia, obesity, sarcopenic obesity, neither
# obesity derived from BMI (variable obesity)
# appended as grouping_1_so_status
def grouping_1_so_status(x):
if pd.isna(x.sarcopenia) or pd.isna(x.obesity): # shouldn't happen
return np.nan
if x.sarcopenia and not x.obesity:
return 'Sarcopenia'
elif not x.sarcopenia and x.obesity:
return 'Obesity'
elif x.sarcopenia and x.obesity:
return 'Sarcopenic Obesity'
elif not x.sarcopenia and not x.obesity:
return 'Neither'
round_1_cohort['grouping_1_so_status'] = round_1_cohort.apply(
grouping_1_so_status, axis=1)
def grouping_1_so_status_cutoff2(x):
if pd.isna(x.sarcopenia_cutoff2) or pd.isna(x.obesity): # shouldn't happen
return np.nan
if x.sarcopenia_cutoff2 and not x.obesity:
return 'Sarcopenia'
elif not x.sarcopenia_cutoff2 and x.obesity:
return 'Obesity'
elif x.sarcopenia_cutoff2 and x.obesity:
return 'Sarcopenic Obesity'
elif not x.sarcopenia_cutoff2 and not x.obesity:
return 'Neither'
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
from IMLearn.learners.regressors import linear_regression
import plotly.graph_objects as go
pio.templates.default = "simple_white"
from datetime import datetime, date
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df = pd.read_csv(filename)
import pandas as pd
from sklearn import preprocessing
from scipy import stats
import ast
import re, string, unicodedata
import nltk
import spacy
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from wordlists import workingtime_words, feminine_coded_words, masculine_coded_words, salary_words, workingtime_words, profit_words, family_words, homeoffice_words, safety_words, health_words, travel_words
#from dataprep import *
# Functions for Text-Data preparation and extracting gender-coded words
def strip_html(text):
text = text.replace(' ', ' ') # replace html space specifier with space char
soup = BeautifulSoup(text, 'html.parser')
return soup.get_text(' ', strip=False) # space between tags, don't strip newlines
def remove_between_square_brackets(text):
return re.sub('\[[^]]*\]', '', text)
def denoise_text(text):
text = strip_html(text)
text = remove_between_square_brackets(text)
return text
def remove_punctuation(string, char_list, replacer, regex = r"[.,;:–!•]"):
for char in char_list:
string = string.replace(char, replacer)
string_cleaned = re.sub(regex, "", string, 0)
return string_cleaned
def clean_ad(text):
''' removes html-stuff and replaces parantheses with spaces '''
text_denoised = denoise_text(text)
text_depunct = remove_punctuation(text_denoised, ['(', ')', '"'], " ")
return text_depunct
def tokanize(text):
''' seperates a text and puts words into a list,
lowercases words and removes "empty"-list items.
Returns a list of all words '''
# Lowercasing text
text_lower = text.lower()
# Split text into tokens
tokens = re.split('\s', text_lower)
#remove "empty tokens"
tokens_full = list(filter(None, tokens))
return tokens_full
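# Usage sketch (German ad text, as used elsewhere in this project):
# >>> tokanize("Wir suchen einen Teamplayer (m/w/d)")
# ['wir', 'suchen', 'einen', 'teamplayer', '(m/w/d)']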
def add_processing_columns(df):
df['ad_cleaned'] = df.apply(lambda x: clean_ad(x['description']), axis=1)
df['ad_tokens'] = df.apply(lambda x: tokanize(x['ad_cleaned']), axis=1)
return df
# add column "ad_length" which is the number of words in ad
def ad_length (df):
df['ad_length'] = df.apply(lambda x: len(x['ad_tokens']), axis=1)
return df
#_______________________________________________________________________________________________________________________
# Gender-coded words
# Function that checks if ads contains gendered words and returns the gendered-coded words
# and the number of gender-coded words. This funtion has been copied/adapted from German "Decoder" developed by TU München
# please find reference in README
def find_and_count_coded_words(advert_word_list, gendered_word_list):
gender_coded_words = [word for word in advert_word_list
for coded_word in gendered_word_list
if coded_word in word]
# if word.startswith(coded_word)]
return list(gender_coded_words), len(gender_coded_words)
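# Usage sketch with an ad-hoc word list (matching is by substring, so "teamfähig"
# matches the coded word "team"):
# >>> find_and_count_coded_words(["teamfähig", "durchsetzungsstark"], ["team"])
# (['teamfähig'], 1)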
def add_genderword_columns(df):
# add column "words_f". contains list a feminine-associated words
df['words_f'] = df.apply(lambda x: find_and_count_coded_words(x['ad_tokens'], feminine_coded_words)[0], axis=1)
# add column "words_m". contains list a masculine-associated words
df['words_m'] = df.apply(lambda x: find_and_count_coded_words(x['ad_tokens'], masculine_coded_words)[0], axis=1)
# add column "words_f_count". contains number of feminine-associated words
df['words_f_count'] = df.apply(lambda x: find_and_count_coded_words(x['ad_tokens'], feminine_coded_words)[1], axis=1)
# add column "words_m". contains number of masculine-associated words
df['words_m_count'] = df.apply(lambda x: find_and_count_coded_words(x['ad_tokens'], masculine_coded_words)[1], axis=1)
return df
#_______________________________________________________________________________________________________________________
# Functions for preparing dataset:
def transform_ID (df):
# Creating dictionary, matching discipline_id with discipline label
disciplines = {1008:'Health_Medical_Social',
1015:'PR_Journalism ',
1020:'Law',
1022:'Other_Disciplines',
1001:'Analysis_Statistics',
1002:'Administration ',
1003:'Consulting',
1004:'Customer Service',
1005:'Purchasing_Materials_Management_Logistics',
1006:'Finance_Accounting_Controlling',
1007:'Teaching_R&D',
1009:'Graphic_Design_Architecture',
1010:'Engineering_Technical',
1011:'IT_Software_Development',
1012:'Management_Corporate_Development',
1013:'Marketing_Advertising',
1014:'HR',
1016:'Production_Manufacturing',
1017:'Product_Management',
1018:'Project_Management',
1019:'Process_Planning_QA',
1021:'Sales_Commerce'
}
# Creating dictionary, matching industry_id with industry label
industries = {0:'Other',
10000:'Architecture_planning',
20000:'Consumer_goods_trade',
30000:'Automotive_vehicle_manufacturing',
40000:'Industry_mechanical_engineering ',
50000:'Medical_services',
60000:'Energy_water_environment',
70000:'Transport_logistics',
80000:'Tourism_food_service',
90000:'Internet_IT',
100000:'Telecommunication',
110000:'Media_publishing',
120000:'Banking_financial_services',
130000:'Insurance',
140000:'Real_Estate',
150000:'Auditing_tax_law ',
160000:'Consulting',
170000:'Marketing_PR_design ',
180000:'HR_services ',
190000:'Civil_service_associations_institutions',
200000:'Education_science ',
210000:'Health_social',
220000:'Art_culture_sport',
230000:'Other'
}
df['discipline_label'] = df['discipline_id'].map(disciplines)
df['industry_label'] = df['industry_id'].map(industries)
df['discipline_label'].astype("category")
df['industry_label'].astype("category")
return df
# Categorizing industries into male and female dominated
def gender_dominance_ind(df):
female_ratio_for_industry = df["female_ratio_for_industry"]
gender_dominance_ind = []
for i in female_ratio_for_industry:
if i >= 0.5:
gender_dominance_ind.append("female")
else:
gender_dominance_ind.append("male")
# Creating new column
df["gender_dominance_ind"] = gender_dominance_ind
# Changing data type of newly created column "gender_dominance_ind" into category
df["gender_dominance_ind"].astype("category")
return df
# Categorizing disciplines into male and female dominated
def gender_dominance_dis(df):
female_ratio_for_discipline = df["female_ratio_for_discipline"]
gender_dominance_dis = []
for i in female_ratio_for_discipline:
if i >= 0.5:
gender_dominance_dis.append("female")
else:
gender_dominance_dis.append("male")
# Creating new column
df["gender_dominance_dis"] = gender_dominance_dis
# Changing data type of newly created column "gender_dominance_dis" into category
df["gender_dominance_ind"].astype("category")
return df
def drop_columns(df, list_columns):
'''Drops columns in the list and returns updated dataframe'''
df_new = df.drop(list_columns, axis=1)
return df_new
def balancing(df):
count_not_appealing, count_appealing = df["jobad_appealingness_dis"].value_counts()
# Shuffle the Dataset.
shuffled_df = df.sample(frac=1,random_state=42)
# Put all the appealing ads in a separate dataset.
appealing_df = shuffled_df.loc[shuffled_df["jobad_appealingness_dis"] == 1]
#Randomly select 4914 observations from the not appealing (majority class)
not_appealing_df = shuffled_df.loc[shuffled_df["jobad_appealingness_dis"] == 0].sample(n=count_appealing, random_state=42)
# Concatenate both dataframes again
balanced_df = pd.concat([appealing_df, not_appealing_df])
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
LABEL_PROBLEM_PRIMARY = {
"ext_attr": [
"avalanche_problem_type_id",
"avalanche_problem_type_name",
"avalanche_type_id",
"avalanche_type_name",
"avalanche_ext_id",
"avalanche_ext_name"
],
"values": {
_NONE: [0, "", 0, "", 0, ""],
"new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
"wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
"new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
"glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
}
}
LABEL_PROBLEM = {
"cause": {
"ext_attr": ["aval_cause_id", "aval_cause_name"],
"values": {
"0": [0, ""],
"new-snow": [10, "Nedføyket svakt lag med nysnø"],
"hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
"facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
"crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
"snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
"ground-facet": [16, "Kantkornet snø ved bakken"],
"crust-above-facet": [18, "Kantkornet snø over skarelag"],
"crust-below-facet": [19, "Kantkornet snø under skarelag"],
"ground-water": [20, "Vann ved bakken/smelting fra bakken"],
"water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
"loose": [24, "Ubunden snø"]
}
},
"dsize": {
"ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "1 - Små"],
'2': [2, "2 - Middels"],
'3': [3, "3 - Store"],
'4': [4, "4 - Svært store"],
'5': [5, "5 - Ekstremt store"]
}
},
"prob": {
"ext_attr": ["aval_probability_id", "aval_probability_name"],
"values": {
'0': [0, "Ikke gitt"],
'2': [2, "Lite sannsynlig"],
'3': [3, "Mulig"],
'5': [5, "Sannsynlig"],
}
},
"trig": {
"ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
"values": {
'0': [0, "Ikke gitt"],
'10': [10, "Stor tilleggsbelastning"],
'21': [21, "Liten tilleggsbelastning"],
'22': [22, "Naturlig utløst"]
}
},
"dist": {
"ext_attr": ["aval_distribution_id", "aval_distribution_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "Få bratte heng"],
'2': [2, "Noen bratte heng"],
'3': [3, "Mange bratte heng"],
'4': [4, "De fleste bratte heng"]
}
},
"lev_fill": {
"ext_attr": ["exposed_height_fill"],
"values": {
'0': [0],
'1': [1],
'2': [2],
'3': [3],
'4': [4],
}
}
}
LABEL_PROBLEM_MULTI = {
"aspect": {
"ext_attr": "valid_expositions",
}
}
LABEL_PROBLEM_REAL = {
"lev_max": {
"ext_attr": "exposed_height_1",
},
"lev_min": {
"ext_attr": "exposed_height_2",
}
}
LABEL_GLOBAL = {
"danger_level": {
"ext_attr": ["danger_level", "danger_level_name"],
"values": {
'1': [1, "1 liten"],
'2': [2, "2 Moderat"],
'3': [3, "3 Betydelig"],
'4': [4, "4 Stor"],
'5': [5, "5 Meget stor"]
}
},
"emergency_warning": {
"ext_attr": ["emergency_warning"],
"values": {
"Ikke gitt": ["Ikke gitt"],
"Naturlig utløste skred": ["Naturlig utløste skred"],
}
}
}
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
"""
Object contains aggregated data used to generate labeled datasets.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param seasons: Tuple/list of string representations of avalanche seasons to fetch.
"""
self.seasons = sorted(list(set(seasons)))
self.date = None
self.regobs_types = regobs_types
self.weather = {}
self.regobs = {}
self.varsom = {}
self.labels = {}
self.use_label = True
for season in seasons:
varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
self.varsom = merge(self.varsom, varsom)
self.labels = merge(self.labels, labels)
regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
self.regobs = merge(self.regobs, regobs)
weather = _get_weather_obs(season, max_file_age=max_file_age)
self.weather = merge(self.weather, weather)
@staticmethod
def date(regobs_types, date: dt.date, days, use_label=True):
"""
Create a dataset containing just a given day's data.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param date: Date to fetch and create dataset for.
:param days: How many days to fetch before date. This will be max for .label()'s days parameter.
"""
self = ForecastDataset(regobs_types, [])
self.date = date
self.use_label = use_label
self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)
self.labels = {}
for label_keys, label in labels.items():
if label_keys not in self.labels:
self.labels[label_keys] = {}
for (label_date, label_region), label_data in label.items():
if label_date == date.isoformat():
subkey = (label_date, label_region)
self.labels[label_keys][subkey] = label_data
return self
def label(self, days, with_varsom=True):
"""Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
a time series.
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
:return: LabeledData
"""
table = {}
row_weight = {}
df = None
df_weight = None
df_label = pd.DataFrame(self.labels, dtype="U")
days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
days_r = days + 1
varsom_index = pd.DataFrame(self.varsom).index
weather_index = pd.DataFrame(self.weather).index
if len(df_label.index) == 0 and self.use_label:
raise NoBulletinWithinRangeError()
if self.date and not self.use_label:
season = gm.get_season_from_date(self.date)
regions = gm.get_forecast_regions(year=season, get_b_regions=True)
date_region = [(self.date.isoformat(), region) for region in regions]
else:
date_region = df_label.index
for monotonic_idx, entry_idx in enumerate(date_region):
date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]
def prev_key(day_dist):
return (date - dt.timedelta(days=day_dist)).isoformat(), region_id
# Just check that we can use this entry.
try:
if with_varsom:
for n in range(1, days_v):
if prev_key(n) not in varsom_index:
raise KeyError()
for n in range(0, days_w):
if prev_key(n) not in weather_index:
raise KeyError()
add_row = True
# We don't check for RegObs as it is more of the good to have type of data
except KeyError:
add_row = False
if add_row:
row = {}
for region in REGIONS:
row[(f"region_id_{region}", "0")] = float(region == region_id)
if with_varsom:
for column in self.varsom.keys():
for n in range(1, days_v):
# We try/except an extra time since single dates may run without a forecast.
row[(column, str(n))] = self.varsom[column][prev_key(n)]
for column in self.weather.keys():
for n in range(0, days_w):
try:
row[(column, str(n))] = self.weather[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
for column in self.regobs.keys():
for n in range(2, days_r):
try:
row[(column, str(n))] = self.regobs[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
try:
weight_sum = self.regobs['accuracy'][prev_key(0)]
if weight_sum < 0:
row_weight[entry_idx] = 1 / 2
elif weight_sum == 0:
row_weight[entry_idx] = 1
elif weight_sum > 0:
row_weight[entry_idx] = 2
except KeyError:
row_weight[entry_idx] = 1
# Some restructuring to make DataFrame parse the dict correctly
for key in row.keys():
if key not in table:
table[key] = {}
table[key][entry_idx] = row[key]
# Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
df_weight_new = pd.Series(row_weight)
df = df_new if df is None else pd.concat([df, df_new])
df_weight = df_weight_new if df_weight is None else pd.concat([df_weight, df_weight_new])
table = {}
row_weight = {}
if df is None or len(df.index) == 0:
raise NoDataFoundError()
if self.use_label:
df_label = df_label.loc[df.index]
df_label.sort_index(axis=0, inplace=True)
df_label.sort_index(axis=1, inplace=True)
df.sort_index(axis=0, inplace=True)
df_weight.sort_index(axis=0, inplace=True)
else:
df_label = None
return LabeledData(df, df_label, df_weight, days, self.regobs_types, with_varsom, self.seasons)
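# Usage sketch (illustrative only: building the dataset needs network access to the
# Varsom/RegObs APIs, and "Faretegn" is one example RegObs observation type):
# >>> fd = ForecastDataset(regobs_types=("Faretegn",), seasons=("2019-20",))
# >>> labeled = fd.label(days=3, with_varsom=True)
# >>> labeled.data.shape[0] == labeled.label.shape[0]
# True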
class LabeledData:
is_normalized = False
with_regions = True
elevation_class = (False, False)
scaler = StandardScaler()
def __init__(self, data, label, row_weight, days, regobs_types, with_varsom, seasons=False):
"""Holds labels and features.
:param data: A DataFrame containing the features of the dataset.
:param label: DataFrame of labels.
:param row_weight: Series containing row weights
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param regobs_types: A tuple/list of strings of types of observations to fetch from RegObs.,
e.g., `("Faretegn")`.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
"""
self.data = data
self.row_weight = row_weight
if label is not None:
self.label = label
self.label = self.label.replace(_NONE, 0)
self.label = self.label.replace(np.nan, 0)
try: self.label['CLASS', _NONE] = self.label['CLASS', _NONE].replace(0, _NONE).values
except KeyError: pass
try: self.label['MULTI'] = self.label['MULTI'].replace(0, "0").values
except KeyError: pass
try: self.label['REAL'] = self.label['REAL'].astype(float)
except KeyError: pass
self.pred = label.copy()
for col in self.pred.columns:
self.pred[col].values[:] = 0
try: self.pred['CLASS', _NONE] = _NONE
except KeyError: pass
try: self.pred['MULTI'] = "0"
except KeyError: pass
else:
self.label = None
self.pred = None
self.days = days
self.with_varsom = with_varsom
self.regobs_types = regobs_types
if self.data is not None:
self.scaler.fit(self.data.values)
self.single = not seasons
self.seasons = sorted(list(set(seasons if seasons else [])))
def normalize(self, by=None):
"""Normalize the data feature-wise using MinMax.
:return: Normalized copy of LabeledData
"""
by = by if by is not None else self
if not self.is_normalized:
ld = self.copy()
data = by.scaler.transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = by
return ld
elif self.is_normalized != by:
return self.denormalize().normalize(by=by)
else:
return self.copy()
def denormalize(self):
"""Denormalize the data feature-wise using MinMax.
:return: Denormalized copy of LabeledData
"""
if self.is_normalized:
ld = self.copy()
data = self.is_normalized.scaler.inverse_transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = False
return ld
else:
return self.copy()
def drop_regions(self):
"""Remove regions from input data"""
if self.with_regions:
ld = self.copy()
region_columns = list(filter(lambda x: re.match(r'^region_id', x[0]), ld.data.columns))
ld.data.drop(region_columns, axis=1, inplace=True)
ld.with_regions = False
ld.scaler.fit(ld.data.values)
return ld
else:
return self.copy()
def stretch_temperatures(self):
"""Stretch out temperatures near zero"""
ld = self.copy()
if self.data is not None:
temp_cols = [bool(re.match(r"^temp_(max|min)$", title)) for title in ld.data.columns.get_level_values(0)]
ld.data.loc[:, temp_cols] = np.sign(ld.data.loc[:, temp_cols]) * np.sqrt(np.abs(ld.data.loc[:, temp_cols]))
ld.scaler.fit(ld.data.values)
return ld
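    # Worked example for stretch_temperatures (plain arithmetic, not tied to any
    # particular dataset): the signed square root maps 0.25 -> 0.5, 4.0 -> 2.0 and
    # -9.0 -> -3.0, so magnitudes below 1 degree C are stretched while larger
    # magnitudes are compressed, increasing resolution around the freezing point.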
def problem_graph(self):
label = pd.Series(self.label["CLASS", _NONE, "problem_1"], name="label")
pred1 = pd.Series(self.pred["CLASS", _NONE, "problem_1"], name="problem_1")
pred2 = pd.Series(self.pred["CLASS", _NONE, "problem_2"], name="problem_2")
groups = pd.concat([label, pred1, pred2], axis=1).groupby(["label", "problem_1"], dropna=False)
count = groups.count()["problem_2"].rename("count")
p2 = groups["problem_2"].apply(lambda x: pd.Series.mode(x)[0]).replace(0, np.nan)
return pd.concat([count, p2], axis=1)
def statham(self):
"""Make a danger level in the same manner as Statham et al., 2018."""
if self.pred is None:
raise NotPredictedError
label = self.label[("CLASS", _NONE, "danger_level")].apply(np.int)
pred = self.pred[("CLASS", _NONE, "danger_level")].apply(np.int)
ones = pd.Series(np.ones(pred.shape), index=pred.index)
cols = ["label", "diff", "n"]
df = pd.DataFrame(pd.concat([label, label - pred, ones], axis=1).values, columns=cols)
bias = df.groupby(cols[:-1]).count().unstack().droplevel(0, axis=1)
n = df.groupby(cols[0]).count()["n"]
share = bias.divide(n, axis=0)
return pd.concat([n, share], axis=1)
def adam(self):
if self.pred is None:
raise NotPredictedError
touch = pd.DataFrame({
1: {(2, 10): "A", (3, 10): "A", (3, 21): "B", (5, 21): "B", (3, 22): "B", (5, 22): "B"},
2: {(2, 10): "A", (3, 10): "B", (3, 21): "C", (5, 21): "D", (3, 22): "C", (5, 22): "D"},
3: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"},
4: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"}
})
danger = pd.DataFrame({
1: {"A": 1, "B": 1, "C": 1, "D": 2, "E": 3},
2: {"A": 1, "B": 2, "C": 2, "D": 3, "E": 4},
3: {"A": 2, "B": 2, "C": 3, "D": 3, "E": 4},
4: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5},
5: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5}
})
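        # Example lookup using the two matrices above (values read directly from the
        # tables, not from any forecast): a problem with prob=3, trig=21 and dist=2
        # maps to sensitivity class "C" in `touch`, and class "C" with dsize=3 maps
        # to danger level 3 in `danger`.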
def get_danger(series):
p1 = series["CLASS", _NONE, "problem_1"]
p2 = series["CLASS", _NONE, "problem_2"]
p3 = series["CLASS", _NONE, "problem_2"]
dl = ("CLASS", _NONE, "danger_level")
ew = ("CLASS", _NONE, "emergency_warning")
if p1 == _NONE:
series[dl] = "1"
series[ew] = "Ikke gitt"
else:
p1 = series["CLASS", p1][["prob", "trig", "dist", "dsize"]].apply(np.int)
try:
dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
except KeyError:
dl1 = 0
if p2 != _NONE:
p2 = series["CLASS", p2][["prob", "trig", "dist", "dsize"]].apply(np.int)
try:
dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
except KeyError:
series[dl] = "2"
series[ew] = "Ikke gitt"
try:
if p1["trig"] == 22 and p1["dsize"] >= 3:
series[ew] = "Naturlig utløste skred"
except KeyError:
pass
return series
ld = self.copy()
ld.pred = ld.pred.apply(get_danger, axis=1)
return ld
def to_elev_class(self, exclude_label=False):
"""Convert all elevations to classes"""
if self.elevation_class == (True, exclude_label):
return self.copy()
elif self.elevation_class == (True, not exclude_label):
return self.from_elev_class().to_elev_class(exclude_label)
MAX_ELEV = 2500
def round_min(series):
region = int(series.name[1])
elev = float(series.values[0])
tl = REGION_ELEV[region][0]
return 0 if abs(elev - 0) <= abs(elev - tl) else 1
def round_max(series):
region = int(series.name[1])
elev = float(series.values[0])
tl = REGION_ELEV[region][1]
return 0 if abs(elev - MAX_ELEV) <= abs(elev - tl) else 1
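        # Example (the REGION_ELEV values here are hypothetical): with a treeline band
        # of (600, 1400) for a region, round_min maps 200 m -> 0 (closer to sea level)
        # and 1000 m -> 1 (closer to the lower treeline); round_max maps 2200 m -> 0
        # (closer to MAX_ELEV) and 1500 m -> 1 (closer to the upper treeline).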
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
df["CLASS", problem, "lev_min"] = df[[("REAL", problem, "lev_min")]].apply(round_min, axis=1).apply(str)
df["CLASS", problem, "lev_max"] = df[[("REAL", problem, "lev_max")]].apply(round_max, axis=1).apply(str)
df.drop([
("CLASS", problem, "lev_fill"),
("REAL", problem, "lev_min"),
("REAL", problem, "lev_max")
], axis=1, inplace=True)
df.sort_index(inplace=True, axis=1)
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(round_min, axis=1)
df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(round_max, axis=1)
df.drop([
(f"{prefix[0]}_fill_1", prefix[1]),
(f"{prefix[0]}_fill_2", prefix[1]),
(f"{prefix[0]}_fill_3", prefix[1]),
(f"{prefix[0]}_fill_4", prefix[1]),
], axis=1, inplace=True)
range_ld = self.copy().denormalize()
range_ld = range_ld.to_elevation_fmt_4(exclude_label)
if self.label is not None and not exclude_label:
convert_label(range_ld.label)
if self.pred is not None:
convert_label(range_ld.pred)
if self.data is not None:
convert_data(range_ld.data)
range_ld.scaler.fit(range_ld.data)
range_ld.elevation_class = (True, exclude_label)
if self.is_normalized:
return range_ld.normalize()
else:
return range_ld
def from_elev_class(self):
"""Convert all elevation classes to elevations"""
if not self.elevation_class[0]:
return self.copy()
exclude_label = self.elevation_class[1]
MAX_ELEV = 2500
def find_min(series):
region = int(series.name[1])
is_middle = bool(float(series.values[0]))
tl = REGION_ELEV[region][0]
return tl if is_middle else 0
def find_max(series):
region = int(series.name[1])
is_middle = bool(float(series.values[0]))
tl = REGION_ELEV[region][1]
return tl if is_middle else MAX_ELEV
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
df["REAL", problem, "lev_min"] = df[[("CLASS", problem, "lev_min")]].apply(find_min, axis=1).apply(str)
df["REAL", problem, "lev_max"] = df[[("CLASS", problem, "lev_max")]].apply(find_max, axis=1).apply(str)
df["CLASS", problem, "lev_fill"] = "4"
df.drop([
("CLASS", problem, "lev_min"),
("CLASS", problem, "lev_max"),
], axis=1, inplace=True)
df.sort_index(inplace=True, axis=1)
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(find_min, axis=1)
df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(find_max, axis=1)
df[f"{prefix[0]}_fill_1", prefix[1]] = 0
df[f"{prefix[0]}_fill_2", prefix[1]] = 0
df[f"{prefix[0]}_fill_3", prefix[1]] = 0
df[f"{prefix[0]}_fill_4", prefix[1]] = 1
df.sort_index(inplace=True, axis=1)
range_ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(range_ld.label)
if self.pred is not None:
convert_label(range_ld.pred)
if self.data is not None:
convert_data(range_ld.data)
range_ld.scaler.fit(range_ld.data)
range_ld.elevation_class = (False, False)
if self.is_normalized:
return range_ld.normalize()
else:
return range_ld
def to_elevation_fmt_1(self, exclude_label=False):
"""Convert all elevations to format 1"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
twos = fill == "2"
threes = fill == "3"
fours = fill == "4"
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_max")] = 0
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_min")] = 0
df.loc[np.logical_or(twos, threes), ("CLASS", problem, "lev_fill")] = "1"
df.loc[fours, ("REAL", problem, "lev_max")] = df.loc[fours, ("REAL", problem, "lev_min")]
df.loc[fours, ("REAL", problem, "lev_min")] = 0
df.loc[fours, ("CLASS", problem, "lev_fill")] = "1"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_max", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_fill_1", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
df.loc[fours, (f"{prefix[0]}_max", prefix[1])] = df.loc[fours, (f"{prefix[0]}_min", prefix[1])]
df.loc[fours, (f"{prefix[0]}_min", prefix[1])] = 0
                df.loc[fours == True, (f"{prefix[0]}_fill_1", prefix[1])] = 1
                df[(f"{prefix[0]}_fill_4", prefix[1])] = np.zeros(fours.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def to_elevation_fmt_4(self, exclude_label=False):
"""Convert all elevations to ranges"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
ones = fill == "1"
twos = fill == "2"
threes = fill == "3"
df.loc[ones, ("REAL", problem, "lev_min")] = df.loc[ones, ("REAL", problem, "lev_max")]
df.loc[ones, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[ones, ("CLASS", problem, "lev_fill")] = "4"
df.loc[twos, ("REAL", problem, "lev_min")] = 0
df.loc[twos, ("CLASS", problem, "lev_fill")] = "4"
df.loc[threes, ("REAL", problem, "lev_min")] = 0
df.loc[threes, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[threes, ("CLASS", problem, "lev_fill")] = "4"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[ones, (f"{prefix[0]}_min", prefix[1])] = df.loc[ones, (f"{prefix[0]}_max", prefix[1])]
df.loc[ones, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[ones == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_1", prefix[1])] = np.zeros(ones.shape)
df.loc[twos, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[twos == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df.loc[threes, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[threes, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[threes == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def valid_pred(self):
"""Makes the bulletins internally coherent. E.g., removes problem 3 if problem 2 is blank."""
if self.pred is None:
raise NotPredictedError
ld = self.copy()
if self.elevation_class:
ld = ld.from_elev_class()
# Handle Problem 1-3
prob_cols = []
for n in range(1, 4):
if f"problem_{n}" in list(ld.pred["CLASS", _NONE].columns):
prob_cols.append(("CLASS", _NONE, f"problem_{n}"))
prev_eq = np.zeros((ld.pred.shape[0], len(prob_cols)), dtype=bool)
for n, col in enumerate(prob_cols):
for mcol in prob_cols[0:n]:
# If equal to problem_n-1/2, set to _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
np.equal(ld.pred[mcol], ld.pred[col])
)
# Set to None if problem_n-1/2 was _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
ld.pred[mcol] == _NONE
)
ld.pred.loc[prev_eq[:, n], col] = _NONE
# Delete subproblem solutions that are irrelevant
for subprob in PROBLEMS.values():
rows = np.any(np.char.equal(ld.pred.loc[:, prob_cols].values.astype("U"), subprob), axis=1) == False
columns = [name == subprob for name in ld.pred.columns.get_level_values(1)]
ld.pred.loc[rows, columns] = _NONE
# Set problem_amount to the right number
ld.pred['CLASS', _NONE, 'problem_amount'] = np.sum(ld.pred.loc[:, prob_cols] != _NONE, axis=1).astype(str)
# If lev_fill is "3" or "4", lev_min is always "0"
for subprob in PROBLEMS.values():
if "lev_fill" in ld.pred["CLASS", subprob].columns:
fill = ld.pred.astype(str)["CLASS", subprob, "lev_fill"]
if "lev_min" in ld.pred["REAL", subprob]:
ld.pred.loc[np.logical_or(fill == "1", fill == "2"), ("REAL", subprob, "lev_min")] = "0"
if "lev_min" in ld.pred["REAL", subprob] and "lev_max" in ld.pred["REAL", subprob]:
real = ld.pred["REAL", subprob].replace("", np.nan).astype(np.float)
reversed_idx = real["lev_min"] > real["lev_max"]
                    average = (real.loc[reversed_idx, "lev_min"] + real.loc[reversed_idx, "lev_max"]) / 2
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_min")] = average
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_max")] = average
ld.pred.loc[:, ["CLASS", "MULTI"]] = ld.pred.loc[:, ["CLASS", "MULTI"]].astype(str)
ld.pred["REAL"] = ld.pred["REAL"].replace("", np.nan).astype(np.float)
return ld
    def split(self, rounds=3, seed=None):
        """Returns a split of the object into a training set, a test set and a validation set.
        Parameters rounds and seed are not used any more.
        Use as:
        for train, test, eval in ld.split():
            model.fit(train)
            model.predict(test)
            model.predict(eval)
        """
train_regions = [3007, 3012, 3010, 3009, 3013, 3017, 3014, 3032, 3027, 3029, 3022, 3031, 3023, 3037, 3024, 3028]
test_regions = [3011, 3016, 3035]
eval_regions = [3006, 3015, 3034]
split = []
for regions in [train_regions, test_regions, eval_regions]:
ld = self.copy()
ld.data = ld.data.iloc[[region in regions for region in ld.data.index.get_level_values(1)]]
ld.label = ld.label.iloc[[region in regions for region in ld.label.index.get_level_values(1)]]
ld.pred = ld.pred.iloc[[region in regions for region in ld.pred.index.get_level_values(1)]]
ld.row_weight = ld.row_weight.iloc[[region in regions for region in ld.row_weight.index.get_level_values(1)]]
split.append(ld)
return [tuple(split)]
def f1(self):
"""Get F1, precision, recall and RMSE of all labels.
        :return: DataFrame with scores of all possible labels and values.
"""
if self.label is None or self.pred is None:
raise DatasetMissingLabel()
dummies = self.to_dummies()
old_settings = np.seterr(divide='ignore', invalid='ignore')
df_idx = pd.MultiIndex.from_arrays([[], [], [], []])
df = pd.DataFrame(index=df_idx, columns=["f1", "precision", "recall", "rmse"])
try:
prob_cols = [
name.startswith("problem_") for name in self.label.columns.get_level_values(2)
]
except KeyError:
prob_cols = pd.DataFrame(index=self.label.index)
for column, pred_series in dummies["pred"].items():
if column[1]:
true_idx = self.label.loc[
np.any(np.char.equal(self.label.loc[:, prob_cols].values.astype("U"), column[1]), axis=1)
].index
pred_idx = self.pred.loc[
np.any(np.char.equal(self.pred.loc[:, prob_cols].values.astype("U"), column[1]), axis=1)
].index
idx = list(set(true_idx.to_list()).intersection(set(pred_idx.to_list())))
else:
idx = list(set(self.label.index).intersection(set(self.pred.index)))
if column[0] in ["CLASS", "MULTI"] and column in dummies["label"].columns:
truth = dummies["label"][column][idx]
pred = pred_series[idx]
true_pos = np.sum(truth * pred)
if not np.sum(truth) or (column[0] == "CLASS" and column[1] and column[3] == "0"):
continue
prec = true_pos / np.sum(pred) if np.sum(pred) else 0
recall = true_pos / np.sum(truth)
f1 = 2 * prec * recall / (prec + recall) if prec + recall else 0
df.loc[column] = pd.Series([f1, prec, recall, np.nan], index=df.columns)
elif column[0] in ["REAL"] and column in dummies["label"].columns:
truth = dummies["label"][column][idx]
pred = pred_series[idx]
if not len(truth):
continue
                rmse = np.sqrt(np.mean(np.square(pred - truth)))
df.loc[column] = | pd.Series([np.nan, np.nan, np.nan, rmse], index=df.columns) | pandas.Series |
#!/usr/bin/env python
import sys
import json
import os.path
from collections import defaultdict
from pprint import pprint
import scipy.stats as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Some dictionaries to fix text
# MultinomNB:Markov:LinearSVC:SK_Ovr_LR
algorithm = {"MultinomNB":"Multinomial Bayes",
"Markov":"Markov",
"LinearSVC":"Linear SVM",
"SK_Ovr_LR":"Logistic Regression"
}
model = { "MLE_MultinomNB":"MLE",
"BAY_MultinomNB_Alpha_1e-100":"alpha=1e-100",
"BAY_MultinomNB_Alpha_1e-10":"alpha=1e-10",
"BAY_MultinomNB_Alpha_1e-5":"alpha=1e-5",
"BAY_MultinomNB_Alpha_1e-2":"alpha=1e-2",
"BAY_MultinomNB_Alpha_1":"alpha=1",
"MLE_Markov":"MLE",
"BAY_Markov_Alpha_1e-100":"alpha=1e-100",
"BAY_Markov_Alpha_1e-10":"alpha=1e-10",
"BAY_Markov_Alpha_1e-5":"alpha=1e-5",
"BAY_Markov_Alpha_1e-2":"alpha=1e-2",
"BAY_Markov_Alpha_1":"alpha=1",
"SK_Ovr_LR_Liblinear_L1":"LR_L1",
"SK_Ovr_LR_Liblinear_L2":"LR_L2",
"SK_LinearSVC_SquaredHinge_L1_Primal":"LSVM_L1",
"SK_LinearSVC_Hinge_L2_Dual":"hinge_L2_dual",
"SK_LinearSVC_SquaredHinge_L2_Dual":"sqHinge_L2_dual",
"SK_LinearSVC_SquaredHinge_L2_Primal":"LSVM_L2"
}
def compile_data(json_sc, clf_kwds, kList, metric):
scores_tmp = defaultdict(lambda : defaultdict(dict))
scores = defaultdict(dict)
for algo in json_sc:
for kwd in clf_kwds:
if kwd in algo:
if algo not in ("SK_LinearSVC_Hinge_L2_Dual", "SK_LinearSVC_SquaredHinge_L2_Dual"):
values = [ np.array(json_sc[algo][str(k)][metric]) for k in kList ]
scores_tmp[kwd]["mean"][model[algo]] = np.array([ k.mean() for k in values ])
scores_tmp[kwd]["std"][model[algo]] = np.array([ k.std() for k in values ])
#print(scores_tmp)
for kwd in clf_kwds:
scores[algorithm[kwd]]["mean"] = pd.DataFrame(scores_tmp[kwd]["mean"], columns=scores_tmp[kwd]["mean"].keys())
scores[algorithm[kwd]]["mean"].index = kList
scores[algorithm[kwd]]["std"] = pd.DataFrame(scores_tmp[kwd]["std"], columns=scores_tmp[kwd]["std"].keys())
scores[algorithm[kwd]]["std"].index = kList
#print(scores)
return scores
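# Shape of the structure returned above (illustrative sketch; the algorithm and model
# names come from the dictionaries at the top of this script):
#
#     scores["Multinomial Bayes"]["mean"] -> DataFrame indexed by k in kList,
#                                            one column per model (e.g. "MLE", "alpha=1e-10")
#     scores["Multinomial Bayes"]["std"]  -> matching DataFrame of standard deviations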
def compile_frgmt_data(score_dict, the_models):
# "Multinomial Bayes": ["MLE", "alpha=1e-100"],
final_scores = defaultdict(lambda : defaultdict(dict))
frgtm_columns = list(score_dict.keys())
for frgmt_size in score_dict:
# "Multinomial Bayes"
for classifier in score_dict[frgmt_size]:
if classifier in the_models:
means = score_dict[frgmt_size][classifier]["mean"]
stds = score_dict[frgmt_size][classifier]["std"]
# "MLE"
for model in the_models[classifier]:
if model not in final_scores[classifier]:
final_scores[classifier][model]["mean"] = | pd.DataFrame(index=means.index, columns=frgtm_columns) | pandas.DataFrame |
import os
import pandas as pd
from pandas.api.types import is_string_dtype
import numpy as np
from numpy import log
from scipy.linalg import norm
from scipy.stats import entropy
import nltk
import nltk.data
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from textblob import TextBlob # Consider changing
from gensim.models import word2vec
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.linear_model import Perceptron
from polo2 import PoloDb
from polo2 import PoloFile
class PoloCorpus(PoloDb):
ngram_prefixes = ['no', 'uni', 'bi', 'tri', 'quadri']
def __init__(self, config):
"""Initialize corpus object"""
# Import Configs
self.config = config
self.config.set_config_attributes(self)
if not os.path.isfile(self.cfg_src_file_name):
raise ValueError("Missing source file. Check value of `src_file_name` in INI file.")
self.dbfile = config.generate_corpus_db_file_path()
PoloDb.__init__(self, self.dbfile)
# self.db = PoloDb(self.dbfile) # Why not do this?
if self.cfg_nltk_data_path:
nltk.data.path.append(self.cfg_nltk_data_path)
# For tokenizing into sentences
# fixme: TOKENIZER ASSUMES ENGLISH -- PARAMETIZE THIS
nltk.download('punkt')
nltk.download('tagsets')
nltk.download('averaged_perceptron_tagger')
self.tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
def import_table_doc(self, src_file_name=None, normalize=True):
"""Import source file into doc table"""
# Read in file content
if not src_file_name:
src_file_name = self.cfg_src_file_name
doc = pd.read_csv(src_file_name, header=0, sep=self.cfg_src_file_sep, lineterminator='\n')
doc.index.name = 'doc_id'
# todo: Find a more efficient way of handling this -- such as not duplicating!
# This is a legacy of an older procedure which now has performance implications.
if 'doc_original' not in doc.columns:
doc['doc_original'] = doc.doc_content
# todo: Put this in a separate and configurable function for general text normalization.
# Preliminary normalization of documents
doc['doc_content'] = doc.doc_content.str.replace(r'\n+', ' ', regex=True) # Remove newlines
doc['doc_content'] = doc.doc_content.str.replace(r'<[^>]+>', ' ', regex=True) # Remove tags
doc['doc_content'] = doc.doc_content.str.replace(r'\s+', ' ', regex=True) # Collapse spaces
# Remove empty docs
doc = doc[~doc.doc_content.isnull()]
doc.reindex()
self.put_table(doc, 'doc', index=True)
def import_table_stopword(self, use_nltk=False):
"""Import stopwords"""
swset = set()
# fixme: Cast integers in config object
# fixme: Parametize language
if int(self.cfg_use_nltk) == 1:
nltk_stopwords = set(stopwords.words('english'))
swset.update(nltk_stopwords)
if self.cfg_extra_stops and os.path.isfile(self.cfg_extra_stops):
src = PoloFile(self.cfg_extra_stops)
swset.update([word for word in src.read_bigline().split()])
swdf = pd.DataFrame({'token_str': list(swset)})
self.put_table(swdf, 'stopword')
# todo: Consider changing table name to DOCTERM or TOKEN
def add_table_doctoken(self):
"""Create doctoken and doctokenbow tables; update doc table"""
docs = self.get_table('doc', set_index=True)
# todo: Consider dividing this in two parts, the first to create a Phrase model with Gensim
# This takes a long time
doctokens = pd.DataFrame([(sentences[0], j, k, token[0], token[1])
for sentences in docs.apply(lambda x: (x.name, sent_tokenize(x.doc_content)), 1)
for j, sentence in enumerate(sentences[1])
for k, token in enumerate(nltk.pos_tag(nltk.word_tokenize(sentence)))],
columns=['doc_id', 'sentence_id', 'token_ord', 'token_str', 'pos'])
doctokens = doctokens.set_index(['doc_id', 'sentence_id', 'token_ord'])
# Normalize
doctokens.token_str = doctokens.token_str.str.lower()
doctokens.token_str = doctokens.token_str.str.replace(r'[^a-z]+', '', regex=True)
doctokens = doctokens[~doctokens.token_str.str.match(r'^\s*$')]
# todo: Instead of removing stopwords, identify with feature
stopwords = self.get_table('stopword').token_str.tolist()
doctokens = doctokens[~doctokens.token_str.isin(stopwords)]
self.put_table(doctokens, 'doctoken', if_exists='replace', index=True)
# Creates a BOW model for the doc, removing words in sequence and only keeping counts
doctokenbow = pd.DataFrame(doctokens.groupby('doc_id').token_str.value_counts())
doctokenbow.columns = ['token_count']
self.put_table(doctokenbow, 'doctokenbow', index=True)
# Add token counts to doc
docs['token_count'] = doctokenbow.groupby('doc_id').token_count.sum()
self.put_table(docs, 'doc', if_exists='replace', index=True)
# fixme: TOKEN should be the TERM table (aka VOCAB)
def add_table_token(self):
"""Get token data from doctoken and doctokenbow"""
doctoken = self.get_table('doctoken')
token = pd.DataFrame(doctoken.token_str.value_counts())
token.sort_index(inplace=True)
token.reset_index(inplace=True)
token.columns = ['token_str', 'token_count']
token.index.name = 'token_id'
# Add pos_max to token
pos_max = doctoken.groupby(['token_str', 'pos']).pos.count().unstack().idxmax(1)
token['pos_max'] = token.token_str.map(pos_max)
# Replace token_str with token_id in doctokenbow
token.reset_index(inplace=True)
doctokenbow = self.get_table('doctokenbow')
doctokenbow = doctokenbow.merge(token[['token_id', 'token_str']], on="token_str")
doctokenbow = doctokenbow[['doc_id', 'token_id', 'token_count']]
doctokenbow.sort_values('doc_id', inplace=True)
doctokenbow.set_index(['doc_id', 'token_id'], inplace=True)
self.put_table(doctokenbow, 'doctokenbow', if_exists='replace', index=True)
# Add doc counts to token
token.set_index('token_id', inplace=True)
token['doc_count'] = doctokenbow.groupby('token_id').count()
self.put_table(token, 'token', index=True)
# fixme: Use a better sentiment detector
def _get_sentiment(self, doc):
doc2 = TextBlob(doc)
return doc2.sentiment
def add_tfidf_to_doctokenbow(self):
"""Add TFIDF data to doctokenbow table"""
doctokenbow = self.get_table('doctokenbow', set_index=True)
tokens = self.get_table('token', set_index=True)
docs = pd.read_sql_query("SELECT doc_id, token_count FROM doc", self.conn, index_col='doc_id')
num_docs = docs.index.size
        # Compute local and global token (actually term) significance
self.alpha = .4
doc_max = doctokenbow.groupby('doc_id').token_count.max()
tokens['df'] = doctokenbow.groupby('token_id').token_count.count()
# n_docs = len(doctokenbow.index.levels[0])
tokens['idf'] = np.log2(num_docs/tokens.df)
tokens['dfidf'] = tokens.df * tokens.idf
doctokenbow['tf'] = self.alpha + (1 - self.alpha) * (doctokenbow.token_count / doc_max)
doctokenbow['tfidf'] = doctokenbow.tf * tokens.idf
doctokenbow['tfidf_l2'] = doctokenbow['tfidf'] / doctokenbow.groupby(['doc_id']).apply(lambda x: norm(x.tfidf, 2))
tokens['tfidf_sum'] = doctokenbow.groupby('token_id').tfidf_l2.sum()
tokens['tfidf_avg'] = doctokenbow.groupby('token_id').tfidf_l2.mean()
self.put_table(doctokenbow, 'doctokenbow', if_exists='replace', index=True)
self.put_table(tokens, 'token', if_exists='replace', index=True)
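    # Worked example of the weighting above (numbers are illustrative): with
    # alpha = 0.4, a token appearing 3 times in a doc whose most frequent token
    # appears 6 times gets tf = 0.4 + 0.6 * (3 / 6) = 0.7, and its tfidf is
    # tf * log2(num_docs / df) before the per-document L2 normalisation.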
def add_stems_to_token(self):
"""Add stems to token table"""
# We only use one stemmer since stemmers suck anyway :-)
porter_stemmer = PorterStemmer()
tokens = self.get_table('token', set_index=True)
tokens['token_stem_porter'] = tokens.token_str.apply(porter_stemmer.stem)
self.put_table(tokens, 'token', if_exists='replace', index=True)
def add_sentimant_to_doc(self):
"""Add sentiment to doc table"""
doc = self.get_table('doc', set_index=True)
doc['doc_sentiment'] = doc.doc_content.apply(self._get_sentiment)
doc['doc_sentiment_polarity'] = doc.doc_sentiment.apply(lambda x: round(x[0], 1))
doc['doc_sentiment_subjectivity'] = doc.doc_sentiment.apply(lambda x: round(x[1], 2))
del(doc['doc_sentiment'])
self.put_table(doc, 'doc', index=True)
def add_tables_ngram_and_docngram(self, n=2):
"""Create ngram and docngram tables for n (using stems)"""
key = {2:'bi', 3:'tri'}
try:
infix = key[n]
except KeyError as e:
print('Invalid ngram length. Must be 2 or 3')
return False
sql = {}
sql['bi'] = """
SELECT dt_x.doc_id AS doc_id,
t_x.token_stem_porter AS tx,
t_y.token_stem_porter AS ty,
t_x.token_stem_porter || '_' || t_y.token_stem_porter AS ngram,
count() AS tf
FROM doctoken dt_x
JOIN doctoken dt_y ON (dt_x.doc_id = dt_y.doc_id
AND dt_x.sentence_id = dt_y.sentence_id
AND dt_y.rowid = (dt_x.rowid + 1))
JOIN token t_x ON dt_x.token_str = t_x.token_str
JOIN token t_y ON dt_y.token_str = t_y.token_str
GROUP BY dt_x.doc_id, ngram
"""
sql['tri'] = """
SELECT dt_x.doc_id AS doc_id,
t_x.token_stem_porter AS tx,
t_y.token_stem_porter AS ty,
t_z.token_stem_porter AS tz,
t_x.token_stem_porter || '_' || t_y.token_stem_porter || '_' || t_z.token_stem_porter AS ngram,
count() AS tf
FROM doctoken dt_x
JOIN doctoken dt_y ON (dt_x.doc_id = dt_y.doc_id
AND dt_x.sentence_id = dt_y.sentence_id
AND dt_y.rowid = (dt_x.rowid + 1))
JOIN doctoken dt_z ON (dt_x.doc_id = dt_z.doc_id
AND dt_x.sentence_id = dt_z.sentence_id
AND dt_z.rowid = (dt_y.rowid + 1))
JOIN token t_x ON dt_x.token_str = t_x.token_str
JOIN token t_y ON dt_y.token_str = t_y.token_str
JOIN token t_z ON dt_z.token_str = t_z.token_str
GROUP BY dt_x.doc_id, ngram
"""
docngrams = pd.read_sql(sql[infix], self.conn)
self.put_table(docngrams, 'ngram{}doc'.format(infix), index=False)
def add_stats_to_ngrams(self, type='bi'):
"""Create distinct ngram tables with stats"""
sql1 = """
SELECT g.doc_id, d.doc_label, g.ngram, g.tf
FROM ngram{}doc g
JOIN doc d USING(doc_id)
""".format(type)
sql2 = """
SELECT ngram, doc_label, sum(tf) AS tf_sum
FROM (
SELECT g.doc_id, d.doc_label, g.ngram, g.tf
FROM ngram{}doc g
JOIN doc d USING(doc_id)
)
GROUP BY doc_label, ngram
""".format(type)
sql3 = """
WITH stats(n) AS (SELECT COUNT() AS n FROM doc)
SELECT ngram, count() AS c, (SELECT n FROM stats) AS n,
CAST(COUNT() AS REAL) / CAST((SELECT n FROM stats) AS REAL) AS df
FROM ngram{}doc
GROUP BY ngram
ORDER BY c DESC
""".format(type)
docngram = | pd.read_sql_query(sql1, self.conn, index_col='doc_id') | pandas.read_sql_query |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for terrestrial designated protected area in the EEA countries (EEA-38+UK)
data = pd.read_csv('../processed/protected_land_area_europe.csv')
data['year'] = pd.to_datetime(data['year'].astype(str), format='%Y', errors='coerce')
agg_data = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 10:30:25 2018
Try to predict in which lab an animal was trained based on its behavior
@author: guido
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from os.path import join
import seaborn as sns
import datajoint as dj
from ibl_pipeline import subject, acquisition, action, behavior, reference
from ibl_pipeline.analyses import behavior as behavior_analysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
# Settings
path = '/home/guido/Figures/Behavior/'
iterations = 5 # how often to decode
num_splits = 3 # n in n-fold cross validation
decoding_metrics = ['perf_easy','n_trials','threshold','bias','reaction_time','training_time']
decoding_metrics_control = ['perf_easy','n_trials','threshold','bias','reaction_time','training_time','time_zone']
# Decoding function with n-fold cross validation
def decoding(resp, labels, clf, num_splits):
kf = KFold(n_splits=num_splits, shuffle=True)
y_pred = np.array([])
y_true = np.array([])
for train_index, test_index in kf.split(resp):
train_resp = resp[train_index]
test_resp = resp[test_index]
clf.fit(train_resp, [labels[j] for j in train_index])
y_pred = np.append(y_pred, clf.predict(test_resp))
y_true = np.append(y_true, [labels[j] for j in test_index])
f1 = f1_score(y_true, y_pred, labels=np.unique(labels), average='micro')
return f1
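# Illustrative call (not executed here; `metrics` and `labs` are placeholders for the
# arrays built further below from the decoding_metrics columns and each mouse's lab):
#
#     f1 = decoding(np.array(metrics), list(labs), RandomForestClassifier(), num_splits)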
# Query list of subjects
all_sub = subject.Subject * subject.SubjectLab & 'subject_birth_date > "2018-09-01"' & 'subject_line IS NULL OR subject_line="C57BL/6J"'
subjects = all_sub.fetch('subject_nickname')
# Create dataframe with behavioral metrics of all mice
learning = | pd.DataFrame(columns=['mouse','lab','time_zone','learned','date_learned','training_time','perf_easy','n_trials','threshold','bias','reaction_time','lapse_low','lapse_high']) | pandas.DataFrame |
from page.base_page import BasePage
import streamlit as st
import pandas as pd
class TemplatePage(BasePage):
def __init__(self, app_data, **kwargs):
super().__init__(app_data, **kwargs)
self.title = 'Template Page'
st.title(self.title)
def run(self):
df = | pd.DataFrame([f'<a target="_blank" href="https://coinmarketcap.com/ja/currencies/ethereum/">text</a>']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Monthly_CSV_Creator.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1w6g3Ji4hVgK3rJDz5b4UBOGX1mqK65Gt
"""
import pandas as pd
import time
import os
import tarfile
import json
#Set initial directory as the directory where the script is running.
initial_directory = '.'
os.chdir(initial_directory)
print("Current working directory is", os.getcwd())
# Select directories
directory_prompt_selection = 'none'
while directory_prompt_selection != 'w':
current_directories = [d for d in os.listdir('.') if os.path.isdir(d)]
print("Current working directory is", os.getcwd())
for directory in current_directories:
print(current_directories.index(directory), directory)
print("w", "We can use current directory")
print("c", "Change top level directory")
print("u", "Navigate up one level.")
directory_prompt_selection = input("Choose an option:")
print("\n")
if directory_prompt_selection == 'c':
directory_prompt_selection = input('Provide either the relative or full path name of the directory')
os.chdir(directory_prompt_selection)
elif directory_prompt_selection == 'u':
os.chdir('../')
elif directory_prompt_selection == 'w':
None
else:
os.chdir(os.path.join('.', current_directories[int(directory_prompt_selection)]))
print("Current working directory is", os.getcwd())
# Get rid of anything the operating system may add...
files = ([x for x in sorted(filter(os.path.isfile,
os.listdir('.')),
key=os.path.getmtime) if x[-2:] == "xz"])
files
# Start some timers to track how long everything is taking.
stopwatch_start = time.time()
stopwatch_hour = time.time()
stations_big = | pd.DataFrame() | pandas.DataFrame |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import pyomo.environ as pyo
from collections import deque
import pandas as pd
from idaes.apps.grid_integration import Tracker
from idaes.apps.grid_integration import Bidder
from idaes.apps.grid_integration import PlaceHolderForecaster
from pyomo.common.dependencies import attempt_import
Prescient, prescient_avail = attempt_import("prescient.simulator.Prescient")
class ThermalGenerator:
"""
    Simple thermal generator model (MIP). The model equations are from Gao, Knueven,
Siirola, Miller, Dowling (2022). Multiscale Simulation of Integrated Energy
System and Electricity Market Interactions. Applied Energy.
"""
# Using 4 segments to be consistent with models in RTS-GMLC dataset
segment_number = 4
def __init__(self, rts_gmlc_dataframe, horizon=48, generator="102_STEAM_3"):
"""
Initializes the class object by building the thermal generator model.
Arguments:
rts_gmlc_dataframe: the RTS-GMLC generator data in Pandas DataFrame
horizon: the length of the planning horizon of the model.
generator: a generator in RTS-GMLC
Returns:
None
"""
self.generator = generator
self.horizon = horizon
self.model_data = self.assemble_model_data(
generator_name=generator, gen_params=rts_gmlc_dataframe
)
self.result_list = []
@staticmethod
def assemble_model_data(generator_name, gen_params):
"""
This function assembles the parameter data to build the thermal generator
model, given a list of generator names and the RTS-GMLC data directory.
Arguments:
generator_names: a generator name in RTS-GMLC dataset.
gen_params: the RTS-GMLC generator data in Pandas DataFrame
Returns:
model_data: a dictionary which has this structure
{data type name: value}.
"""
gen_params = gen_params.set_index("GEN UID", inplace=False)
properties = [
"PMin MW",
"PMax MW",
"Min Up Time Hr",
"Min Down Time Hr",
"Ramp Rate MW/Min",
"Start Heat Warm MBTU",
"Fuel Price $/MMBTU",
"HR_avg_0",
"HR_incr_1",
"HR_incr_2",
"HR_incr_3",
"Output_pct_1",
"Output_pct_2",
"Output_pct_3",
]
# to dict
model_data = gen_params.loc[generator_name, properties].to_dict()
model_data["RU"] = model_data["Ramp Rate MW/Min"] * 60
model_data["RD"] = model_data["RU"]
model_data["SU"] = min(model_data["PMin MW"], model_data["RU"])
model_data["SD"] = min(model_data["PMin MW"], model_data["RD"])
model_data["SU Cost"] = (
model_data["Start Heat Warm MBTU"] * model_data["Fuel Price $/MMBTU"]
)
model_data["Min Load Cost"] = (
model_data["HR_avg_0"]
/ 1000
* model_data["Fuel Price $/MMBTU"]
* model_data["PMin MW"]
)
model_data["Power Segments"] = {}
model_data["Marginal Costs"] = {}
model_data["Original Marginal Cost Curve"] = {}
model_data["Original Marginal Cost Curve"][model_data["PMin MW"]] = (
model_data["Min Load Cost"] / model_data["PMin MW"]
)
for l in range(1, ThermalGenerator.segment_number):
model_data["Power Segments"][l] = (
model_data["Output_pct_{}".format(l)] * model_data["PMax MW"]
)
model_data["Marginal Costs"][l] = (
model_data["HR_incr_{}".format(l)]
/ 1000
* model_data["Fuel Price $/MMBTU"]
)
model_data["Original Marginal Cost Curve"][
model_data["Power Segments"][l]
] = model_data["Marginal Costs"][l]
return model_data
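    # Illustrative numbers (hypothetical, not taken from RTS-GMLC): with a fuel price
    # of 2 $/MMBTU and HR_incr_1 = 10000 BTU/kWh, the marginal cost of segment 1 is
    # 10000 / 1000 * 2 = 20 $/MWh, and a warm-start heat of 50 MBTU gives a
    # start-up cost of 50 * 2 = 100 $.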
@staticmethod
def _add_UT_DT_constraints(b):
"""
This function adds the minimum up/down time constraints using eq. 4 - 5
in "On mixed-integer programming formulations for the unit commitment
problem". INFORMS Journal on Computing, 32(4), pp.857-876. <NAME>.,
<NAME>. and <NAME>., 2020.
Arguments:
b: a pyomo block
Returns:
None
"""
def pre_shut_down_trajectory_set_rule(b):
return (t for t in range(-pyo.value(b.min_dw_time) + 1, 0))
b.pre_shut_down_trajectory_set = pyo.Set(
dimen=1, initialize=pre_shut_down_trajectory_set_rule, ordered=True
)
def pre_start_up_trajectory_set_rule(b):
return (t for t in range(-pyo.value(b.min_up_time) + 1, 0))
b.pre_start_up_trajectory_set = pyo.Set(
dimen=1, initialize=pre_start_up_trajectory_set_rule, ordered=True
)
b.pre_shut_down_trajectory = pyo.Param(
b.pre_shut_down_trajectory_set, initialize=0, mutable=True
)
b.pre_start_up_trajectory = pyo.Param(
b.pre_start_up_trajectory_set, initialize=0, mutable=True
)
def min_down_time_rule(b, h):
if h < pyo.value(b.min_dw_time):
return (
sum(
b.pre_shut_down_trajectory[h0]
for h0 in range(h - pyo.value(b.min_dw_time) + 1, 0)
)
+ sum(b.shut_dw[h0] for h0 in range(h + 1))
<= 1 - b.on_off[h]
)
else:
return (
sum(
b.shut_dw[h0]
for h0 in range(h - pyo.value(b.min_dw_time) + 1, h + 1)
)
<= 1 - b.on_off[h]
)
b.min_down_time_con = pyo.Constraint(b.HOUR, rule=min_down_time_rule)
def min_up_time_rule(b, h):
if h < pyo.value(b.min_up_time):
return (
sum(
b.pre_start_up_trajectory[h0]
for h0 in range(h - pyo.value(b.min_up_time) + 1, 0)
)
+ sum(b.start_up[h0] for h0 in range(h + 1))
<= b.on_off[h]
)
else:
return (
sum(
b.start_up[h0]
for h0 in range(h - pyo.value(b.min_up_time) + 1, h + 1)
)
<= b.on_off[h]
)
b.min_up_time_con = pyo.Constraint(b.HOUR, rule=min_up_time_rule)
return
def populate_model(self, b):
"""
This function builds the model for a thermal generator.
Arguments:
b: a pyomo block
Returns:
b: the constructed block.
"""
model_data = self.model_data
## define the sets
b.HOUR = pyo.Set(initialize=list(range(self.horizon)))
b.SEGMENTS = pyo.Set(initialize=list(range(1, self.segment_number)))
## define the parameters
b.start_up_cost = pyo.Param(initialize=model_data["SU Cost"], mutable=False)
# capacity of generators: upper bound (MW)
b.Pmax = pyo.Param(initialize=model_data["PMax MW"], mutable=False)
# minimum power of generators: lower bound (MW)
b.Pmin = pyo.Param(initialize=model_data["PMin MW"], mutable=False)
b.power_segment_bounds = pyo.Param(
b.SEGMENTS, initialize=model_data["Power Segments"], mutable=False
)
# get the cost slopes
b.F = pyo.Param(
b.SEGMENTS, initialize=model_data["Marginal Costs"], mutable=False
)
b.min_load_cost = pyo.Param(
initialize=model_data["Min Load Cost"], mutable=False
)
# Ramp up limits (MW/h)
b.ramp_up = pyo.Param(initialize=model_data["RU"], mutable=False)
# Ramp down limits (MW/h)
b.ramp_dw = pyo.Param(initialize=model_data["RD"], mutable=False)
# start up ramp limit
b.ramp_start_up = pyo.Param(initialize=model_data["SU"], mutable=False)
# shut down ramp limit
b.ramp_shut_dw = pyo.Param(initialize=model_data["SD"], mutable=False)
# minimum down time [hr]
b.min_dw_time = pyo.Param(
initialize=int(model_data["Min Down Time Hr"]), mutable=False
)
# minimum up time [hr]
b.min_up_time = pyo.Param(
initialize=int(model_data["Min Up Time Hr"]), mutable=False
)
# on/off status from previous day
b.pre_on_off = pyo.Param(within=pyo.Binary, default=1, mutable=True)
# define a function to initialize the previous power params
def init_pre_pow_fun(b):
return b.pre_on_off * b.Pmin
b.pre_P_T = pyo.Param(initialize=init_pre_pow_fun, mutable=True)
## define the variables
# generator power (MW)
# power generated by thermal generator
b.P_T = pyo.Var(
b.HOUR, initialize=model_data["PMin MW"], within=pyo.NonNegativeReals
)
# binary variables indicating on/off
b.on_off = pyo.Var(b.HOUR, initialize=True, within=pyo.Binary)
# binary variables indicating start_up
b.start_up = pyo.Var(b.HOUR, initialize=False, within=pyo.Binary)
# binary variables indicating shut down
b.shut_dw = pyo.Var(b.HOUR, initialize=False, within=pyo.Binary)
# power produced in each segment
b.power_segment = pyo.Var(b.HOUR, b.SEGMENTS, within=pyo.NonNegativeReals)
## Constraints
# bounds on gen_pow
def lhs_bnd_gen_pow_fun(b, h):
return b.on_off[h] * b.Pmin <= b.P_T[h]
b.lhs_bnd_gen_pow = pyo.Constraint(b.HOUR, rule=lhs_bnd_gen_pow_fun)
def rhs_bnd_gen_pow_fun(b, h):
return b.P_T[h] <= b.on_off[h] * b.Pmax
b.rhs_bnd_gen_pow = pyo.Constraint(b.HOUR, rule=rhs_bnd_gen_pow_fun)
# linearized power
def linear_power_fun(b, h):
return (
b.P_T[h]
== sum(b.power_segment[h, l] for l in b.SEGMENTS) + b.Pmin * b.on_off[h]
)
b.linear_power = pyo.Constraint(b.HOUR, rule=linear_power_fun)
# bounds on segment power
def seg_pow_bnd_fun(b, h, l):
if l == 1:
return (
b.power_segment[h, l]
<= (b.power_segment_bounds[l] - b.Pmin) * b.on_off[h]
)
else:
return (
b.power_segment[h, l]
<= (b.power_segment_bounds[l] - b.power_segment_bounds[l - 1])
* b.on_off[h]
)
b.seg_pow_bnd = pyo.Constraint(b.HOUR, b.SEGMENTS, rule=seg_pow_bnd_fun)
# start up and shut down logic (Garver 1962)
def start_up_shut_dw_fun(b, h):
if h == 0:
return b.start_up[h] - b.shut_dw[h] == b.on_off[h] - b.pre_on_off
else:
return b.start_up[h] - b.shut_dw[h] == b.on_off[h] - b.on_off[h - 1]
b.start_up_shut_dw = pyo.Constraint(b.HOUR, rule=start_up_shut_dw_fun)
# either start up or shut down
def start_up_or_shut_dw_fun(b, h):
return b.start_up[h] + b.shut_dw[h] <= 1
b.start_up_or_shut_dw = pyo.Constraint(b.HOUR, rule=start_up_or_shut_dw_fun)
# ramp up limits
def ramp_up_fun(b, h):
"""
h stand for hour
"""
if h == 0:
return (
b.P_T[h]
<= b.pre_P_T
+ b.ramp_up * b.pre_on_off
+ b.ramp_start_up * b.start_up[h]
)
else:
return (
b.P_T[h]
<= b.P_T[h - 1]
+ b.ramp_up * b.on_off[h - 1]
+ b.ramp_start_up * b.start_up[h]
)
b.ramp_up_con = pyo.Constraint(b.HOUR, rule=ramp_up_fun)
# ramp shut down limits
def ramp_shut_dw_fun(b, h):
"""
h stand for hour.
"""
if h == 0:
return b.pre_P_T <= b.Pmax * b.on_off[h] + b.ramp_shut_dw * b.shut_dw[h]
else:
return (
b.P_T[h - 1] <= b.Pmax * b.on_off[h] + b.ramp_shut_dw * b.shut_dw[h]
)
b.ramp_shut_dw_con = pyo.Constraint(b.HOUR, rule=ramp_shut_dw_fun)
# ramp down limits
def ramp_dw_fun(b, h):
"""
h stand for hour.
"""
if h == 0:
return (
b.pre_P_T - b.P_T[h]
<= b.ramp_dw * b.on_off[h] + b.ramp_shut_dw * b.shut_dw[h]
)
else:
return (
b.P_T[h - 1] - b.P_T[h]
<= b.ramp_dw * b.on_off[h] + b.ramp_shut_dw * b.shut_dw[h]
)
b.ramp_dw_con = pyo.Constraint(b.HOUR, rule=ramp_dw_fun)
## add min up and down time constraints
self._add_UT_DT_constraints(b)
## Expression
def prod_cost_fun(b, h):
return b.min_load_cost * b.on_off[h] + sum(
b.F[l] * b.power_segment[h, l] for l in b.SEGMENTS
)
b.prod_cost_approx = pyo.Expression(b.HOUR, rule=prod_cost_fun)
# start up costs
def start_cost_fun(b, h):
return b.start_up_cost * b.start_up[h]
b.start_up_cost_expr = pyo.Expression(b.HOUR, rule=start_cost_fun)
# total cost
def tot_cost_fun(b, h):
return b.prod_cost_approx[h] + b.start_up_cost_expr[h]
b.tot_cost = pyo.Expression(b.HOUR, rule=tot_cost_fun)
return
@staticmethod
def _update_UT_DT(b, implemented_shut_down, implemented_start_up):
"""
This method updates the parameters in the minimum up/down time
constraints based on the implemented shut down and start up events.
Arguments:
b: the block that needs to be updated
implemented_shut_down: realized shut down events: [].
implemented_start_up: realized start up events: []
Returns:
None
"""
pre_shut_down_trajectory_copy = deque([])
pre_start_up_trajectory_copy = deque([])
# copy old trajectory
for t in b.pre_shut_down_trajectory_set:
pre_shut_down_trajectory_copy.append(
round(pyo.value(b.pre_shut_down_trajectory[t]))
)
for t in b.pre_start_up_trajectory_set:
pre_start_up_trajectory_copy.append(
round(pyo.value(b.pre_start_up_trajectory[t]))
)
# add implemented trajectory to the queue
pre_shut_down_trajectory_copy += deque(implemented_shut_down)
pre_start_up_trajectory_copy += deque(implemented_start_up)
# pop out outdated trajectory
while len(pre_shut_down_trajectory_copy) > pyo.value(b.min_dw_time) - 1:
pre_shut_down_trajectory_copy.popleft()
while len(pre_start_up_trajectory_copy) > pyo.value(b.min_up_time) - 1:
pre_start_up_trajectory_copy.popleft()
# actual update
for t in b.pre_shut_down_trajectory_set:
b.pre_shut_down_trajectory[t] = pre_shut_down_trajectory_copy.popleft()
for t in b.pre_start_up_trajectory_set:
b.pre_start_up_trajectory[t] = pre_start_up_trajectory_copy.popleft()
return
@staticmethod
def _update_power(b, implemented_power_output):
"""
This method updates the parameters in the ramping constraints based on
the implemented power outputs.
Arguments:
b: the block that needs to be updated
implemented_power_output: realized power outputs: []
Returns:
None
"""
b.pre_P_T = round(implemented_power_output[-1], 2)
b.pre_on_off = round(int(implemented_power_output[-1] > 1e-3))
return
def update_model(
self, b, implemented_shut_down, implemented_start_up, implemented_power_output
):
"""
This method updates the parameters in the model based on
the implemented power outputs, shut down and start up events.
Arguments:
b: the block that needs to be updated
implemented_shut_down: realized shut down events: [].
implemented_start_up: realized start up events: []
implemented_power_output: realized power outputs: []
Returns:
None
"""
self._update_UT_DT(b, implemented_shut_down, implemented_start_up)
self._update_power(b, implemented_power_output)
return
@staticmethod
def get_implemented_profile(b, last_implemented_time_step):
"""
This method gets the implemented variable profiles in the last optimization
solve.
Arguments:
b: the block
model_var: intended variable name in str
last_implemented_time_step: time index for the last implemented time
step
Returns:
profile: the intended profile, {unit: [...]}
"""
implemented_shut_down = deque(
[pyo.value(b.shut_dw[t]) for t in range(last_implemented_time_step + 1)]
)
implemented_start_up = deque(
[pyo.value(b.start_up[t]) for t in range(last_implemented_time_step + 1)]
)
implemented_power_output = deque(
[pyo.value(b.P_T[t]) for t in range(last_implemented_time_step + 1)]
)
return {
"implemented_shut_down": implemented_shut_down,
"implemented_start_up": implemented_start_up,
"implemented_power_output": implemented_power_output,
}
@staticmethod
def get_last_delivered_power(b, last_implemented_time_step):
"""
Returns the last delivered power output.
Arguments:
None
Returns:
None
"""
return pyo.value(b.P_T[last_implemented_time_step])
def record_results(self, b, date=None, hour=None, **kwargs):
"""
Record the operations stats for the model.
Arguments:
date: current simulation date
hour: current simulation hour
Returns:
None
"""
df_list = []
for t in b.HOUR:
result_dict = {}
result_dict["Generator"] = self.generator
result_dict["Date"] = date
result_dict["Hour"] = hour
# simulation inputs
result_dict["Horizon [hr]"] = int(t)
# model vars
result_dict["Thermal Power Generated [MW]"] = float(
round(pyo.value(b.P_T[t]), 2)
)
result_dict["On/off [bin]"] = int(round(pyo.value(b.on_off[t])))
result_dict["Start Up [bin]"] = int(round(pyo.value(b.start_up[t])))
result_dict["Shut Down [bin]"] = int(round(pyo.value(b.shut_dw[t])))
result_dict["Production Cost [$]"] = float(
round(pyo.value(b.prod_cost_approx[t]), 2)
)
result_dict["Start-up Cost [$]"] = float(
round(pyo.value(b.start_up_cost_expr[t]), 2)
)
result_dict["Total Cost [$]"] = float(round(pyo.value(b.tot_cost[t]), 2))
# calculate mileage
if t == 0:
result_dict["Mileage [MW]"] = float(
round(abs(pyo.value(b.P_T[t] - b.pre_P_T)), 2)
)
else:
result_dict["Mileage [MW]"] = float(
round(abs(pyo.value(b.P_T[t] - b.P_T[t - 1])), 2)
)
for key in kwargs:
result_dict[key] = kwargs[key]
result_df = pd.DataFrame.from_dict(result_dict, orient="index")
df_list.append(result_df.T)
# save the result to object property
# wait to be written when simulation ends
self.result_list.append(pd.concat(df_list))
return
def write_results(self, path):
"""
This methods writes the saved operation stats into an csv file.
Arguments:
path: the path to write the results.
Return:
None
"""
pd.concat(self.result_list).to_csv(path, index=False)
@property
def power_output(self):
return "P_T"
@property
def total_cost(self):
return ("tot_cost", 1)
@property
def default_bids(self):
return self.model_data["Original Marginal Cost Curve"]
@property
def pmin(self):
return self.model_data["PMin MW"]
if __name__ == "__main__":
generator = "102_STEAM_3"
horizon = 4
rts_gmlc_dataframe = pd.read_csv("gen.csv")
solver = pyo.SolverFactory("cbc")
run_tracker = True
run_bidder = True
run_prescient = True
if run_tracker:
# create a tracker model
tracking_model_object = ThermalGenerator(
rts_gmlc_dataframe=rts_gmlc_dataframe,
horizon=horizon,
generator="102_STEAM_3",
)
# make a tracker
thermal_tracker = Tracker(
tracking_model_object=tracking_model_object,
n_tracking_hour=1,
solver=solver,
)
market_dispatch = [30, 40, 50, 70]
thermal_tracker.track_market_dispatch(
market_dispatch=market_dispatch, date="2021-07-26", hour="17:00"
)
thermal_tracker.write_results(path="./")
if run_bidder:
# create a tracker model
bidding_model_object = ThermalGenerator(
rts_gmlc_dataframe=rts_gmlc_dataframe, horizon=48, generator="102_STEAM_3"
)
# create forecaster
price_forecasts_df = | pd.read_csv("lmp_forecasts_concat.csv") | pandas.read_csv |
"""Unittests for the map module."""
import unittest
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pygeos
import pyproj
import geopandas as gpd
import shapely.wkt
import numpy.testing as npt
import gnssmapper.common as cm
import gnssmapper.geo as geo
class TestObservationMethods(unittest.TestCase):
def setUp(self):
self.rays = gpd.GeoSeries([shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]]),
shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])],
crs="epsg:27700")
def test_rays(self) -> None:
r = [[0, 0, 0], [1, 1, 1]]
s = [[10000, 0, 0],[10001, 1, 1]]
expected = [pygeos.Geometry("LineString (0 0 0,1000 0 0)"), pygeos.Geometry("LineString (1 1 1,1001 1 1)")]
out=geo.rays(r,s)
self.assertTrue(np.all(pygeos.predicates.equals(out,expected)))
def test_to_crs(self) -> None:
target = pyproj.crs.CRS(cm.constants.epsg_wgs84)
transformed= geo.to_crs(self.rays,target)
        self.assertTrue(all(s.has_z for s in transformed))
self.assertEqual(target,transformed.crs)
df = gpd.GeoDataFrame(geometry = self.rays,crs=self.rays.crs)
transformed_df = geo.to_crs(df,target)
        self.assertTrue(all(s.has_z for s in transformed_df.geometry))
self.assertEqual(target,transformed_df.crs)
class TestShapelyMethods(unittest.TestCase):
def setUp(self):
self.building = shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")
def test_intersection(self):
five = shapely.geometry.LineString([[527990,183005,0],[528020,183005,15]])
point = geo.intersection([five],[self.building],[10])
self.assertAlmostEqual(np.array(point[0])[2],5)
def test_intersection_projected(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected([fifteen], [self.building])
npt.assert_array_almost_equal(np.array(list(point)[0].coords).flatten(), [528000, 183005, 15])
inside = shapely.geometry.LineString([[528005,183005,10],[528020,183005,25]])
inside_point = geo.intersection_projected([inside], [self.building])
npt.assert_array_almost_equal(np.array(list(inside_point)[0].coords).flatten(), [528010, 183005, 15])
outside = shapely.geometry.LineString([[527990,183015,10],[528020,183015,25]])
outside_point = geo.intersection_projected([outside], [self.building])
self.assertTrue(list(outside_point)[0].is_empty)
empty = shapely.geometry.LineString()
empty_point = geo.intersection_projected([empty], [self.building])
self.assertTrue(list(empty_point)[0].is_empty)
def test_intersection_projected_height(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected_height([fifteen],[self.building])
self.assertAlmostEqual(point[0],15)
def test_intersects(self):
five = shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]])
fifteen = shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])
rays = [five, fifteen]
buildings = [self.building, self.building]
heights=[10,10]
npt.assert_array_almost_equal(geo.intersects(rays,buildings,heights),[True,False])
class TestFresnel(unittest.TestCase):
def setUp(self):
self.buildings = [shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")]
def test_fresnel_integral(self):
v=np.array([-1,0,1,2.4])
o=np.array([-20*np.log(1.12),-20*np.log(0.5),-20*np.log(0.4-(0.1184-0.28**2)**0.5),-20*np.log(0.225/2.4)])
npt.assert_almost_equal(geo.fresnel_integral(v),o)
def test_fresnel_parameter(self):
five = shapely.geometry.LineString([[527990,183005,5],[528020,183005,5]])
point = shapely.geometry.Point([528000,183005,7])
expected= 2 *( 2 / (0.1903 * 10))**0.5
self.assertAlmostEqual(geo.fresnel_parameter([five],[point])[0],expected)
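        # The expected value follows the knife-edge diffraction parameter
        # v = h * sqrt(2 / (wavelength * distance)) with h = 2 m, distance = 10 m and
        # a wavelength of ~0.1903 m (GPS L1); the constants are the ones used above.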
def test_get_fresnel_single(self):
five = shapely.geometry.LineString([[527990,183005,0],[528020,183005,15]])
expected=geo.fresnel_integral([5 *( 2 / (0.1903 * 10))**0.5])
self.assertAlmostEqual(geo.get_fresnel(five,self.buildings,[10]),expected[0])
def test_get_fresnel_multi(self):
        # not yet tested
pass
class TestMapMethods(unittest.TestCase):
def setUp(self):
self.map_box = gpd.GeoDataFrame({'height': [10]},
geometry=[shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")],
crs="epsg:27700",index=[1])
self.map_canyon =gpd.GeoDataFrame({'height': [10,10]},
geometry=list(shapely.wkt.loads("MULTIPOLYGON(((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010)),((528030 183010, 528030 183000,528020 183000, 528020 183010,528030 183010)))")),
crs="epsg:27700",index=[3,4])
self.rays_box = gpd.GeoSeries([shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]]),
shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])],
crs="epsg:27700",index=[1,2])
self.rays_canyon = gpd.GeoSeries([shapely.geometry.LineString([(527990, 183005, 5), (528015, 183005, 5)]),
shapely.geometry.LineString([(528015, 183005, 9), (528035, 183005, 9)])],
crs="epsg:27700",index=[1,2])
def test_map_to_crs(self):
output = geo.map_to_crs(self.map_box, cm.constants.epsg_wgs84)
cm.check.check_type(output,'map',raise_errors=True)
same = geo.map_to_crs(self.map_box, "epsg:27700")
pdt.assert_frame_equal(self.map_box,same,check_dtype=False)
reverted = geo.map_to_crs(output, "epsg:27700")
reverted=reverted.set_geometry(pygeos.geometry.set_precision(reverted.geometry.array.data,1))
pdt.assert_frame_equal(self.map_box,reverted,check_dtype=False,atol=0.1,rtol=0.1)
def test_is_outside(self):
point = self.map_box.geometry.representative_point()
self.assertFalse(np.all(geo.is_outside(self.map_box, point)))
point = self.map_canyon.geometry.representative_point()
self.assertFalse(np.all(geo.is_outside(self.map_canyon, point)))
point_series = gpd.GeoSeries(point.array,crs=self.map_canyon.crs,index=[10,11])
pdt.assert_series_equal(geo.is_outside(self.map_canyon, point_series),pd.Series([False,False],index=[10,11]),check_names=False)
def test_ground_level(self):
point = self.map_box.geometry.representative_point()
point_series = gpd.GeoSeries(point.array,crs=self.map_box.crs,index=[10])
test = geo.ground_level(self.map_box, point)
test_series = geo.ground_level(self.map_box, point_series)
expected = pd.Series([0.0],index=[10])
npt.assert_array_almost_equal(test, expected)
pdt.assert_series_equal(test_series,expected,check_names=False)
def test_is_los(self):
pdt.assert_series_equal(geo.is_los(self.map_box, self.rays_box), pd.Series([True, False],index=[1,2]))
pdt.assert_series_equal(geo.is_los(self.map_canyon, self.rays_canyon), pd.Series([False, False],index=[1,2]))
def test_projected_height(self):
a = geo.projected_height(self.map_canyon,self.rays_canyon)
b = pd.DataFrame(data=[[5,np.nan],[np.nan,9]],columns=[3,4],index=[1,2])
import contextlib
import logging
import os
import random
import re
import string
from collections import defaultdict
import deepmatcher as dm
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from models.ermodel import ERModel
def wrapdm_mojito(model, ignore_columns=['label', 'id']):
def wrapper(dataframe):
data = dataframe.copy().drop([c for c in ignore_columns if c in dataframe.columns], axis=1)
data['id'] = np.arange(len(dataframe))
tmp_name = "./{}.csv".format("".join([random.choice(string.ascii_lowercase) for _ in range(10)]))
data.to_csv(tmp_name, index=False)
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
data_processed = dm.data.process_unlabeled(tmp_name, trained_model=model,
ignore_columns=['ltable_id', 'rtable_id'])
out_proba = model.run_prediction(data_processed, output_attributes=True)
out_proba = out_proba['match_score'].values.reshape(-1)
multi_proba = np.dstack((1 - out_proba, out_proba)).squeeze()
os.remove(tmp_name)
return multi_proba
return wrapper
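# A minimal usage sketch for wrapdm_mojito (the variable names below are placeholders,
# not part of this module):
#
#   model = ...  # a trained dm.MatchingModel
#   predict_fn = wrapdm_mojito(model, ignore_columns=['label', 'id'])
#   proba = predict_fn(pairs_df)   # -> (n, 2) array of [no-match, match] scores
#
# The returned callable follows a predict_proba-style interface, which is what
# perturbation-based explainers typically expect.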
def wrapDm(test_df, model, given_columns=None, ignore_columns=['label', 'id', 'ltable_id', 'rtable_id'],
outputAttributes=True, batch_size=4):
if isinstance(test_df, csr_matrix):
test_df = pd.DataFrame(data=np.zeros(test_df.shape))
if given_columns is not None:
test_df.columns = given_columns
data = test_df.copy().drop([c for c in ignore_columns if c in test_df.columns], axis=1)
names = []
if data.columns[0] == 0:
try:
if given_columns is not None:
data.columns = given_columns
else:
names = model.state_meta.all_left_fields + model.state_meta.all_right_fields
data.columns = names
except:
pass
if not ('id' in data.columns):
data['id'] = np.arange(len(data))
tmp_name = "./{}.csv".format("".join([random.choice(string.ascii_lowercase) for _ in range(10)]))
data.to_csv(tmp_name, index=False)
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
data_processed = dm.data.process_unlabeled(tmp_name, trained_model=model,
ignore_columns=['ltable_id', 'rtable_id', 'label', 'id',
'originalRightId', 'alteredAttributes',
'droppedValues', 'copiedValues'])
predictions = model.run_prediction(data_processed, output_attributes=outputAttributes,
batch_size=batch_size)
out_proba = predictions['match_score'].values
multi_proba = np.dstack((1 - out_proba, out_proba)).squeeze()
os.remove(tmp_name)
if outputAttributes:
if len(names) == 0:
names = list(test_df.columns)
names.extend(['nomatch_score', 'match_score'])
multi_proba_df = pd.DataFrame(multi_proba)
if multi_proba_df.shape[0] != test_df.shape[0]:
multi_proba_df = multi_proba_df.transpose()
multi_proba_df.index = test_df.index
full_df = pd.concat([test_df, multi_proba_df], axis=1, ignore_index=True, names=names)
full_df.columns = names
return full_df
else:
return multi_proba
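# Rough example of calling wrapDm directly (dataframe and model names are placeholders):
#
#   scored = wrapDm(test_df, model, outputAttributes=True, batch_size=32)
#   # 'scored' is test_df with extra 'nomatch_score' and 'match_score' columns;
#   # with outputAttributes=False only the (n, 2) probability array is returned.
#
# Note that the data is round-tripped through a temporary CSV so that
# deepmatcher's data.process_unlabeled can tokenize it.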
def makeAttr(attribute, idx, isLeft):
attr_prefixed = []
for token in attribute.split():
if isLeft:
attr_prefixed.append('L' + str(idx) + '_' + token)
else:
attr_prefixed.append('R' + str(idx) + '_' + token)
return " ".join(attr_prefixed)
def pairs_to_string(df, lprefix, rprefix, ignore_columns=['id', 'label']):
pairs_string = []
l_columns = [col for col in list(df) if (col.startswith(lprefix)) and (col not in ignore_columns)]
r_columns = [col for col in list(df) if col.startswith(rprefix) and (col not in ignore_columns)]
df = df.fillna("")
for i in range(len(df)):
this_row = df.iloc[i]
this_row_str = []
for j, lattr in enumerate(l_columns):
this_attr = makeAttr(str(this_row[lattr]), j, isLeft=True)
this_row_str.append(this_attr)
for k, rattr in enumerate(r_columns):
this_attr = makeAttr(str(this_row[rattr]), k, isLeft=False)
this_row_str.append(this_attr)
pairs_string.append(" ".join(this_row_str))
return pairs_string
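# Illustration of the token encoding produced by pairs_to_string (toy record, assumed values):
# a left attribute value "iphone 7" in column 0 becomes "L0_iphone L0_7" and the matching
# right attribute becomes "R0_iphone R0_7", so one record pair collapses to a single
# whitespace-separated string such as:
#
#   "L0_iphone L0_7 L1_apple R0_iphone R0_7s R1_apple"
#
# makeRow / pairs_str_to_df below invert this encoding back into prefixed columns.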
an_re = re.compile(r'[RL]\d_.+')
def makeRow(pair_str, attributes, lprefix, rprefix):
row_map = defaultdict(list)
for token in pair_str.split():
if an_re.match(token):
row_map[token[:2]].append(token[3:])
row = {}
for key in row_map.keys():
if key.startswith('L'):
## key[1] is the index of attribute
this_attr = lprefix + attributes[int(key[1])]
row[this_attr] = " ".join(row_map[key])
else:
this_attr = rprefix + attributes[int(key[1])]
row[this_attr] = " ".join(row_map[key])
keys = dict.fromkeys(row.keys(), [])
for r in keys: # add any completely missing attribute (with '' value)
if r.startswith(lprefix):
twin_attr = 'r' + r[1:]
if None == row.get(twin_attr):
row[twin_attr] = ''
elif r.startswith(rprefix):
twin_attr = 'l' + r[1:]
if None == row.get(twin_attr):
row[twin_attr] = ''
for a in attributes.values():
try:
if lprefix + a not in row:
row[lprefix + a] = ''
if rprefix + a not in row:
row[rprefix + a] = ''
except ValueError as e:
pass
return pd.Series(row)
def pairs_str_to_df(pairs_str_l, columns, lprefix, rprefix):
lschema = list(filter(lambda x: x.startswith(lprefix), columns))
schema = {}
for i, s in enumerate(lschema):
schema[i] = s.replace(lprefix, "")
allTuples = []
for pair_str in pairs_str_l:
row = makeRow(pair_str, schema, 'ltable_', 'rtable_')
allTuples.append(row)
df = pd.DataFrame(allTuples)
df['id'] = np.arange(len(df))
return df
def pair_str_to_df(pair_str, columns, lprefix, rprefix):
lschema = list(filter(lambda x: x.startswith(lprefix), columns))
schema = {}
for i, s in enumerate(lschema):
schema[i] = s.replace(lprefix, "")
row = makeRow(pair_str, schema, 'ltable_', 'rtable_')
row['id'] = 0
return pd.DataFrame(data=[row.values], columns=row.index)
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_non_keyword_deprecation():
# GH 41485
s = Series(range(5))
msg = (
"In a future version of pandas all arguments of "
"Series.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.where(s > 1, 10, False)
expected = Series([10, 10, 2, 3, 4])
tm.assert_series_equal(expected, result)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
tm.assert_series_equal(s, expected)
# failures
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[[True, False]] = [0, 2, 3]
msg = (
"NumPy boolean array indexing assignment cannot assign 0 input "
"values to the 1 output values where the mask is true"
)
with pytest.raises(ValueError, match=msg):
s[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
],
)
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
msg = (
lambda x: f"cannot set using a {x} indexer with a "
"different length than the value"
)
# slice
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:3] = list(range(27))
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
tm.assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:4:2] = list(range(27))
s = Series(list("abcdef"))
s[0:4:2] = list(range(2))
expected = Series([0, "b", 1, "d", "e", "f"])
tm.assert_series_equal(s, expected)
# neg slices
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[:-1] = list(range(27))
s[-3:-1] = list(range(2))
expected = Series(["a", "b", "c", 0, 1, "f"])
tm.assert_series_equal(s, expected)
# list
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(27))
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(2))
# scalar
s = Series(list("abc"))
s[0] = list(range(10))
expected = Series([list(range(10)), "b", "c"])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
"mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
"item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
"box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series(
[item if use_item else data[i] for i, use_item in enumerate(selection)]
)
s = Series(data)
s[selection] = box(item)
tm.assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
tm.assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
tm.assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
tm.assert_series_equal(rs.dropna(), s[cond])
tm.assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
def test_where_numeric_with_string():
# GH 9280
s = Series([1, 2, 3])
w = s.where(s > 1, "X")
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, ["X", "Y", "Z"])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, np.array(["X", "Y", "Z"]))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
def test_where_timedelta_coerce():
s = Series([1, 2], dtype="timedelta64[ns]")
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
# This only gets run at the end of the season to update the stats
def list_of_specific_files(r_then_file_directory):
files = os.listdir(r_then_file_directory)
new_file = []
for i in files:
new_file.append(i)
return new_file
def year_season():
year = []
for file_number in range(len(list_of_specific_files(r"C:\Users\sabzu\Documents\Fantasy_EPL"))):
year_in_file = list_of_specific_files(r"C:\Users\sabzu\Documents\Fantasy_EPL")[file_number]
split_year_in_file = year_in_file.split(" ")
for i in split_year_in_file:
if 'xlsx' in i:
y = i.split(".")
year.append(y[0])
return year
def weekly_stats(Df, player_name_from_class):
for year in year_season():
weekly = pd.read_excel(rf"C:\Users\sabzu\Documents\Fantasy_EPL\EPL Season {year}.xlsx",
sheet_name=player_name_from_class)
weekly = weekly.iloc[:, 1:9]
weekly.insert(0, "Year", [int(year)] * 38)
df_cols = weekly.columns
selected_cols = df_cols[2:]
lists_of_weekly_stats = []
column = 2
for col in selected_cols:
weekly_stat = []
row = 0
for i in weekly[col]:
if i == weekly.iloc[0, column]:
weekly_stat.append(i)
row += 1
else:
this = int(i) - weekly.iloc[(row - 1), column]
weekly_stat.append(this)
row += 1
lists_of_weekly_stats.append(weekly_stat)
column += 1
for col in selected_cols:
i = 0
weekly[f"Wkly_{col}"] = lists_of_weekly_stats[i]
lists_of_weekly_stats.pop(0)
weekly = weekly[
['Year', 'Mp', 'Wkly_Wins', 'Wkly_Ties', 'Wkly_Loss', 'Wkly_Points',
'Wkly_Tot_GF', 'Wkly_Tot_GA', 'Wkly_GD']]
weekly["Wkly_GF"] = weekly["Wkly_Tot_GF"]
del weekly["Wkly_Tot_GF"]
weekly["Wkly_GA"] = weekly["Wkly_Tot_GA"]
del weekly["Wkly_Tot_GA"]
if Df.empty:
Df = weekly
else:
Df = pd.concat([Df, weekly])
return Df
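# Hedged usage sketch (the workbook layout and player sheet name are assumptions based on
# the calls further down in this script):
#
#   sab = weekly_stats(pd.DataFrame(), "Sabastian")
#   # -> one row per gameweek with Wkly_* columns derived by differencing the
#   #    cumulative totals stored in each season workbook.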
def player_total_stats():
player_totals = pd.DataFrame()
cols, cols2 = df_all_time_player_standings.columns[1:7], df_all_time_player_standings.columns[8:12]
cols = [i for i in cols]
cols2 = [n for n in cols2]
cols = cols+cols2
player = df_all_time_player_standings["Player"].unique()
for i in cols[1:]:
totals = []
for p in player:
pdf = df_all_time_player_standings[df_all_time_player_standings[cols[0]] == p]
summ = sum(pdf[i])
totals.append(summ)
player_totals[i] = totals
player_totals.insert(6,"Pts/G", (player_totals["Pts"] / player_totals["Mp"]).round(2))
player_totals.insert(0,"Player", player)
player_totals.sort_values(["Pts", "GD"], ascending=False, inplace=True)
return player_totals
sab_wkly_stats_df = pd.DataFrame()
sabastian_weekly_stats = weekly_stats(sab_wkly_stats_df, "Sabastian")
df_team_standings = pd.DataFrame()
for year in year_season():
team_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
team_standings = team_standings.iloc[:20, 0:9]
team_standings.insert(0, "Year", [int(year)] * 20)
if df_team_standings.empty:
df_team_standings = team_standings
else:
df_team_standings = pd.concat([df_team_standings, team_standings])
df_team_standings = df_team_standings.sort_values(["Pts", "GD"], ascending=False).reset_index(drop=True)
df_team_Xstandings = pd.DataFrame()
for year in year_season():
if int(year) > 2017:
team_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
team_standings = team_standings.iloc[:20, 10:19]
team_standings.insert(0, "Year", [int(year)] * 20)
if df_team_Xstandings.empty:
df_team_Xstandings = team_standings
else:
df_team_Xstandings = pd.concat([df_team_Xstandings, team_standings])
df_team_Xstandings = df_team_Xstandings.sort_values(["xPts", "xGD"], ascending=False).reset_index(drop=True)
df_all_time_player_standings = pd.DataFrame()
for year in year_season():
mini_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
mini_standings = mini_standings.iloc[0:4, 20:31]
mini_standings["Year"] = [int(year)] * 4
if df_all_time_player_standings.empty:
df_all_time_player_standings = mini_standings
else:
df_all_time_player_standings = pd.concat([df_all_time_player_standings, mini_standings])
df_all_time_player_standings.rename(
columns={"W.1": "W", "D.1": "D", "L.1": "L", "Pts.1": "Pts", "GF.1": "GF", "GA.1": "GA", "GD.1": "GD"},
inplace=True)
df_all_time_player_standings = df_all_time_player_standings.sort_values(["Pts/G", "GD"], ascending=False).reset_index(
drop=True)
df_all_time_player_Xstandings = pd.DataFrame()
for year in year_season():
if int(year) > 2017:
mini_standings = pd.read_excel(
rf"C:\Users\sabzu\Documents\All EPL Project Files\Seasons\Fantasy Premier League {year}.xlsx",
sheet_name="Standings")
mini_standings = mini_standings.iloc[7:11, 20:31]
mini_standings["Year"] = [int(year)] * 4
if df_all_time_player_Xstandings.empty:
df_all_time_player_Xstandings = mini_standings
else:
df_all_time_player_Xstandings = pd.concat([df_all_time_player_Xstandings, mini_standings])
"""Contains dataset importers for NYU Depth Dataset V2 and SYNTHIA-SF"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import tables
from skimage import img_as_float32
from skimage import img_as_float64
from skimage.io import imread
from skimage.transform import resize
from skimage import img_as_ubyte
from skimage import img_as_uint
import segmentation_dics
IMAGE = 0
SEGMENTATION = 1
INSTANCE = 2
DEPTH = 3
TRAIN = 0
VALIDATION = 1
TEST = 2
class DatasetGenerator:
"""Abstract iterator for looping over elements of a dataset .
Arguments:
ratio: fractions of the dataset assigned to the train and validation splits.
The first number is the train fraction, the second the validation fraction,
and whatever remains is used for the test set (the two should sum to at most one).
batch_size: Integer batch size.
repeater: If true, the dataset generator starts generating samples from the beginning when
it reaches the end of the dataset.
shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of the
training.
output_shape: size of generated images and labels.
data_type: data type of features.
label_type: Types of labels to be returned.
"""
def __init__(self,
usage='train',
ratio=(1, 0),
batch_size=1,
repeater=False,
shuffle=True,
output_shape=None,
data_type='float64',
label_type=('segmentation', 'instance', 'depth'),
**kwargs):
self.ratio = kwargs[
'ratio'] if 'ratio' in kwargs else ratio
self.batch_size = kwargs[
'batch_size'] if 'batch_size' in kwargs else batch_size
self.repeater = kwargs['repeater'] if 'repeater' in kwargs else repeater
self.shuffle = kwargs['shuffle'] if 'shuffle' in kwargs else shuffle
self.output_shape = kwargs[
'output_shape'] if 'output_shape' in kwargs else output_shape
self.data_type = kwargs[
'data_type'] if 'data_type' in kwargs else data_type
self.label_type = kwargs[
'label_type'] if 'label_type' in kwargs else label_type
self.dataset = self.data_frame_creator()
self.size = self.dataset.shape[0] - 1
self.start_index = 0
self.end_index = np.int32(np.floor(self.ratio[TRAIN] * self.size))
self.dataset_usage(usage)
self.index = self.start_index
def data_frame_creator(self):
"""Pandas dataFrame for addresses of images and corresponding labels"""
return pd.DataFrame()
def dataset_usage(self, usage):
""" Determines the current usage of the dataset:
- 'train'
- 'validation'
- 'test'
"""
if usage == 'train':
self.start_index = 0
self.end_index = np.int32(np.floor(self.ratio[TRAIN] * self.size))
elif usage == 'validation':
self.start_index = np.int32(np.floor(self.ratio[TRAIN] * self.size))
self.end_index = np.int32(np.floor((self.ratio[TRAIN] + self.ratio[VALIDATION])* self.size))
elif usage == 'test':
self.start_index = np.int32(np.floor((self.ratio[TRAIN] + self.ratio[VALIDATION])* self.size))
self.end_index = self.size
else:
print('Invalid input for usage variable')
raise NameError('InvalidInput')
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
"""Retrieve the next pairs from the dataset"""
if self.index - self.batch_size <= 0:
if not self.repeater:
raise StopIteration
else:
self.index = self.dataset.shape[0] - 1
self.index = self.index - self.batch_size
# loading features(images)
features = imread(self.dataset.loc[:, 'image'].iat[0])[:, :, :3]
if self.output_shape is None:
output_shape = features.shape[:2]
else:
output_shape = self.output_shape
# 1) Resize image to match a certain size.
# 2) Also the input image is converted (from 8-bit integer)
# to 64-bit floating point(->preserve_range=False).
# 3) [:, :, :3] -> to remove 4th channel in png
features = np.array([
resize(image=imread(self.dataset.loc[:, 'image'].iat[i])[:, :, :3],
output_shape=output_shape,
mode='constant',
preserve_range=False,
anti_aliasing=True)
for i in range(self.index, self.index + self.batch_size)
])
if self.data_type == 'float32':
features = img_as_float32(features)
# loading labels(segmentation)
if 'segmentation' in self.label_type:
# 1) Resize segmentation to match a certain size.
# 2) [:, :, :3] -> to remove 4th channel in png
segmentation = np.array([
imread(self.dataset.loc[:, 'segmentation'].iat[i])[:, :, :3]
for i in range(self.index, self.index + self.batch_size)
])
# resize(image=,
# output_shape=output_shape,
# mode='constant',
# preserve_range=True,
# anti_aliasing=True)
# new_segmentation = np.zeros(
# shape=(self.batch_size, output_shape[0], output_shape[1],
# len(self.seg_dic)))
# for i in range(self.batch_size):
# for j in range(output_shape[0]):
# for k in range(output_shape[1]):
# new_segmentation[i, j, k,
# self.seg_dic[
# tuple(segmentation[i, j, k]) ][0]] = 1
# segmentation = new_segmentation
if self.data_type == 'float32':
segmentation = img_as_float32(segmentation)
else:
segmentation = img_as_float64(segmentation)
# if self.label_type == 'depth':
# labels = np.array(
# np.array([
# resize(
# image=imread(self.dataset.iloc[i, 1]),
# output_shape=(480, 640))
# for i in range(self.index, self.index + self.batch_size)
# ]),
# dtype=np.int32)
# labels = (labels[:, :, :, 0] + labels[:, :, :, 1] * 256 +
# labels[:, :, :, 2] * 256 * 256) / ((256 * 256 * 256) - 1)
# elif self.label_type == 'segmentation':
# labels = np.array(
# np.array([
# resize(
# image=imread(self.dataset.iloc[i, 2])[:, :, 0],
# output_shape=(480, 640))
# for i in range(self.index, self.index + self.batch_size)
# ]))
# new_segmentation = np.ndarray(shape=(self.batch_size, 480, 640, 22))
# for i in range(self.batch_size):
# for j in range(480):
# for k in range(640):
# if labels[i, j, k] < 22:
# new_segmentation[i, j, k, int(labels[i, j, k])] = 1
# labels = new_segmentation
# labels = np.array(labels, dtype=np.float32)
# else:
# raise ValueError('invalid label type')
# return features, labels
return features, segmentation
class NewSynthiaSf(DatasetGenerator):
"""Iterator for looping over elements of SYNTHIA-SF backwards."""
def __init__(self, synthia_sf_dir, **kwargs):
self.dataset_dir = synthia_sf_dir
self.max_distance = 1000
super().__init__(**kwargs)
def data_frame_creator(self):
""" pandas dataFrame for addresses of rgb, depth and segmentation"""
sequence_folder = [
'/SEQ1', '/SEQ2', '/SEQ3', '/SEQ4', '/SEQ5', '/SEQ6'
]
rgb_folder = ['/RGBLeft/', '/RGBRight/']
depth_folder = ['/DepthLeft/', '/DepthRight/']
segmentation_folder = ['/GTLeft/', '/GTright/']
rgb_dir = [
self.dataset_dir + sequence_f + rgb_f for rgb_f in rgb_folder
for sequence_f in sequence_folder
]
rgb_data = [
rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)
]
depth_dir = [
self.dataset_dir + sequence_f + depth_f
for depth_f in depth_folder
for sequence_f in sequence_folder
]
depth_data = [
depth_d + depth for depth_d in depth_dir
for depth in os.listdir(depth_d)
]
segmentation_dir = [
self.dataset_dir + sequence_f + segmentation_f
for segmentation_f in segmentation_folder
for sequence_f in sequence_folder
]
segmentation_data = [
segmentation_d + segmentation
for segmentation_d in segmentation_dir
for segmentation in os.listdir(segmentation_d)
]
dataset = {
'RGB': rgb_data,
'DEPTH': depth_data,
'SEGMENTATION': segmentation_data
}
if self.shuffle:
return pd.DataFrame(dataset).sample(frac=1, random_state=123)
return pd.DataFrame(dataset)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
testing.assert_frame_equal(output, expected)
def test_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.Distribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Distribution of sum(X)': [0.25, 0.75],
'Distribution of count(X)': [0.5, 0.5]
},
index=['A', 'B'],
columns=['Distribution of sum(X)', 'Distribution of count(X)'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_where(self):
metric = operations.Distribution('grp', self.sum_x, where='country == "US"')
metric_no_filter = operations.Distribution('grp', self.sum_x)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Read messages and categories data.
Args:
messages_filepath (str): Filepath for a messages csv file.
categories_filepath (str): Filepath for a categories csv file.
Returns:
df (DataFrame): Pandas DataFrame consisting of merged messages and categories.
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages, categories, how="left")
return df
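# Example invocation (the file names below are the conventional ones for this kind of
# disaster-response ETL project and may differ in your setup):
#
#   df = load_data('disaster_messages.csv', 'disaster_categories.csv')
#   df = clean_data(df)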
def clean_data(df):
"""
Cleans messages and categories data.
1) Drops duplicate rows.
2) Splits categories column into separate category columns.
3) Converts category values to just numbers 0 or 1.
4) Replaces categories column in df with new category columns and merges with
the rest of the dataframe.
Args:
df (DataFrame): Merged messages and categories pandas DataFrame.
Returns:
df (DataFrame): Cleaned pandas Dataframe.
"""
df = df.drop_duplicates()
# Split categories into separate category columns.
categories = df["categories"].str.split(";", expand=True)
row = categories.iloc[0, :]
category_colnames = row.str.extract(r"([a-zA-Z _]*)")[0].tolist()
categories.columns = category_colnames
# Convert category values to just numbers 0 or 1.
for col in categories.columns:
categories[col] = categories[col].str.extract(
r"([0-9].*)").astype("int")
# Replace categories column in df with new category columns and merge with df.
df = df.drop(["categories"], axis="columns")
df = pd.concat([df, categories], axis="columns")
return df
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
from app.data_process import bp
from datetime import datetime
import pandas as pd
from pathlib import Path
from bs4 import BeautifulSoup
import glob
import os
positivity_replace = {
'ALG':3526,
'BRN':3527,
'CKH':3540,
'DUR':3530,
'EOH':3558,
'GBH':3533,
'HNH':3534,
'HKP':3535,
'HAL':3536,
'HAM':3537,
'HPE':3538,
'HPH':3539,
'KFL':3541,
'LAM':3542,
'LGL':3543,
'MSL':3544,
'NIA':3546,
'NPS':3547,
'NWR':3549,
'OTT':3551,
'PEL':3553,
'PET':3555,
'PQP':3556,
'WAT':3565,
'REN':3557,
'SMD':3560,
'SWH':3575,
'SUD':3561,
'THB':3562,
'TSK':3563,
'TOR':3595,
'WDG':3566,
'WEK':3568,
'YRK':3570,
'overall':6
}
def get_file_path(data, step='raw', today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + step + '/'
if data['type'] != '':
file_name = data['table_name'] + '_' + today + '.' + data['type']
else:
file_name = data['table_name'] + '_' + today
save_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_path = save_dir + '/' + file_name
return file_path, save_dir
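# Example of the paths this helper produces (the date shown is a placeholder):
#
#   data = {'classification': 'public', 'source_name': 'ontario_gov',
#           'table_name': 'covidtesting', 'type': 'csv'}
#   get_file_path(data, 'processed', '2021-01-31')
#   # -> ('data/public/processed/ontario_gov/covidtesting/covidtesting_2021-01-31.csv',
#   #     'data/public/processed/ontario_gov/covidtesting')
#
# Note that the `today=datetime.today()...` default is evaluated once at import time,
# so long-running processes should pass the date explicitly.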
@bp.cli.command('public_ontario_gov_daily_change_in_cases_by_phu')
def process_public_ontario_gov_daily_change_in_cases_by_phu():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'daily_change_in_cases_by_phu', 'type': 'csv'}
date_field = ['Date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.melt(id_vars='Date')
replace = {
'Algoma_Public_Health_Unit':3526,
'Algoma_District':3526,
'Brant_County_Health_Unit':3527,
'Brant_County':3527,
'Chatham-Kent_Health_Unit':3540,
'Chatham_Kent':3540,
'Durham_Region_Health_Department':3530,
'Durham_Region':3530,
'Eastern_Ontario_Health_Unit':3558,
'Eastern_Ontario':3558,
'Grey_Bruce_Health_Unit':3533,
'Grey_Bruce':3533,
'Haldimand-Norfolk_Health_Unit':3534,
'Haldimand_Norfolk':3534,
'Haliburton,_Kawartha,_Pine_Ridge_District_Health_Unit':3535,
'Haliburton_Kawartha_Pine_Ridge':3535,
'Halton_Region_Health_Department':3536,
'Halton_Region':3536,
'Hamilton_Public_Health_Services':3537,
'City_of_Hamilton':3537,
'Hastings_and_Prince_Edward_Counties_Health_Unit':3538,
'Hastings_Prince_Edward':3538,
'Huron_Perth_District_Health_Unit':3539,
'Huron_Perth':3539,
'Kingston,_Frontenac_and_Lennox_&_Addington_Public_Health':3541,
'KFLA':3541,
'Lambton_Public_Health':3542,
'Lambton_County':3542,
'Leeds,_Grenville_and_Lanark_District_Health_Unit':3543,
'Leeds_Grenville_Lanark':3543,
'Middlesex-London_Health_Unit':3544,
'Middlesex_London':3544,
'Niagara_Region_Public_Health_Department':3546,
'Niagara_Region':3546,
'North_Bay_Parry_Sound_District_Health_Unit':3547,
'North_Bay_Parry_Sound_District':3547,
'Northwestern_Health_Unit':3549,
'Northwestern':3549,
'Ottawa_Public_Health':3551,
'City_of_Ottawa':3551,
'Peel_Public_Health':3553,
'Peel_Region':3553,
'Peterborough_Public_Health':3555,
'Peterborough_County_City':3555,
'Porcupine_Health_Unit':3556,
'Porcupine':3556,
'Region_of_Waterloo,_Public_Health':3565,
'Waterloo_Region':3565,
'Renfrew_County_and_District_Health_Unit':3557,
'Renfrew_County_and_District':3557,
'Simcoe_Muskoka_District_Health_Unit':3560,
'Simcoe_Muskoka_District':3560,
'Southwestern_Public_Health':3575,
'Southwestern':3575,
'Sudbury_&_District_Health_Unit':3561,
'Sudbury_and_District':3561,
'Thunder_Bay_District_Health_Unit':3562,
'Thunder_Bay_District':3562,
'Timiskaming_Health_Unit':3563,
'Timiskaming':3563,
'Toronto_Public_Health':3595,
'Toronto':3595,
'Wellington-Dufferin-Guelph_Public_Health':3566,
'Wellington_Dufferin_Guelph':3566,
'Windsor-Essex_County_Health_Unit':3568,
'Windsor_Essex_County':3568,
'York_Region_Public_Health_Services':3570,
'York_Region':3570,
'Total':6
}
df['HR_UID'] = df['variable'].replace(replace)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
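# These bp.cli commands are registered on the data-processing blueprint, so (assuming the
# usual FLASK_APP setup for this project and a blueprint named data_process) they would be
# run from the shell, e.g.:
#
#   flask data_process public_ontario_gov_daily_change_in_cases_by_phu
#
# Each command re-processes every raw csv found for its table and writes the cleaned copy
# under data/<classification>/processed/.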
@bp.cli.command('public_ontario_gov_conposcovidloc')
def process_public_ontario_gov_conposcovidloc():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'conposcovidloc', 'type': 'csv'}
field_map = {
"Row_ID":"row_id",
"Accurate_Episode_Date": "accurate_episode_date",
"Case_Reported_Date": "case_reported_date",
"Specimen_Date": "specimen_reported_date",
"Test_Reported_Date": "test_reported_date",
"Age_Group":"age_group",
"Client_Gender":"client_gender",
"Case_AcquisitionInfo": "case_acquisition_info",
"Outcome1": "outcome_1",
"Outbreak_Related": "outbreak_related",
"Reporting_PHU": "reporting_phu",
"Reporting_PHU_Address": "reporting_phu_address",
"Reporting_PHU_City": "reporting_phu_city",
"Reporting_PHU_Postal_Code": "reporting_phu_postal_code",
"Reporting_PHU_Website": "reporting_phu_website",
"Reporting_PHU_Latitude":"reporting_phu_latitude",
"Reporting_PHU_Longitude": "reporting_phu_longitude",
}
date_field = ['accurate_episode_date', 'case_reported_date', 'specimen_reported_date', 'test_reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.replace("12:00:00 AM", None)
df = df.rename(columns=field_map)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_vaccination')
def process_public_ontario_gov_vaccination():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'vaccination', 'type': 'csv'}
date_field = ['date']
field_map = {
'report_date': 'date'
}
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df.dropna(how='all', axis=1, inplace=True)
df.dropna(how='any', inplace=True)
for index, row in df.iterrows():
if type(row['previous_day_total_doses_administered'])==str:
df.at[index,'previous_day_total_doses_administered'] = row['previous_day_total_doses_administered'].replace(",","")
if type(row['total_doses_administered'])==str:
df.at[index,'total_doses_administered'] = row['total_doses_administered'].replace(",","")
if type(row['total_doses_in_fully_vaccinated_individuals'])==str:
df.at[index,'total_doses_in_fully_vaccinated_individuals'] = row['total_doses_in_fully_vaccinated_individuals'].replace(",","")
if type(row['total_individuals_fully_vaccinated'])==str:
df.at[index,'total_individuals_fully_vaccinated'] = row['total_individuals_fully_vaccinated'].replace(",","")
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_covidtesting')
def process_public_ontario_gov_covidtesting():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'covidtesting', 'type': 'csv'}
date_field = ['reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
to_include = []
for column in df.columns:
name = column.replace(' ','_').lower()
df[name] = df[column]
to_include.append(name)
df = df[to_include]
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_call_reports')
def process_confidential_211_call_reports():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'call_reports', 'type': 'csv'}
field_map = {
"CallReportNum":"call_report_num",
"CallDateAndTimeStart": "call_date_and_time_start",
"Demographics of Inquirer - Age Category": "age_of_inquirer"
}
date_field = ['call_date_and_time_start']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_met_and_unmet_needs')
def process_confidential_211_met_and_unmet_needs():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'met_and_unmet_needs', 'type': 'csv'}
field_map = {
'DateOfCall':'date_of_call',
'ReportNeedNum':'report_need_num',
'AIRSNeedCategory':'airs_need_category'
}
date_field = ['date_of_call']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
import pandas.core.ops as ops
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
ids=["add", "mul"],
)
def test_add_mul(dtype, opname, exp):
a = pd.array([0, 1, None, 3, 4], dtype=dtype)
b = pd.array([1, 2, 3, None, 5], dtype=dtype)
# array / array
expected = pd.array(exp, dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
op = getattr(ops, "r" + opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
from math import ceil
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, pearsonr
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins
import matplotlib
import matplotlib.patches as patches
#from mpld3.plugins import PluginBase
#import jinja2
#import json
from genda.formats import grab_gene_location
from genda.plotting import (make_rectangle, should_not_plot)
def dosage_round(geno, threshold = 0.5):
""" Rounds dosage to threshold
"""
geno[ geno < 1 - threshold] = 0
geno[np.logical_and(geno >= 1-threshold, geno <= 1 + threshold)] = 1
geno[ geno > 1 + threshold] = 2
return geno
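# Small worked example: with the default threshold of 0.5, dosages in [0.5, 1.5] are
# treated as heterozygous (1), anything below as 0 and anything above as 2, e.g.
#
#   dosage_round(np.array([0.1, 0.9, 1.4, 1.8]))  ->  array([0., 1., 1., 2.])
#
# Note the rounding is done in place on the array that is passed in.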
def multiple_snp_aei_test(geno, outliers, allelic_ration, num_threshold=5):
"""
"""
raise NotImplementedError
def single_snp_aei_test(geno, outliers, allelic_ratio, num_threshold=5):
"""
"""
geno = geno[np.logical_not(outliers)]
het_combined = allelic_ratio[np.array(geno == 1)]
homo_combined = allelic_ratio[np.array(np.logical_or(geno==0, geno==2))]
if len(het_combined) < num_threshold or len(homo_combined) < num_threshold:
return(1)
else:
return(ttest_ind(het_combined, homo_combined, equal_var=False)[1])
class AEI_object(object):
""" A class storing the aei data for a particular gene.
"""
def __init__(self, aei_pvalues, gene_name, annot_table,
sufficient_hets, matrix_eQTL):
self.aei_pvalues = aei_pvalues
self.gene_name = gene_name
self.annot_table = annot_table.ix[aei_pvalues.index, :]
self.sufficient_hets = sufficient_hets
self.meQTL = matrix_eQTL
self.gene_names = 'Nothing'
def aei_bar_plot(self, dosage, cis_snp, tag_snp, gene_name=None):
""" AEI barplot
"""
nplots = 1
color = dosage_round(dosage.ix[cis_snp, self.hets_dict[tag_snp]])
fig, ax = plt.subplots(nrows=nplots, ncols=1, figsize=(12, 4*nplots),
sharey=False, sharex=True,
subplot_kw=dict(axisbg='#FFFFFF'))
if gene_name:
title = ('AEI at tag %s for %s and\n'
'colored by genotype at %s') % (gene_name, tag_snp, cis_snp)
else:
title = "AEI at tag %s and\ncolored by genotype at %s" % (tag_snp,
cis_snp)
ax.set_title(title, fontsize=20)
ax.set_xlabel('Samples', fontsize=15)
ax.set_ylabel('Allelic Fraction ($log_{2}$)', fontsize=15)
width = 0.5
allelic_ratio = self.ratios.ix[self.hets_dict[tag_snp],
tag_snp]
allelic_ratio_i = np.argsort(allelic_ratio.values)
allelic_ratio = np.log2(allelic_ratio.iloc[allelic_ratio_i])
outliers = np.logical_not(np.logical_or(
allelic_ratio < -3.0 ,
allelic_ratio > 3.0
))
color_geno = []
color = color[allelic_ratio_i][outliers]
for i in color:
if i == 0 or i == 2:
color_geno.append('green')
else:
color_geno.append('orange')
allelic_ratio = allelic_ratio[outliers]
ind = np.arange(len(allelic_ratio))
rects1 = ax.bar(ind, allelic_ratio, width, color = color_geno)
ax.set_xlim((-1, len(allelic_ratio+1)))
return(fig)
def aei_plot(self, snp_plot=None, n_sufficient_hets=50, common_only=False):
""" AEI plots in mpld3
"""
x_scale=1e6
size_maf =((200 * self.maf) + 20)
cm = plt.cm.get_cmap('winter')
if type(snp_plot) == pd.Series or type(snp_plot) == list:
suff_hets = pd.Series(snp_plot, index = pd.Index(snp_plot))
else:
suff_hets = self.sufficient_hets[
np.logical_and(self.sufficient_hets >= n_sufficient_hets,
self.overall_counts.sum(axis=1)>=500)]
nplots = len(suff_hets) + 2
pos = self.meQTL.loc[:, 'pos']
pos = np.asarray(pos, dtype=np.uint64)/x_scale
min_x = np.min(pos)
max_x = np.max(pos)
#range_x = max_x - min_x
text_x = min_x
fig, ax = plt.subplots(nrows=int(ceil(nplots/2.0)) , ncols=2, figsize=(24, 4*nplots/2),
sharey=False, sharex=True, subplot_kw=dict(axisbg='#EEEEEE'))
ko = 0
print(int(ceil(len(suff_hets)/2.0)))
for j in range(2):
io = 0
for i in range(int(ceil(len(suff_hets)/2.0))):
if ko < len(suff_hets):
curr = suff_hets.index[ko]
adj_pvalue = -1*np.log10(self.aei_pvalues.loc[:, curr])
scatter = ax[io, j].scatter(pos,
adj_pvalue, s=30)
ax[io, j].set_ylabel(r'-1*$log_{10}$ p-value', fontsize=15)
ax[io, j].set_xlabel('Genomic Position (mb)', fontsize=15)
ax[io, j].set_title('AEI plot for %s (N=%i)' %
(curr,
self.overall_counts.ix[suff_hets.index[ko], 'Nhets']), fontsize=25)
# Need to make the text relative positioning
#labels = list(self.annot_table.index)
#tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)
ko += 1
io += 1
else:
pass
scatter = ax[-1, j].scatter(pos,
-1*np.log10(self.meQTL.ix[: , 'p-value']),
c=self.ld,
s=size_maf, cmap=cm)
ax[-1, j].set_ylabel('-1*$log_{10}$ p-value', fontsize=15)
ax[-1, j].set_xlabel('Genomic Position (mb)', fontsize=15)
ax[-1, j].set_title('%s eQTL plot' % (self.gene_name,), fontsize=25)
labels = list(self.annot_table.index)
tooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)
mpld3.plugins.connect(fig, tooltip, plugins.LinkedBrush(scatter))
fig.tight_layout()
return(fig)
def combind_aei_with_dosage(aei, dosage, snp):
""" aei - aei dataframe
dosage = dosage dataframe
"""
new_columns = [i[0] for i in aei][::4]
hets = dosage_round(dosage.ix[snp,:])[new_columns]
hets = hets[hets == 1]
return aei.ix[snp, hets.index]
def aei_test_2(full, aei_df, annot_table, gene_snps, gene, num_threshold=5):
""" Calculates aei for all heterozygous SNPs within a gene across all cis-eQTL SNPs.
Parameters
----------
full - matrixeQTL results object containing multiple population groups
aei_df - allelic expression dataframe
annot_table - SNP annotation table
gene_snps - a dictionary mapping each gene name to the SNPs within that gene
gene - gene of interest
Returns
-------
AEI object
References
---------
"""
base_pair_to_index = {'A':0, 'C': 1, 'G': 2, 'T': 3}
gene_i = full.euro.ix[:, 1] == gene
g_meQTL = full.euro.ix[gene_i, :]
g_meQTL = pd.merge(annot_table, g_meQTL, right_on="SNP", left_index=True,
sort=False, how='inner')
snps_cis = g_meQTL.loc[:, 'SNP']
new_columns = [i[0] for i in aei_df][::4]
not_indel = [i for i in gene_snps[gene] if len(annot_table.ix[i, 'a1']) ==1]
#comb_dosage = pd.concat([full.euro_dos, full)
hets = dosage_round(full.euro_dos.ix[not_indel,:])[new_columns]
pvalues_out = np.zeros((len(snps_cis), len(not_indel)), dtype=np.float64)
sufficient_hets = pd.Series(data=np.repeat(0, len(not_indel)),
index=pd.Index(not_indel),
dtype=np.int32)
aei_ratios= pd.DataFrame(np.zeros((len(new_columns), len(not_indel)), dtype=np.uint32),
index=pd.Index(new_columns), columns=pd.Index(not_indel))
overall_counts = pd.DataFrame(np.zeros((len(not_indel), 2), dtype=np.uint32),
index=pd.Index(not_indel))
hets_dict = {}
maf = calculate_minor_allele_frequency(full.euro_dos.ix[snps_cis, new_columns])
snp_interest = np.nanargmin(g_meQTL['p-value'])
snp_interest = g_meQTL['SNP'].iloc[snp_interest]
g_meQTL.index = pd.Index(g_meQTL['SNP'])
outliers = []
for j, i in enumerate(not_indel):
try:
REF = base_pair_to_index[annot_table.ix[i, 'a0']]
ALT = base_pair_to_index[annot_table.ix[i, 'a1']]
except KeyError:
print('Indel skipping')
hets_j = hets.ix[j,:]
hets_j = hets_j[np.logical_and(hets_j == 1.0,
np.logical_not(pd.isnull(hets_j)))]
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 20:54:46 2019
@author: AKASH
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
from pandas import DataFrame
urlname = "https://www.flipkart.com/laptops/~buyback-guarantee-on-laptops-/pr?sid=6bo%2Cb5g&uniq"
try:
page = urlopen(urlname)
except:
print("Error in opening the page")
soup = BeautifulSoup(page,'html.parser')
print(soup)
products = []
prices = []
for i in soup.findAll('a',href=True, attrs={'class':'_31qSD5'}):
name=i.find('div',{'class':'_3wU53n'})
price=i.find('div',{'class':'_1vC4OE _2rQ-NK'})
products.append(name.text)
prices.append(price.text)
df = DataFrame({'Products': products,'Prices':prices})
def load(path, bucket_name = 'bme-bucket'):
import io
import pickle
import boto3
s3_client = boto3.client('s3')
array = io.BytesIO()
s3_client.download_fileobj(bucket_name, path, array)
array.seek(0)
return pickle.load(array)
def df_to_csv_on_s3(dataframe, filename, DESTINATION = 'bme-bucket'):
import boto3
from io import StringIO
csv_buffer = StringIO()
dataframe.to_csv(csv_buffer, index=False)
s3_resource = boto3.resource("s3") # Create S3 object
return s3_resource.Object(DESTINATION, filename).put(Body=csv_buffer.getvalue()) # Write buffer to S3 object
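# Hedged round-trip example for the two S3 helpers above (the key names are placeholders;
# both helpers assume AWS credentials are already configured for boto3):
#
#   df_to_csv_on_s3(my_df, 'engineered_data/my_table.csv')
#   pickled_obj = load('models/my_model.pkl')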
def get_merged_perfiles_fondos():
import pandas as pd
import s3fs
s3 = s3fs.S3FileSystem()
file = 's3://bme-bucket/engineered_data/merged_perfiles_fondos.csv'
if s3.exists(file):
result = pd.read_csv(file)
# ARIMA time-series model
import pandas as pd
import numpy as np
import math
from numpy import array
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
forecastnum = 5
df = | pd.read_csv("F:\\test.csv", encoding='gbk') | pandas.read_csv |
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError
class Pdac(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
version (str, optional): The version number to load, or the string "latest" to just load the latest building. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0"]
data_files = {
"1.0": [
"clinical_table_140.tsv.gz",
"microRNA_TPM_log2_Normal.cct.gz",
"microRNA_TPM_log2_Tumor.cct.gz",
"meta_table_140.tsv.gz",
"mRNA_RSEM_UQ_log2_Normal.cct.gz",
"mRNA_RSEM_UQ_log2_Tumor.cct.gz",
"PDAC_mutation.maf.gz",
"phosphoproteomics_site_level_MD_abundance_normal.cct.gz",
"phosphoproteomics_site_level_MD_abundance_tumor.cct.gz",
"proteomics_gene_level_MD_abundance_normal.cct.gz",
"proteomics_gene_level_MD_abundance_tumor.cct.gz",
"RNA_fusion_unfiltered_normal.tsv.gz",
"RNA_fusion_unfiltered_tumor.tsv.gz",
"SCNA_log2_gene_level.cct.gz"],
}
# Call the parent class __init__ function
super().__init__(cancer_type="pdac", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
mark_normal = lambda s: s + ".N"
remove_type_tag = lambda s: s[:-2] # remove _T and similar tags from end of string
if file_name == "clinical_table_140.tsv.gz": # Note that we use the "file_name" variable to identify files. That way we don't have to use the whole path.
df = | pd.read_csv(file_path, sep='\t', index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 24 15:51:47 2017
@author: jsuter
Project: Language Level Analysis and Classification
Seminar "Educational Assessment for Language Technology"
WS 2015/16, <NAME>
<NAME>, January 2019
-----------------------------------------------------------------
language_level_features.py
- load the linguistic features for language level texts
- load the baseline features for language level texts
"""
# Import statements
import os
import glob
import pandas as pd
import numpy as np
# Author dictionary for matching abbreviating with author name
author_dict = {'KA':'Kafka',
'KL':'Kleist',
'SCHN':'Schnitzler',
'ZW':'Zweig',
'HOFF':'Hoffmann',
'TWA':'Twain',
'TCK':'Tieck',
'GTTH':'Gotthelf',
'EICH':'Eichendorff',
'KEL':'Keller',
'SPY':'Spyri',
'BIE':'Bierbaum',
'DAUT':'Dauthendey',
'FON':'Fontane',
'GANG':'Ganghofer',
'GER':'Gerstäcker',
'GRI':'Grimm',
'HALT':'Haltrich',
'HEB':'Hebbel',
'JEA':'<NAME>',
'MAY':'May',
'POE':'Poe',
'RAA':'Raabe',
'SCHE':'Scheerbart',
'SCHW':'Schwab',
'STI':'Stifter',
'STO':'Storm',
'THO':'Thoma'}
def load_data(dataset_name="lang_levels", baseline=False):
"""Load dataset with language level features and return respective features and solutions.
Keyword arguments:
dataset_name (string) -- which dataset to use ("lang_levels" or "classical_lit")
baseline (Boolean) -- whether or not to return baseline features
Return:
dataset (np.array) -- feature array with linguistic/baseline features
labels (np.array) -- solutions/labels for samples
label_set (list) -- list of strings representing labels/classes"""
# Set the data path
data_dir = '../3_Text_features/'
# If "language levels" data set is used
if dataset_name == "lang_levels":
# Set data path and label set
data_dir = data_dir + "Features/"
label_set = ["A1", "A2", "B1", "B2"]
# If "classical literature"
else:
# Set data path and label set
data_dir = data_dir + "Literature_Features/"
label_set = os.listdir(data_dir)
# Get sample file
sample_file = os.listdir(data_dir+"/"+label_set[0])[0]
# Get feature names
feature_names = pd.read_csv(data_dir+label_set[0]+"/"+sample_file, usecols = ["Feature"])
feature_names = [elem[0] for elem in feature_names.values]
# Initalize lists
labels = []
frames = []
# For each label, retrieve data from csv files
for i, label in enumerate(label_set):
# Get all csv files
all_files = glob.glob(os.path.join(data_dir+label, "*.csv"))
# Get and concatenate dataframe structure from all files
df_from_each_file = (pd.read_csv(f, usecols = ['Value']) for f in all_files)
concatenated_df = | pd.concat(df_from_each_file, axis=1, ignore_index=True) | pandas.concat |
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
NaT,
Timestamp,
)
import pandas._testing as tm
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import _str_escape
@pytest.fixture
def df():
return DataFrame(
data=[[0, -0.609], [1, -1.228]],
columns=["A", "B"],
index=["x", "y"],
)
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0)
def test_display_format(styler):
ctx = styler.format("{:0.1f}")._translate(True, True)
assert all(["display_value" in c for c in row] for row in ctx["body"])
assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
def test_format_dict(styler):
ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.0"
assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_format_string(styler):
ctx = styler.format("{:.2f}")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.00"
assert ctx["body"][0][2]["display_value"] == "-0.61"
assert ctx["body"][1][1]["display_value"] == "1.00"
assert ctx["body"][1][2]["display_value"] == "-1.23"
def test_format_callable(styler):
ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "pos"
assert ctx["body"][0][2]["display_value"] == "neg"
assert ctx["body"][1][1]["display_value"] == "pos"
assert ctx["body"][1][2]["display_value"] == "neg"
def test_format_with_na_rep():
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = df.style.format(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "110.00%"
assert ctx["body"][1][2]["display_value"] == "120.00%"
ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_format_non_numeric_na():
# GH 21527 28358
df = DataFrame(
{
"object": [None, np.nan, "foo"],
"datetime": [None, NaT, Timestamp("20120101")],
}
)
with tm.assert_produces_warning(FutureWarning):
ctx = df.style.set_na_rep("NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
assert ctx["body"][1][1]["display_value"] == "NA"
assert ctx["body"][1][2]["display_value"] == "NA"
ctx = df.style.format(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "-"
def test_format_clear(styler):
assert (0, 0) not in styler._display_funcs # using default
styler.format("{:.2f")
assert (0, 0) in styler._display_funcs # formatter is specified
styler.format()
assert (0, 0) not in styler._display_funcs # formatter cleared to default
@pytest.mark.parametrize(
"escape, exp",
[
("html", "<>&"%$#_{}~^\\~ ^ \\ "),
(
"latex",
'<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
"\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
"\\textbackslash \\space ",
),
],
)
def test_format_escape_html(escape, exp):
chars = '<>&"%$#_{}~^\\~ ^ \\ '
df = DataFrame([[chars]])
s = Styler(df, uuid_len=0).format("&{0}&", escape=None)
expected = f'<td id="T__row0_col0" class="data row0 col0" >&{chars}&</td>'
assert expected in s.render()
# only the value should be escaped before passing to the formatter
s = Styler(df, uuid_len=0).format("&{0}&", escape=escape)
expected = f'<td id="T__row0_col0" class="data row0 col0" >&{exp}&</td>'
assert expected in s.render()
def test_format_escape_na_rep():
# tests the na_rep is not escaped
df = DataFrame([['<>&"', None]])
s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&")
    ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>'
expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>'
assert ex in s.render()
assert expected2 in s.render()
def test_format_escape_floats(styler):
# test given formatter for number format is not impacted by escape
s = styler.format("{:.1f}", escape="html")
for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]:
assert expected in s.render()
# tests precision of floats is not impacted by escape
s = styler.format(precision=1, escape="html")
for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]:
assert expected in s.render()
@pytest.mark.parametrize("formatter", [5, True, [2.0]])
def test_format_raises(styler, formatter):
with pytest.raises(TypeError, match="expected str or callable"):
styler.format(formatter)
def test_format_with_precision():
# Issue #13257
df = DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"])
s = Styler(df)
ctx = s.format(precision=1)._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "1.0"
assert ctx["body"][0][2]["display_value"] == "2.0"
assert ctx["body"][1][1]["display_value"] == "3.2"
assert ctx["body"][1][2]["display_value"] == "4.6"
ctx = s.format(precision=2)._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "1.00"
assert ctx["body"][0][2]["display_value"] == "2.01"
assert ctx["body"][1][1]["display_value"] == "3.21"
assert ctx["body"][1][2]["display_value"] == "4.57"
ctx = s.format(precision=3)._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "1.000"
assert ctx["body"][0][2]["display_value"] == "2.009"
assert ctx["body"][1][1]["display_value"] == "3.212"
assert ctx["body"][1][2]["display_value"] == "4.566"
def test_format_subset():
df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
ctx = df.style.format(
{"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :]
)._translate(True, True)
expected = "0.1"
raw_11 = "1.123400"
assert ctx["body"][0][1]["display_value"] == expected
assert ctx["body"][1][1]["display_value"] == raw_11
assert ctx["body"][0][2]["display_value"] == "12.34%"
ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True)
assert ctx["body"][0][1]["display_value"] == expected
assert ctx["body"][1][1]["display_value"] == raw_11
ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True)
assert ctx["body"][0][1]["display_value"] == expected
assert ctx["body"][0][2]["display_value"] == "0.123400"
ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True)
assert ctx["body"][0][1]["display_value"] == expected
assert ctx["body"][1][1]["display_value"] == raw_11
ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate(
True, True
)
assert ctx["body"][0][1]["display_value"] == expected
assert ctx["body"][1][1]["display_value"] == "1.1"
assert ctx["body"][0][2]["display_value"] == "0.123400"
assert ctx["body"][1][2]["display_value"] == raw_11
@pytest.mark.parametrize("formatter", [None, "{:,.1f}"])
@pytest.mark.parametrize("decimal", [".", "*"])
@pytest.mark.parametrize("precision", [None, 2])
def test_format_thousands(formatter, decimal, precision):
s = DataFrame([[1000000.123456789]]).style # test float
result = s.format(
thousands="_", formatter=formatter, decimal=decimal, precision=precision
)._translate(True, True)
assert "1_000_000" in result["body"][0][1]["display_value"]
s = DataFrame([[1000000]]).style # test int
result = s.format(
thousands="_", formatter=formatter, decimal=decimal, precision=precision
)._translate(True, True)
assert "1_000_000" in result["body"][0][1]["display_value"]
s = DataFrame([[1 + 1000000.123456789j]]).style # test complex
result = s.format(
thousands="_", formatter=formatter, decimal=decimal, precision=precision
)._translate(True, True)
assert "1_000_000" in result["body"][0][1]["display_value"]
@pytest.mark.parametrize("formatter", [None, "{:,.4f}"])
@pytest.mark.parametrize("thousands", [None, ",", "*"])
@pytest.mark.parametrize("precision", [None, 4])
def test_format_decimal(formatter, thousands, precision):
s = DataFrame([[1000000.123456789]]).style # test float
result = s.format(
decimal="_", formatter=formatter, thousands=thousands, precision=precision
)._translate(True, True)
assert "000_123" in result["body"][0][1]["display_value"]
s = DataFrame([[1 + 1000000.123456789j]]).style # test complex
result = s.format(
decimal="_", formatter=formatter, thousands=thousands, precision=precision
)._translate(True, True)
assert "000_123" in result["body"][0][1]["display_value"]
def test_str_escape_error():
msg = "`escape` only permitted in {'html', 'latex'}, got "
with pytest.raises(ValueError, match=msg):
_str_escape("text", "bad_escape")
with pytest.raises(ValueError, match=msg):
| _str_escape("text", []) | pandas.io.formats.style_render._str_escape |
import numpy as np
import pandas as pd
# Read the data. Data is already loaded in the variable `path` use the `delimeter = ';'`.
df = | pd.read_csv(path, delimiter=';') | pandas.read_csv |
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user import get_user_function
def get_line_chart(function=None):
positions = get_positions(check_submitter=False)
budget = get_budget()
columns = [row.keys() for row in positions]
positions = pd.DataFrame(positions, columns=columns[0])
budget = pd.DataFrame(budget, columns=columns[0])
if function:
if function != 'All':
positions = positions.loc[positions['function'] == function]
budget = budget.loc[budget['function'] == function]
if g.user['type'] != 'ADMIN' and function == 'All':
functions = get_user_function(g.user['id'])
function_names = [get_function(function['function_id'])['name'] for function in functions]
positions = positions.loc[positions['function'].isin(function_names)]
budget = budget.loc[budget['function'].isin(function_names)]
positions['FTE'] = pd.to_numeric(positions['hours'], errors='coerce') / 40
budget['FTE'] = pd.to_numeric(budget['hours'], errors='coerce') / 40
positions['salary'] = pd.to_numeric(positions['salary'], errors='coerce')
positions['fringe_benefit'] = pd.to_numeric(positions['fringe_benefit'], errors='coerce')
positions['social_security_contribution'] = | pd.to_numeric(positions['social_security_contribution'], errors='coerce') | pandas.to_numeric |
import logging
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from .regressor_classifier_base import RegressorClassifierBase
__all__ = ['proba_drifting','EventClassifier']
def proba_drifting(x):
"""
gives more weight to outliers -- i.e. close to 0 and 1
the polynomial was constructed with the following constraints:
• f(0) = 0
• f(0.5) = 0.5
• f(1) = 1
• f'(0) = 0
• f'(0.5) = 1
• f'(1) = 0
"""
return 10 * x ** 3 - 15 * x ** 4 + 6 * x ** 5
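# Illustrative check (not part of the original module): the polynomial keeps the anchor points
# 0 -> 0, 0.5 -> 0.5 and 1 -> 1 while pushing intermediate values towards the extremes,
# e.g. proba_drifting(0.2) ~= 0.058 and proba_drifting(0.8) ~= 0.942, so confident images
# dominate the weighted average taken in predict_proba_by_event below.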
class EventClassifier(RegressorClassifierBase):
def __init__(self, classifier=RandomForestClassifier,
cam_id_list=("cam"), **kwargs):
super().__init__(model=classifier, cam_id_list=cam_id_list, **kwargs)
def predict_proba_by_event(self, X):
predict_proba = []
for evt in X:
tel_probas = None
tel_weights = []
for cam_id, tels in evt.items():
these_probas = self.model_dict[cam_id].predict_proba(tels)
if tel_probas is not None:
tel_probas = np.append(these_probas, tel_probas, axis=0)
else:
tel_probas = these_probas
try:
# if a `namedtuple` is provided, we can weight the
# different images using some of the provided features
tel_weights += [t.sum_signal_cam / t.impact_dist for t in
tels]
except AttributeError:
# otherwise give every image the same weight
tel_weights += [1] * len(tels)
predict_proba.append(
np.average(proba_drifting(tel_probas),
weights=tel_weights,
axis=0)
)
return np.array(predict_proba)
def predict_by_event(self, X):
proba = self.predict_proba_by_event(X)
predictions = self.classes_[np.argmax(proba, axis=1)]
return predictions
def compute_Qfactor(self, proba, labels: int, nbins):
"""
Compute Q-factor for each gammaness (bin edges are 0 - 1)
Parameters
----------
proba: predicted probabilities to be a gamma!
labels: true labels
nbins: number of bins for gammaness
Returns
-------
Q-factor array
"""
bins = np.linspace(0, 1, nbins)
# assuming labels are 0 for protons, 1 for gammas
# np.nonzero function return indexes
gammas_idx = np.nonzero(proba * labels)
gammas = proba[gammas_idx]
hadrons_idx = np.nonzero(proba * np.logical_not(labels))
hadrons = proba[hadrons_idx]
# tot number of gammas
Ng = len(gammas)
# of protons
Nh = len(hadrons)
# binning and cumsum for gammas
gbins = | pd.cut(gammas, bins) | pandas.cut |
import pandas as pd
import sys
import datetime
import logging
from typing import Optional
def create_logger(
log_version_name: str,
logger_name: Optional[str] = 'Log',
log_path: Optional[str] = '../logs') -> logging.Logger:
'''Function to create a logger
Examples
-----------
>>> logger = create_logger(log_version_name)
>>> logger.info('Hello World')
Parameters
------------
log_version_name : the name of the log files
logger_name : if you want to set up different\
logger, please define its name here
log_path : directory path of log files
Returns
------------
logger : logger object
'''
format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format_str,
filename=f'{log_path}/{log_version_name}.log')
logger = logging.getLogger(logger_name)
return logger
def main(logger):
TOP = 200
dfs = []
for i in range(TOP):
df = pd.read_html(f'https://kabutan.jp/warning/?mode=2_1&market=0&capitalization=-1&stc=&stm=0&page={i+1}')
if df[2].shape[0] == 0:
break
dfs.append(df[2])
dfs_down = []
for i in range(TOP):
df_down = pd.read_html(f'https://kabutan.jp/warning/?mode=2_2&market=0&capitalization=-1&stc=&stm=0&page={i+1}')
if df_down[2].shape[0] == 0:
break
dfs_down.append(df_down[2])
df_all = pd.concat(dfs, axis=0)
df_all.reset_index(inplace=True, drop=True)
df_all = df_all[['コード', '銘柄名', '市場', '株価',
'前日比', '前日比.1', '出来高', 'PER', 'PBR', '利回り']]
df_all.columns = ['コード', '銘柄名', '市場', '株価',
'前日比_価格差', '前日比_変化率', '出来高', 'PER', 'PBR', '利回り']
df_all['date'] = datetime.date.today()
df_all['weekday'] = datetime.date.today().weekday()
df_all_down = | pd.concat(dfs_down, axis=0) | pandas.concat |
import os, codecs
import pandas as pd
import numpy as np
PATH = '../input/'
# Shared-bike trajectory data
bike_track = pd.concat([
pd.read_csv(PATH + 'gxdc_gj20201221.csv'),
pd.read_csv(PATH + 'gxdc_gj20201222.csv'),
| pd.read_csv(PATH + 'gxdc_gj20201223.csv') | pandas.read_csv |
# Copyright 2021 The QUARK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import itertools
import json
import logging
import re
from datetime import datetime
from pathlib import Path
import inquirer
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
import pandas as pd
import seaborn as sns
import yaml
from applications.PVC.PVC import PVC
from applications.SAT.SAT import SAT
from applications.TSP.TSP import TSP
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('font', family='serif')
matplotlib.rcParams['savefig.dpi'] = 300
sns.set_style('darkgrid')
sns.color_palette()
class BenchmarkManager:
"""
The benchmark manager is the main component of QUARK orchestrating the overall benchmarking process.
Based on the configuration, the benchmark manager will create an experimental plan considering all combinations of
configurations, e.g., different problem sizes, solver, and hardware combinations. It will then instantiate the
respective framework components representing the application, the mapping to the algorithmic formulation, solver,
and device. After executing the benchmarks, it collects the generated data and executes the validation and evaluation
functions.
"""
def __init__(self):
"""
Constructor method
"""
self.application = None
self.application_configs = None
self.results = []
self.mapping_solver_device_combinations = {}
self.repetitions = 1
self.store_dir = None
def generate_benchmark_configs(self) -> dict:
"""
Queries the user to get all needed information about application, solver, mapping, device and general settings
to run the benchmark.
:return: Benchmark Config
:rtype: dict
"""
application_answer = inquirer.prompt([inquirer.List('application',
message="What application do you want?",
choices=['TSP', 'PVC', 'SAT'],
default='PVC',
)])
if application_answer["application"] == "TSP":
self.application = TSP()
elif application_answer["application"] == "PVC":
self.application = PVC()
elif application_answer["application"] == "SAT":
self.application = SAT()
application_config = self.application.get_parameter_options()
application_config = BenchmarkManager._query_for_config(application_config,
f"(Option for {application_answer['application']})")
config = {
"application": {
"name": application_answer["application"],
"config": application_config
},
"mapping": {}
}
mapping_answer = inquirer.prompt([inquirer.Checkbox('mapping',
message="What mapping do you want?",
choices=self.application.get_available_mapping_options(),
# default=[self.application.get_available_mapping_options()[0]]
)])
for mapping_single_answer in mapping_answer["mapping"]:
mapping = self.application.get_mapping(mapping_single_answer)
mapping_config = mapping.get_parameter_options()
mapping_config = BenchmarkManager._query_for_config(mapping_config, f"(Option for {mapping_single_answer})")
solver_answer = inquirer.prompt([inquirer.Checkbox('solver',
message=f"What Solver do you want for mapping {mapping_single_answer}?",
choices=mapping.get_available_solver_options()
)])
config["mapping"][mapping_single_answer] = {
"solver": [],
"config": mapping_config
}
for solver_single_answer in solver_answer["solver"]:
solver = mapping.get_solver(solver_single_answer)
solver_config = solver.get_parameter_options()
solver_config = BenchmarkManager._query_for_config(solver_config,
f"(Option for {solver_single_answer})")
device_answer = inquirer.prompt([inquirer.Checkbox('device',
message=f"What Device do you want for solver {solver_single_answer}?",
choices=solver.get_available_device_options()
)])
config["mapping"][mapping_single_answer]["solver"].append({
"name": solver_single_answer,
"config": solver_config,
"device": device_answer["device"]
})
repetitions_answer = inquirer.prompt(
[inquirer.Text('repetitions', message="How many repetitions do you want?",
validate=lambda _, x: re.match("\d", x),
default=self.repetitions
)])
config['repetitions'] = int(repetitions_answer["repetitions"])
logging.info(config)
return config
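    # For orientation (illustrative comment, not part of the original source): the dict returned
    # above has roughly this shape, mirroring how it is consumed in load_config():
    #   {
    #       "application": {"name": "<app>", "config": {...}},
    #       "mapping": {"<mapping>": {"config": {...},
    #                                 "solver": [{"name": "<solver>", "config": {...}, "device": [...]}]}},
    #       "repetitions": <int>,
    #   }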
def load_config(self, config: dict) -> None:
"""
Uses the config file to generate all class instances needed to run the benchmark.
:param config: valid config file
:type config: dict
:rtype: None
"""
logging.info(config)
if config["application"]["name"] == "TSP":
self.application = TSP()
elif config["application"]["name"] == "PVC":
self.application = PVC()
elif config["application"]["name"] == "SAT":
self.application = SAT()
self.repetitions = int(config["repetitions"])
# Build all application configs
keys, values = zip(*config['application']['config'].items())
self.application_configs = [dict(zip(keys, v)) for v in itertools.product(*values)]
self.mapping_solver_device_combinations = {}
for mapping_name, mapping_value in config['mapping'].items():
mapping = self.application.get_mapping(mapping_name)
if len(mapping_value['config'].items()) > 0:
keys, values = zip(*mapping_value['config'].items())
mapping_config = [dict(zip(keys, v)) for v in itertools.product(*values)]
else:
mapping_config = [{}]
self.mapping_solver_device_combinations[mapping_name] = {
"mapping_instance": mapping,
"mapping_config": mapping_config,
"solvers": {}
}
for single_solver in mapping_value['solver']:
# Build all solver configs
if len(single_solver['config'].items()) > 0:
keys, values = zip(*single_solver['config'].items())
solver_config = [dict(zip(keys, v)) for v in itertools.product(*values)]
else:
solver_config = [{}]
solver = mapping.get_solver(single_solver['name'])
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']] = {
"solver_instance": solver,
"solver_config": solver_config
}
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']][
"devices"] = {}
for single_device in single_solver["device"]:
device_wrapper = solver.get_device(single_device)
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']][
"devices"][single_device] = device_wrapper
@staticmethod
def _query_for_config(config: dict, prefix: str = "") -> dict:
for key, config_answer in config.items():
if len(config_answer['values']) == 1:
# When there is only 1 value to choose from skip the user input for now
config[key] = config_answer['values']
else:
answer = inquirer.prompt(
[inquirer.Checkbox(key,
message=f"{prefix} {config_answer['description']}",
choices=config_answer['values']
)])
config[key] = answer[key] # TODO support strings
return config
def _create_store_dir(self, store_dir: str = None, tag: str = None) -> None:
"""
Creates directory for a benchmark run.
:param store_dir: Directory where the new directory should be created
:type store_dir: str
:param tag: prefix of the new directory
:type tag: str
:return:
:rtype: None
"""
if store_dir is None:
store_dir = Path.cwd()
        self.store_dir = f"{store_dir}/benchmark_runs/{tag + '-' if tag is not None else ''}{datetime.today().strftime('%Y-%m-%d-%H-%M-%S')}"
Path(self.store_dir).mkdir(parents=True, exist_ok=True)
def orchestrate_benchmark(self, config: dict, store_dir: str = None) -> None:
"""
Executes the benchmarks according to the given settings.
:param config: valid config file
:type config: dict
:param store_dir: target directory to store the results of the benchmark (if you decided to store it)
:type store_dir: str
:rtype: None
"""
# TODO Make this nicer
self.load_config(config)
self._create_store_dir(store_dir, tag=self.application.__class__.__name__.lower())
logger = logging.getLogger()
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
fh = logging.FileHandler(f"{self.store_dir}/logger.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
logging.info(f"Created Benchmark run directory {self.store_dir}")
with open(f"{self.store_dir}/config.yml", 'w') as fp:
yaml.dump(config, fp)
try:
for idx, application_config in enumerate(self.application_configs):
problem = self.application.generate_problem(application_config)
results = []
path = f"{self.store_dir}/application_config_{idx}"
Path(path).mkdir(parents=True, exist_ok=True)
with open(f"{path}/application_config.json", 'w') as fp:
json.dump(application_config, fp)
self.application.save(path)
for mapping_name, mapping_value in self.mapping_solver_device_combinations.items():
mapping = mapping_value["mapping_instance"]
for mapping_config in mapping_value['mapping_config']:
for solver_name, solver_value in mapping_value["solvers"].items():
solver = solver_value["solver_instance"]
for solver_config in solver_value['solver_config']:
for device_name, device_value in solver_value["devices"].items():
device = device_value
for i in range(1, self.repetitions + 1):
mapped_problem, time_to_mapping = mapping.map(problem, mapping_config)
try:
logging.info(
f"Running {self.application.__class__.__name__} with config {application_config} on solver {solver.__class__.__name__} and device {device.get_device_name()} (Repetition {i}/{self.repetitions})")
solution_raw, time_to_solve = solver.run(mapped_problem, device,
solver_config, store_dir=path,
repetition=i)
processed_solution, time_to_reverse_map = mapping.reverse_map(solution_raw)
try:
processed_solution, time_to_process_solution = self.application.process_solution(
processed_solution)
solution_validity, time_to_validation = self.application.validate(
processed_solution)
except Exception as e:
solution_validity = False
time_to_process_solution = None
time_to_validation = None
if solution_validity:
solution_quality, time_to_evaluation = self.application.evaluate(
processed_solution)
else:
solution_quality = None
time_to_evaluation = None
results.append({
"timestamp": datetime.today().strftime('%Y-%m-%d-%H-%M-%S'),
"time_to_solution": sum(filter(None, [time_to_mapping, time_to_solve,
time_to_reverse_map,
time_to_process_solution,
time_to_validation,
time_to_evaluation])),
"time_to_solution_unit": "ms",
"time_to_process_solution": time_to_process_solution,
"time_to_process_solution_unit": "ms",
"time_to_validation": time_to_validation,
"time_to_validation_unit": "ms",
"time_to_evaluation": time_to_evaluation,
"time_to_evaluation_unit": "ms",
"solution_validity": solution_validity,
"solution_quality": solution_quality,
"solution_quality_unit": self.application.get_solution_quality_unit(),
"solution_raw": str(solution_raw),
                                                # TODO Revise this (I am only doing this for now since json.dumps does not like tuples as keys for dicts)
"time_to_solve": time_to_solve,
"time_to_solve_unit": "ms",
"repetition": i,
"application": self.application.__class__.__name__,
"application_config": application_config,
"mapping_config": mapping_config,
"time_to_reverse_map": time_to_reverse_map,
"time_to_reverse_map_unit": "ms",
"time_to_mapping": time_to_mapping,
"time_to_mapping_unit": "ms",
"solver_config": solver_config,
"mapping": mapping.__class__.__name__,
"solver": solver.__class__.__name__,
"device_class": device.__class__.__name__,
"device": device.get_device_name()
})
with open(f"{path}/results.json", 'w') as fp:
json.dump(results, fp)
df = self._collect_all_results()
self._save_as_csv(df)
except Exception as e:
logging.error(f"Error during benchmark run: {e}", exc_info=True)
with open(f"{path}/error.log", 'a') as fp:
fp.write(
f"Solver: {solver_name}, Device: {device_name}, Error: {str(e)} (For more information take a look at logger.log)")
fp.write("\n")
with open(f"{path}/results.json", 'w') as fp:
json.dump(results, fp)
# catching ctrl-c and killing network if desired
except KeyboardInterrupt:
logger.info("CTRL-C detected. Still trying to create results.csv.")
df = self._collect_all_results()
self._save_as_csv(df)
def _collect_all_results(self) -> pd.DataFrame:
"""
Collect all results from the multiple results.json.
:return: a pandas dataframe
:rtype: pd.Dataframe
"""
dfs = []
for filename in glob.glob(f"{self.store_dir}/**/results.json"):
dfs.append(pd.read_json(filename, orient='records'))
if len(dfs) == 0:
logging.error("No results.json files could be found! Probably an error was previously happening.")
return pd.concat(dfs, axis=0, ignore_index=True)
def _save_as_csv(self, df: pd.DataFrame) -> None:
"""
Save all the results of this experiments in a single CSV.
:param df: Dataframe which should be saved
:type df: pd.Dataframe
"""
# Since these configs are dicts it is not so nice to store them in a df/csv. But this is a workaround that works for now
df['application_config'] = df.apply(lambda row: json.dumps(row["application_config"]), axis=1)
df['solver_config'] = df.apply(lambda row: json.dumps(row["solver_config"]), axis=1)
df['mapping_config'] = df.apply(lambda row: json.dumps(row["mapping_config"]), axis=1)
df.to_csv(path_or_buf=f"{self.store_dir}/results.csv")
def load_results(self, input_dirs: list = None) -> pd.DataFrame:
"""
Load results from one or many results.csv files.
:param input_dirs: If you want to load more than 1 results.csv (default is just 1, the one from the experiment)
:type input_dirs: list
:return: a pandas dataframe
:rtype: pd.Dataframe
"""
if input_dirs is None:
input_dirs = [self.store_dir]
dfs = []
for input_dir in input_dirs:
for filename in glob.glob(f"{input_dir}/results.csv"):
dfs.append(pd.read_csv(filename, index_col=0, encoding="utf-8"))
df = pd.concat(dfs, axis=0, ignore_index=True)
df['application_config'] = df.apply(lambda row: json.loads(row["application_config"]), axis=1)
df['solver_config'] = df.apply(lambda row: json.loads(row["solver_config"]), axis=1)
df['mapping_config'] = df.apply(lambda row: json.loads(row["mapping_config"]), axis=1)
return df
def summarize_results(self, input_dirs: list) -> None:
"""
Helper function to summarize multiple experiments.
:param input_dirs: list of directories
:type input_dirs: list
:rtype: None
"""
self._create_store_dir(tag="summary")
df = self.load_results(input_dirs)
# Deep copy, else it messes with the json.loads in save_as_csv
self._save_as_csv(df.copy())
self.vizualize_results(df, self.store_dir)
def vizualize_results(self, df: pd.DataFrame, store_dir: str = None) -> None:
"""
Generates various plots for the benchmark.
:param df: pandas dataframe
:type df: pd.Dataframe
:param store_dir: directory where to store the plots
:type store_dir: str
:rtype: None
"""
if store_dir is None:
store_dir = self.store_dir
if len(df['application'].unique()) > 1:
logging.error("At the moment only 1 application can be visualized! Aborting plotting process!")
return
# Let's create some custom columns
df['configCombo'] = df.apply(lambda row: f"{row['mapping']}/\n{row['solver']}/\n{row['device']}", axis=1)
df, eval_axis_name = self._compute_application_config_combo(df)
df['solverConfigCombo'] = df.apply(
lambda row: '/\n'.join(
['%s: %s' % (key, value) for (key, value) in row['solver_config'].items()]) +
"\ndevice:" + row['device'] + "\nmapping:" + '/\n'.join(
['%s: %s' % (key, value) for (key, value) in row['mapping_config'].items()]), axis=1)
df_complete = df.copy()
df = df.loc[df["solution_validity"] == True]
if df.shape[0] < 1:
logging.warning("Not enough (valid) data to visualize results, skipping the plot generation!")
return
self._plot_overall(df, store_dir, eval_axis_name)
self._plot_solvers(df, store_dir, eval_axis_name)
self._plot_solution_validity(df_complete, store_dir)
@staticmethod
def _compute_application_config_combo(df: pd.DataFrame) -> (pd.DataFrame, str):
"""
Tries to infer the column and the axis name used for solution_quality in a smart way.
:param df: pandas dataframe
:type df: pd.Dataframe
:return: Dataframe and the axis name
:rtype: tuple(pd.DataFrame, str)
"""
column = df['application_config']
affected_keys = []
helper_dict = defaultdict(list)
        # Try to find out which keys in the dict change
for d in column.values: # you can list as many input dicts as you want here
for key, value in d.items():
helper_dict[key].append(value)
helper_dict[key] = list(set(helper_dict[key]))
for key, value in helper_dict.items():
# If there is more than 1 value and it is a float/int, then we can order it
if len(value) > 1: # and isinstance(value[0], (int, float))
affected_keys.append(key)
# def custom_sort(series):
# return sorted(range(len(series)), key=lambda k: tuple([series[k][x] for x in affected_keys]))
#
# # Sort by these keys
# df.sort_values(by=["application_config"], key=custom_sort, inplace=True)
if len(affected_keys) == 1:
# X axis name should be this and fixed parameters in parenthesis
df['applicationConfigCombo'] = df.apply(
lambda row: row['application_config'][affected_keys[0]],
axis=1)
axis_name = f"{affected_keys[0]}" if len(
helper_dict.keys()) == 1 else f"{affected_keys[0]} with {','.join(['%s %s' % (value[0], key) for (key, value) in helper_dict.items() if key not in affected_keys])}"
else:
df['applicationConfigCombo'] = df.apply(
lambda row: '/\n'.join(['%s: %s' % (key, value) for (key, value) in row['application_config'].items() if
key in affected_keys]), axis=1)
axis_name = None
return df, axis_name
@staticmethod
def _plot_solution_validity(df_complete: pd.DataFrame, store_dir: str) -> None:
"""
Generates plot for solution_validity.
:param df_complete: pandas dataframe
:type df_complete: pd.DataFrame
:param store_dir: directory where to store the plot
:type store_dir: str
:rtype: None
"""
def countplot(x, hue, **kwargs):
sns.countplot(x=x, hue=hue, **kwargs)
g = sns.FacetGrid(df_complete,
col="applicationConfigCombo")
g.map(countplot, "configCombo", "solution_validity")
g.add_legend(fontsize='7', title="Result Validity")
g.set_ylabels("Count")
g.set_xlabels("Solver Setting")
for ax in g.axes.ravel():
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
g.tight_layout()
plt.savefig(f"{store_dir}/plot_solution_validity.pdf", dpi=300)
plt.clf()
@staticmethod
def _plot_solvers(df: pd.DataFrame, store_dir: str, eval_axis_name: str) -> None:
"""
Generates plot for each individual solver.
:param eval_axis_name: name of the evaluation metric
:type eval_axis_name: str
:param df: pandas dataframe
:type df: pd.Dataframe
:param store_dir: directory where to store the plot
:type store_dir: str
:rtype: None
"""
def _barplot(data, x, y, hue=None, title="TBD", ax=None, order=None,
hue_order=None, capsize=None):
sns.barplot(x=x, y=y, hue=hue, data=data, ax=ax, order=order, hue_order=hue_order,
capsize=capsize) # , palette="Dark2"
plt.title(title)
return plt
for solver in df['solver'].unique():
figu, ax = plt.subplots(1, 2, figsize=(15, 10))
_barplot(
df.loc[df["solver"] == solver],
"applicationConfigCombo", "time_to_solve", hue='solverConfigCombo', order=None,
title=None, ax=ax[0])
_barplot(
df.loc[df["solver"] == solver],
"applicationConfigCombo", "solution_quality", hue='solverConfigCombo', order=None,
title=None, ax=ax[1])
ax[0].get_legend().remove()
# ax[1].get_legend().remove()
# plt.legend(bbox_to_anchor=[1.5, .5], loc=9, frameon=False, title="Solver Settings")
ax[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), title="Solver Settings")
ax[0].set_xlabel(xlabel=eval_axis_name, fontsize=16)
ax[1].set_xlabel(xlabel=eval_axis_name, fontsize=16)
ax[0].set_ylabel(ylabel=df['time_to_solve_unit'].unique()[0], fontsize=16)
# ax[0].set_yscale('log', base=10)
ax[1].set_ylabel(ylabel=df['solution_quality_unit'].unique()[0], fontsize=16)
plt.suptitle(f"{solver}")
for ax in figu.axes:
matplotlib.pyplot.sca(ax)
# If column values are very long and of type string rotate the ticks
if ( | pd.api.types.is_string_dtype(df.applicationConfigCombo.dtype) | pandas.api.types.is_string_dtype |
#!/usr/bin/env python
# coding: utf-8
import inspect
import json
import os
import urllib.request
from functools import reduce
from glob import glob
from time import sleep
from urllib.parse import quote
import jieba
import numpy as np
import pandas as pd
import seaborn as sns
from icecream import ic
from snorkel.labeling import PandasLFApplier, labeling_function
from snorkel.labeling.model import LabelModel
pd.set_option("display.max_rows", 200)
def read_fkg(path):
req = [[]]
with open(path, "r") as f:
lines = f.readlines()
for l in lines:
if l.startswith("{"):
req.append([])
req[-1].append(l)
#return req
return list(map(lambda x: json.loads("".join(map(lambda y: y.replace("\n", "").strip(), x)).replace("\n", "").strip()), req[1:]))
def finance_tag_filter(tag):
if type(tag) != type(""):
return False
kws = ["股票"]
return True if sum(map(lambda kw: kw in tag, kws)) else False
def retrieve_finance_tag_df(json_files):
def produce_df(req):
return pd.DataFrame(list(map(lambda y: y["data"] ,filter(lambda x: x["data"] and type(x["data"]) == type({}), req))))[["entity", "tag"]].explode("tag")
word_tag_df = pd.concat(list(map(lambda p: produce_df(read_fkg(p)), json_files)), axis = 0).drop_duplicates()
tags = word_tag_df["tag"].drop_duplicates()
finance_tags = tags[tags.map(finance_tag_filter)].tolist()
return word_tag_df[word_tag_df["tag"].isin(finance_tags)]
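# Illustrative usage sketch (not part of the original script); the glob pattern is an assumption.
#   json_files = glob("fkg_dump/*.json")
#   finance_word_tag_df = retrieve_finance_tag_df(json_files)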
def filter_high_evidence_func(df):
    with_title_part = df[~pd.isna(df["title"])].copy()
    without_title_part = df[pd.isna(df["title"])].copy()
need_words = ["股", "证券"]
without_title_part = without_title_part[without_title_part["header"].map(lambda h_list: sum(map(lambda h: sum(map(lambda w: w in h, need_words)), h_list))).astype(bool)]
return pd.concat([with_title_part, without_title_part], axis = 0)
total_tables = | pd.read_csv("train_table_extract_with_topic.csv") | pandas.read_csv |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for defects module.
"""
import itertools
import logging
import math
import operator
from collections import defaultdict
from copy import deepcopy
import numpy as np
import pandas as pd
from monty.dev import requires
from monty.json import MSONable
from numpy.linalg import norm
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial import Voronoi
from scipy.spatial.distance import squareform
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
MinimumDistanceNN,
cn_opt_params,
)
from pymatgen.analysis.phase_diagram import get_facets
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import pbc_diff
from pymatgen.vis.structure_vtk import StructureVis
try:
from skimage.feature import peak_local_max
peak_local_max_found = True
except ImportError:
peak_local_max_found = False
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2014, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
__date__ = "January 11, 2018"
logger = logging.getLogger(__name__)
hart_to_ev = 27.2114
ang_to_bohr = 1.8897
invang_to_ev = 3.80986
kumagai_to_V = 1.809512739e2 # = Electron charge * 1e10 / VacuumPermittivity Constant
motif_cn_op = {}
for cn, di in cn_opt_params.items():
for mot, li in di.items():
motif_cn_op[mot] = {"cn": int(cn), "optype": li[0]}
motif_cn_op[mot]["params"] = deepcopy(li[1]) if len(li) > 1 else None
class QModel(MSONable):
"""
Model for the defect charge distribution.
A combination of exponential tail and gaussian distribution is used
(see Freysoldt (2011), DOI: 10.1002/pssb.201046289 )
q_model(r) = q [x exp(-r/gamma) + (1-x) exp(-r^2/beta^2)]
without normalization constants
By default, gaussian distribution with 1 Bohr width is assumed.
If defect charge is more delocalized, exponential tail is suggested.
"""
def __init__(self, beta=1.0, expnorm=0.0, gamma=1.0):
"""
Args:
beta: Gaussian decay constant. Default value is 1 Bohr.
When delocalized (eg. diamond), 2 Bohr is more appropriate.
expnorm: Weight for the exponential tail in the range of [0-1].
Default is 0.0 indicating no tail .
For delocalized charges ideal value is around 0.54-0.6.
gamma: Exponential decay constant
"""
self.beta = beta
self.expnorm = expnorm
self.gamma = gamma
self.beta2 = beta * beta
self.gamma2 = gamma * gamma
if expnorm and not gamma:
raise ValueError("Please supply exponential decay constant.")
def rho_rec(self, g2):
"""
Reciprocal space model charge value
for input squared reciprocal vector.
Args:
g2: Square of reciprocal vector
Returns:
Charge density at the reciprocal vector magnitude
"""
return self.expnorm / np.sqrt(1 + self.gamma2 * g2) + (1 - self.expnorm) * np.exp(-0.25 * self.beta2 * g2)
@property
def rho_rec_limit0(self):
"""
Reciprocal space model charge value
close to reciprocal vector 0 .
rho_rec(g->0) -> 1 + rho_rec_limit0 * g^2
"""
return -2 * self.gamma2 * self.expnorm - 0.25 * self.beta2 * (1 - self.expnorm)
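# Illustrative sketch (added for orientation, not part of the original module): evaluating the
# model charge density in reciprocal space; the parameter values are assumptions only.
#   q = QModel(beta=2.0, expnorm=0.54, gamma=2.0)   # delocalized defect charge with exponential tail
#   print(q.rho_rec(0.25), q.rho_rec_limit0)        # density at |G|^2 = 0.25 (1/Bohr^2) and the G->0 limit term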
def eV_to_k(energy):
"""
Convert energy to reciprocal vector magnitude k via hbar*k^2/2m
Args:
        energy: Energy in eV.
Returns:
(double) Reciprocal vector magnitude (units of 1/Bohr).
"""
return math.sqrt(energy / invang_to_ev) * ang_to_bohr
def genrecip(a1, a2, a3, encut):
"""
Args:
a1, a2, a3: lattice vectors in bohr
encut: energy cut off in eV
Returns:
reciprocal lattice vectors with energy less than encut
"""
    vol = np.dot(a1, np.cross(a2, a3))  # bohr^3
b1 = (2 * np.pi / vol) * np.cross(a2, a3) # units 1/bohr
b2 = (2 * np.pi / vol) * np.cross(a3, a1)
b3 = (2 * np.pi / vol) * np.cross(a1, a2)
# create list of recip space vectors that satisfy |i*b1+j*b2+k*b3|<=encut
G_cut = eV_to_k(encut)
# Figure out max in all recipricol lattice directions
i_max = int(math.ceil(G_cut / norm(b1)))
j_max = int(math.ceil(G_cut / norm(b2)))
k_max = int(math.ceil(G_cut / norm(b3)))
# Build index list
i = np.arange(-i_max, i_max)
j = np.arange(-j_max, j_max)
k = np.arange(-k_max, k_max)
# Convert index to vectors using meshgrid
indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
# Multiply integer vectors to get recipricol space vectors
vecs = np.dot(indices, [b1, b2, b3])
# Calculate radii of all vectors
radii = np.sqrt(np.einsum("ij,ij->i", vecs, vecs))
# Yield based on radii
for vec, r in zip(vecs, radii):
if r < G_cut and r != 0:
yield vec
def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
"""
Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
        determined by a1, a2, a3 and whose magnitude is less than gcut^2.
"""
for vec in genrecip(a1, a2, a3, encut):
yield np.dot(vec, vec)
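# Illustrative sketch (not part of the original module): collecting |G|^2 values for a cubic
# 10-Bohr cell below a 520 eV cutoff; the lattice vectors and cutoff are assumptions only.
#   a1, a2, a3 = np.array([10.0, 0.0, 0.0]), np.array([0.0, 10.0, 0.0]), np.array([0.0, 0.0, 10.0])
#   g2_values = list(generate_reciprocal_vectors_squared(a1, a2, a3, 520))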
def closestsites(struct_blk, struct_def, pos):
"""
Returns closest site to the input position
for both bulk and defect structures
Args:
struct_blk: Bulk structure
struct_def: Defect structure
pos: Position
Return: (site object, dist, index)
"""
blk_close_sites = struct_blk.get_sites_in_sphere(pos, 5, include_index=True)
blk_close_sites.sort(key=lambda x: x[1])
def_close_sites = struct_def.get_sites_in_sphere(pos, 5, include_index=True)
def_close_sites.sort(key=lambda x: x[1])
return blk_close_sites[0], def_close_sites[0]
class StructureMotifInterstitial:
"""
Generate interstitial sites at positions
where the interstitialcy is coordinated by nearest neighbors
in a way that resembles basic structure motifs
(e.g., tetrahedra, octahedra). The algorithm is called InFiT
    (Interstitialcy Finding Tool), it was introduced by
<NAME>, <NAME>, <NAME>,
and <NAME> (Front. Mater., 4, 34, 2017),
and it is used by the Python Charged Defect Toolkit
(PyCDT: <NAME> et al., Comput. Phys. Commun., in press, 2018).
"""
def __init__(
self,
struct,
inter_elem,
motif_types=("tetrahedral", "octahedral"),
op_threshs=(0.3, 0.5),
dl=0.2,
doverlap=1,
facmaxdl=1.01,
verbose=False,
):
"""
Generates symmetrically distinct interstitial sites at positions
where the interstitial is coordinated by nearest neighbors
in a pattern that resembles a supported structure motif
(e.g., tetrahedra, octahedra).
Args:
struct (Structure): input structure for which symmetrically
distinct interstitial sites are to be found.
inter_elem (string): element symbol of desired interstitial.
motif_types ([string]): list of structure motif types that are
to be considered. Permissible types are:
tet (tetrahedron), oct (octahedron).
op_threshs ([float]): threshold values for the underlying order
parameters to still recognize a given structural motif
(i.e., for an OP value >= threshold the coordination pattern
match is positive, for OP < threshold the match is
negative.
dl (float): grid fineness in Angstrom. The input
structure is divided into a grid of dimension
a/dl x b/dl x c/dl along the three crystallographic
directions, with a, b, and c being the lengths of
the three lattice vectors of the input unit cell.
doverlap (float): distance that is considered
to flag an overlap between any trial interstitial site
and a host atom.
facmaxdl (float): factor to be multiplied with the maximum grid
width that is then used as a cutoff distance for the
clustering prune step.
verbose (bool): flag indicating whether (True) or not (False;
default) to print additional information to screen.
"""
# Initialize interstitial finding.
self._structure = struct.copy()
self._motif_types = motif_types[:]
if len(self._motif_types) == 0:
raise RuntimeError("no motif types provided.")
self._op_threshs = op_threshs[:]
self.cn_motif_lostop = {}
self.target_cns = []
for motif in self._motif_types:
if motif not in list(motif_cn_op.keys()):
raise RuntimeError(f"unsupported motif type: {motif}.")
cn = int(motif_cn_op[motif]["cn"])
if cn not in self.target_cns:
self.target_cns.append(cn)
if cn not in list(self.cn_motif_lostop.keys()):
self.cn_motif_lostop[cn] = {}
tmp_optype = motif_cn_op[motif]["optype"]
if tmp_optype == "tet_max":
tmp_optype = "tet"
if tmp_optype == "oct_max":
tmp_optype = "oct"
self.cn_motif_lostop[cn][motif] = LocalStructOrderParams(
[tmp_optype], parameters=[motif_cn_op[motif]["params"]], cutoff=-10.0
)
self._dl = dl
self._defect_sites = []
self._defect_types = []
self._defect_site_multiplicity = []
self._defect_cns = []
self._defect_opvals = []
rots, trans = SpacegroupAnalyzer(struct)._get_symmetry()
nbins = [
int(struct.lattice.a / dl),
int(struct.lattice.b / dl),
int(struct.lattice.c / dl),
]
dls = [
struct.lattice.a / float(nbins[0]),
struct.lattice.b / float(nbins[1]),
struct.lattice.c / float(nbins[2]),
]
maxdl = max(dls)
if verbose:
print(f"Grid size: {nbins[0]} {nbins[1]} {nbins[2]}")
print(f"dls: {dls[0]} {dls[1]} {dls[2]}")
struct_w_inter = struct.copy()
struct_w_inter.append(inter_elem, [0, 0, 0])
natoms = len(list(struct_w_inter.sites))
trialsites = []
# Build index list
i = np.arange(0, nbins[0]) + 0.5
j = np.arange(0, nbins[1]) + 0.5
k = np.arange(0, nbins[2]) + 0.5
# Convert index to vectors using meshgrid
indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
# Multiply integer vectors to get recipricol space vectors
vecs = np.multiply(indices, np.divide(1, nbins))
# Loop over trial positions that are based on a regular
# grid in fractional coordinate space
# within the unit cell.
for vec in vecs:
struct_w_inter.replace(natoms - 1, inter_elem, coords=vec, coords_are_cartesian=False)
if len(struct_w_inter.get_sites_in_sphere(struct_w_inter.sites[natoms - 1].coords, doverlap)) == 1:
neighs_images_weigths = MinimumDistanceNN(tol=0.8, cutoff=6).get_nn_info(struct_w_inter, natoms - 1)
neighs_images_weigths_sorted = sorted(neighs_images_weigths, key=lambda x: x["weight"], reverse=True)
for nsite in range(1, len(neighs_images_weigths_sorted) + 1):
if nsite not in self.target_cns:
continue
allsites = [neighs_images_weigths_sorted[i]["site"] for i in range(nsite)]
indices_neighs = list(range(len(allsites)))
allsites.append(struct_w_inter.sites[natoms - 1])
for mot, ops in self.cn_motif_lostop[nsite].items():
opvals = ops.get_order_parameters(allsites, len(allsites) - 1, indices_neighs=indices_neighs)
if opvals[0] > op_threshs[motif_types.index(mot)]:
cns = {}
for isite in range(nsite):
site = neighs_images_weigths_sorted[isite]["site"]
if isinstance(site.specie, Element):
elem = site.specie.symbol
else:
elem = site.specie.element.symbol
if elem in list(cns.keys()):
cns[elem] = cns[elem] + 1
else:
cns[elem] = 1
trialsites.append(
{
"mtype": mot,
"opval": opvals[0],
"coords": struct_w_inter.sites[natoms - 1].coords[:],
"fracs": vec,
"cns": dict(cns),
}
)
break
# Prune list of trial sites by clustering and find the site
# with the largest order parameter value in each cluster.
nintersites = len(trialsites)
unique_motifs = []
for ts in trialsites:
if ts["mtype"] not in unique_motifs:
unique_motifs.append(ts["mtype"])
labels = {}
connected = []
for i in range(nintersites):
connected.append([])
for j in range(nintersites):
dist, image = struct_w_inter.lattice.get_distance_and_image(
trialsites[i]["fracs"], trialsites[j]["fracs"]
)
connected[i].append(bool(dist < (maxdl * facmaxdl)))
include = []
for motif in unique_motifs:
labels[motif] = []
for i, ts in enumerate(trialsites):
labels[motif].append(i if ts["mtype"] == motif else -1)
change = True
while change:
change = False
for i in range(nintersites - 1):
if change:
break
if labels[motif][i] == -1:
continue
for j in range(i + 1, nintersites):
if labels[motif][j] == -1:
continue
if connected[i][j] and labels[motif][i] != labels[motif][j]:
if labels[motif][i] < labels[motif][j]:
labels[motif][j] = labels[motif][i]
else:
labels[motif][i] = labels[motif][j]
change = True
break
unique_ids = []
for l in labels[motif]:
if l != -1 and l not in unique_ids:
unique_ids.append(l)
if verbose:
print(f"unique_ids {motif} {unique_ids}")
for uid in unique_ids:
maxq = 0.0
imaxq = -1
for i in range(nintersites):
if labels[motif][i] == uid:
if imaxq < 0 or trialsites[i]["opval"] > maxq:
imaxq = i
maxq = trialsites[i]["opval"]
include.append(imaxq)
# Prune by symmetry.
multiplicity = {}
discard = []
for motif in unique_motifs:
discard_motif = []
for indi, i in enumerate(include):
if trialsites[i]["mtype"] != motif or i in discard_motif:
continue
multiplicity[i] = 1
symposlist = [trialsites[i]["fracs"].dot(np.array(m, dtype=float)) for m in rots]
for t in trans:
symposlist.append(trialsites[i]["fracs"] + np.array(t))
for indj in range(indi + 1, len(include)):
j = include[indj]
if trialsites[j]["mtype"] != motif or j in discard_motif:
continue
for sympos in symposlist:
dist, image = struct.lattice.get_distance_and_image(sympos, trialsites[j]["fracs"])
if dist < maxdl * facmaxdl:
discard_motif.append(j)
multiplicity[i] += 1
break
for i in discard_motif:
if i not in discard:
discard.append(i)
if verbose:
print(
"Initial trial sites: {}\nAfter clustering: {}\n"
"After symmetry pruning: {}".format(len(trialsites), len(include), len(include) - len(discard))
)
for i in include:
if i not in discard:
self._defect_sites.append(
PeriodicSite(
Element(inter_elem),
trialsites[i]["fracs"],
self._structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=None,
)
)
self._defect_types.append(trialsites[i]["mtype"])
self._defect_cns.append(trialsites[i]["cns"])
self._defect_site_multiplicity.append(multiplicity[i])
self._defect_opvals.append(trialsites[i]["opval"])
def enumerate_defectsites(self):
"""
Get all defect sites.
Returns:
defect_sites ([PeriodicSite]): list of periodic sites
representing the interstitials.
"""
return self._defect_sites
def get_motif_type(self, i):
"""
Get the motif type of defect with index i (e.g., "tet").
Returns:
motif (string): motif type.
"""
return self._defect_types[i]
def get_defectsite_multiplicity(self, n):
"""
Returns the symmtric multiplicity of the defect site at the index.
"""
return self._defect_site_multiplicity[n]
def get_coordinating_elements_cns(self, i):
"""
Get element-specific coordination numbers of defect with index i.
Returns:
elem_cn (dict): dictionary storing the coordination numbers (int)
with string representation of elements as keys.
(i.e., {elem1 (string): cn1 (int), ...}).
"""
return self._defect_cns[i]
def get_op_value(self, i):
"""
Get order-parameter value of defect with index i.
Returns:
opval (float): OP value.
"""
return self._defect_opvals[i]
def make_supercells_with_defects(self, scaling_matrix):
"""
Generate a sequence of supercells
in which each supercell contains a single interstitial,
except for the first supercell in the sequence
which is a copy of the defect-free input structure.
Args:
scaling_matrix (3x3 integer array): scaling matrix
to transform the lattice vectors.
Returns:
scs ([Structure]): sequence of supercells.
"""
scs = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
scs.append(sc)
for ids, defect_site in enumerate(self._defect_sites):
sc_with_inter = sc.copy()
sc_with_inter.append(
defect_site.species_string,
defect_site.frac_coords,
coords_are_cartesian=False,
validate_proximity=False,
properties=None,
)
if not sc_with_inter:
raise RuntimeError(f"could not generate supercell with interstitial {ids + 1}")
scs.append(sc_with_inter.copy())
return scs
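# Usage sketch (added for illustration; the generator variable name `gen` is assumed,
# not part of the original code). Given an interstitial generator built on a structure,
# the call below returns a pristine 2x2x2 supercell first, followed by one supercell
# per candidate interstitial site:
#   cells = gen.make_supercells_with_defects([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
#   pristine, defective = cells[0], cells[1:]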
class TopographyAnalyzer:
"""
This is a generalized module to perform topological analyses of a crystal
structure using Voronoi tessellations. It can be used for finding potential
interstitial sites. Applications including using these sites for
inserting additional atoms or for analyzing diffusion pathways.
Note that you typically want to do some preliminary postprocessing after
the initial construction. The initial construction will create a lot of
points, especially for determining potential insertion sites. Some helper
methods are available to perform aggregation and elimination of nodes. A
typical use is something like::
a = TopographyAnalyzer(structure, ["O"], ["P"])
a.cluster_nodes()
a.remove_collisions()
"""
def __init__(
self,
structure,
framework_ions,
cations,
tol=0.0001,
max_cell_range=1,
check_volume=True,
constrained_c_frac=0.5,
thickness=0.5,
):
"""
Init.
Args:
structure (Structure): An initial structure.
framework_ions ([str]): A list of ions to be considered as a
framework. Typically, this would be all anion species. E.g.,
["O", "S"].
cations ([str]): A list of ions to be considered as non-migrating
cations. E.g., if you are looking at Li3PS4 as a Li
conductor, Li is a mobile species. Your cations should be [
"P"]. The cations are used to exclude polyhedra from
diffusion analysis since those polyhedra are already occupied.
tol (float): A tolerance distance for the analysis, used to
determine if sites are actually periodic boundary images of
each other. Default is usually fine.
max_cell_range (int): This is the range of periodic images to
construct the Voronoi tessellation. A value of 1 means that we
include all points from (x +- 1, y +- 1, z +- 1) in the
voronoi construction. This is because the Voronoi polyhedra
can extend beyond the standard unit cell because of PBC.
Typically, the default value of 1 works fine for most
structures and is fast. But for really small unit
cells with high symmetry, you may need to increase this to 2
or higher.
check_volume (bool): Set to False if a ValueError is still raised
after tuning the tolerance.
constrained_c_frac (float): Constrains the region in which the
topology analysis is done. The default value of 0.5 is the
fractional c coordinate of the cell center.
thickness (float): Along with constrained_c_frac, limits the
thickness of the region to explore. The default of 0.5 maps
all sites of the unit cell.
"""
self.structure = structure
self.framework_ions = {get_el_sp(sp) for sp in framework_ions}
self.cations = {get_el_sp(sp) for sp in cations}
# Let us first map all sites to the standard unit cell, i.e.,
# 0 ≤ coordinates < 1.
# structure = Structure.from_sites(structure, to_unit_cell=True)
# lattice = structure.lattice
# We could constrain the region where we want to dope/explore by setting
# the value of constrained_c_frac and thickness. The default mode is
# mapping all sites to the standard unit cell
s = structure.copy()
constrained_sites = []
for i, site in enumerate(s):
if (
site.frac_coords[2] >= constrained_c_frac - thickness
and site.frac_coords[2] <= constrained_c_frac + thickness
):
constrained_sites.append(site)
structure = Structure.from_sites(sites=constrained_sites)
lattice = structure.lattice
# Divide the sites into framework and non-framework sites.
framework = []
non_framework = []
for site in structure:
if self.framework_ions.intersection(site.species.keys()):
framework.append(site)
else:
non_framework.append(site)
# We construct a supercell series of coords. This is because the
# Voronoi polyhedra can extend beyond the standard unit cell. Using a
# range of -2, -1, 0, 1 should be fine.
coords = []
cell_range = list(range(-max_cell_range, max_cell_range + 1))
for shift in itertools.product(cell_range, cell_range, cell_range):
for site in framework:
shifted = site.frac_coords + shift
coords.append(lattice.get_cartesian_coords(shifted))
# Perform the voronoi tessellation.
voro = Voronoi(coords)
# Store a mapping of each voronoi node to a set of points.
node_points_map = defaultdict(set)
for pts, vs in voro.ridge_dict.items():
for v in vs:
node_points_map[v].update(pts)
logger.debug(f"{len(voro.vertices)} total Voronoi vertices")
# Vnodes store all the valid voronoi polyhedra. Cation vnodes store
# the voronoi polyhedra that are already occupied by existing cations.
vnodes = []
cation_vnodes = []
def get_mapping(poly):
"""
Helper function to check if a voronoi poly is a periodic image
of one of the existing voronoi polys.
"""
for v in vnodes:
if v.is_image(poly, tol):
return v
return None
# Filter all the voronoi polyhedra so that we only consider those
# which are within the unit cell.
for i, vertex in enumerate(voro.vertices):
if i == 0:
continue
fcoord = lattice.get_fractional_coords(vertex)
poly = VoronoiPolyhedron(lattice, fcoord, node_points_map[i], coords, i)
if np.all([-tol <= c < 1 + tol for c in fcoord]):
if len(vnodes) == 0:
vnodes.append(poly)
else:
ref = get_mapping(poly)
if ref is None:
vnodes.append(poly)
logger.debug(f"{len(vnodes)} voronoi vertices in cell.")
# Eliminate all voronoi nodes which are closest to existing cations.
if len(cations) > 0:
cation_coords = [
site.frac_coords for site in non_framework if self.cations.intersection(site.species.keys())
]
vertex_fcoords = [v.frac_coords for v in vnodes]
dist_matrix = lattice.get_all_distances(cation_coords, vertex_fcoords)
indices = np.where(dist_matrix == np.min(dist_matrix, axis=1)[:, None])[1]
cation_vnodes = [v for i, v in enumerate(vnodes) if i in indices]
vnodes = [v for i, v in enumerate(vnodes) if i not in indices]
logger.debug(f"{len(vnodes)} vertices in cell not with cation.")
self.coords = coords
self.vnodes = vnodes
self.cation_vnodes = cation_vnodes
self.framework = framework
self.non_framework = non_framework
if check_volume:
self.check_volume()
def check_volume(self):
"""
Basic check that the volumes of all voronoi polyhedra sum to the unit cell volume.
Note that this does not apply after polyhedra combination.
"""
vol = sum(v.volume for v in self.vnodes) + sum(v.volume for v in self.cation_vnodes)
if abs(vol - self.structure.volume) > 1e-8:
raise ValueError(
"Sum of voronoi volumes is not equal to original volume of "
"structure! This may lead to inaccurate results. You need to "
"tweak the tolerance and max_cell_range until you get a "
"correct mapping."
)
def cluster_nodes(self, tol=0.2):
"""
Cluster nodes that are too close together using a tol.
Args:
tol (float): A distance tolerance. PBC is taken into account.
"""
lattice = self.structure.lattice
vfcoords = [v.frac_coords for v in self.vnodes]
# Manually generate the distance matrix (which needs to take into
# account PBC).
dist_matrix = np.array(lattice.get_all_distances(vfcoords, vfcoords))
dist_matrix = (dist_matrix + dist_matrix.T) / 2
for i in range(len(dist_matrix)):
dist_matrix[i, i] = 0
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
cn = fcluster(z, tol, criterion="distance")
merged_vnodes = []
for n in set(cn):
poly_indices = set()
frac_coords = []
for i, j in enumerate(np.where(cn == n)[0]):
poly_indices.update(self.vnodes[j].polyhedron_indices)
if i == 0:
frac_coords.append(self.vnodes[j].frac_coords)
else:
fcoords = self.vnodes[j].frac_coords
# We need the image to combine the frac_coords properly.
d, image = lattice.get_distance_and_image(frac_coords[0], fcoords)
frac_coords.append(fcoords + image)
merged_vnodes.append(VoronoiPolyhedron(lattice, np.average(frac_coords, axis=0), poly_indices, self.coords))
self.vnodes = merged_vnodes
logger.debug(f"{len(self.vnodes)} vertices after combination.")
def remove_collisions(self, min_dist=0.5):
"""
Remove vnodes that are too close to existing atoms in the structure
Args:
min_dist(float): The minimum distance that a vertex needs to be
from existing atoms.
"""
vfcoords = [v.frac_coords for v in self.vnodes]
sfcoords = self.structure.frac_coords
dist_matrix = self.structure.lattice.get_all_distances(vfcoords, sfcoords)
all_dist = np.min(dist_matrix, axis=1)
new_vnodes = []
for i, v in enumerate(self.vnodes):
if all_dist[i] > min_dist:
new_vnodes.append(v)
self.vnodes = new_vnodes
def get_structure_with_nodes(self):
"""
Get the modified structure with the voronoi nodes inserted. The
species is set as a DummySpecies X.
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("X", v.frac_coords)
return new_s
def print_stats(self):
"""
Print stats such as the MSE dist.
"""
latt = self.structure.lattice
def get_min_dist(fcoords):
n = len(fcoords)
dist = latt.get_all_distances(fcoords, fcoords)
all_dist = [dist[i, j] for i in range(n) for j in range(i + 1, n)]
return min(all_dist)
voro = [s.frac_coords for s in self.vnodes]
print(f"Min dist between voronoi vertices centers = {get_min_dist(voro):.4f}")
def get_non_framework_dist(fcoords):
cations = [site.frac_coords for site in self.non_framework]
dist_matrix = latt.get_all_distances(cations, fcoords)
min_dist = np.min(dist_matrix, axis=1)
if len(cations) != len(min_dist):
raise Exception("Could not calculate distance to all cations")
return np.linalg.norm(min_dist), min(min_dist), max(min_dist)
print(len(self.non_framework))
print(f"MSE dist voro = {str(get_non_framework_dist(voro))}")
def write_topology(self, fname="Topo.cif"):
"""
Write topology to a file.
:param fname: Filename
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("Mg", v.frac_coords)
new_s.to(filename=fname)
def analyze_symmetry(self, tol):
"""
:param tol: Tolerance for SpaceGroupAnalyzer
:return: List
"""
s = Structure.from_sites(self.framework)
site_to_vindex = {}
for i, v in enumerate(self.vnodes):
s.append("Li", v.frac_coords)
site_to_vindex[s[-1]] = i
print(len(s))
finder = SpacegroupAnalyzer(s, tol)
print(finder.get_space_group_operations())
symm_structure = finder.get_symmetrized_structure()
print(len(symm_structure.equivalent_sites))
return [
[site_to_vindex[site] for site in sites]
for sites in symm_structure.equivalent_sites
if sites[0].specie.symbol == "Li"
]
def vtk(self):
"""
Show VTK visualization.
"""
if StructureVis is None:
raise NotImplementedError("vtk must be present to view.")
lattice = self.structure.lattice
vis = StructureVis()
vis.set_structure(Structure.from_sites(self.structure))
for v in self.vnodes:
vis.add_site(PeriodicSite("K", v.frac_coords, lattice))
vis.add_polyhedron(
[PeriodicSite("S", c, lattice, coords_are_cartesian=True) for c in v.polyhedron_coords],
PeriodicSite("Na", v.frac_coords, lattice),
color="element",
draw_edges=True,
edges_color=(0, 0, 0),
)
vis.show()
class VoronoiPolyhedron:
"""
Convenience container for a voronoi point in PBC and its associated polyhedron.
"""
def __init__(self, lattice, frac_coords, polyhedron_indices, all_coords, name=None):
"""
:param lattice:
:param frac_coords:
:param polyhedron_indices:
:param all_coords:
:param name:
"""
self.lattice = lattice
self.frac_coords = frac_coords
self.polyhedron_indices = polyhedron_indices
self.polyhedron_coords = np.array(all_coords)[list(polyhedron_indices), :]
self.name = name
def is_image(self, poly, tol):
"""
:param poly: VoronoiPolyhedron
:param tol: Coordinate tolerance.
:return: Whether a poly is an image of the current one.
"""
frac_diff = pbc_diff(poly.frac_coords, self.frac_coords)
if not np.allclose(frac_diff, [0, 0, 0], atol=tol):
return False
to_frac = self.lattice.get_fractional_coords
for c1 in self.polyhedron_coords:
found = False
for c2 in poly.polyhedron_coords:
d = pbc_diff(to_frac(c1), to_frac(c2))
if not np.allclose(d, [0, 0, 0], atol=tol):
found = True
break
if not found:
return False
return True
@property
def coordination(self):
"""
:return: Coordination number
"""
return len(self.polyhedron_indices)
@property
def volume(self):
"""
:return: Volume
"""
return calculate_vol(self.polyhedron_coords)
def __str__(self):
return f"Voronoi polyhedron {self.name}"
class ChargeDensityAnalyzer(MSONable):
"""
Analyzer to find potential interstitial sites based on charge density. The
`total` charge density is used.
"""
def __init__(self, chgcar):
"""
Initialization.
Args:
chgcar (pmg.Chgcar): input Chgcar object.
"""
self.chgcar = chgcar
self.structure = chgcar.structure
self.extrema_coords = [] # list of frac_coords of local extrema
self.extrema_type = None # "local maxima" or "local minima"
self._extrema_df = None # extrema frac_coords - chg density table
self._charge_distribution_df = None # frac_coords - chg density table
@classmethod
def from_file(cls, chgcar_filename):
"""
Init from a CHGCAR.
:param chgcar_filename:
:return:
"""
chgcar = Chgcar.from_file(chgcar_filename)
return cls(chgcar=chgcar)
@property
def charge_distribution_df(self):
"""
:return: Charge distribution.
"""
if self._charge_distribution_df is None:
return self._get_charge_distribution_df()
return self._charge_distribution_df
@property
def extrema_df(self):
"""
:return: The extrema in charge density.
"""
if self.extrema_type is None:
logger.warning("Please run ChargeDensityAnalyzer.get_local_extrema first!")
return self._extrema_df
def _get_charge_distribution_df(self):
"""
Return a complete table of fractional coordinates - charge density.
"""
# Fraction coordinates and corresponding indices
axis_grid = np.array([np.array(self.chgcar.get_axis_grid(i)) / self.structure.lattice.abc[i] for i in range(3)])
axis_index = np.array([range(len(axis_grid[i])) for i in range(3)])
data = {}
for index in itertools.product(*axis_index):
a, b, c = index
f_coords = (axis_grid[0][a], axis_grid[1][b], axis_grid[2][c])
data[f_coords] = self.chgcar.data["total"][a][b][c]
# Fraction coordinates - charge density table
df = | pd.Series(data) | pandas.Series |
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFScoreMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python scoring script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
try:
line = input()
if line == '': # Exit if user provides blank line
pass
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
modelSerB64 = allArgs[-1]
except (EOFError): # Exit if reached EOF or CTRL-D
pass
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
except (EOFError): # Exit if reached EOF or CTRL-D
break
#for line in sys.stdin.read().splitlines():
# line = line.split(delimiter)
# inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
## In the input information, all rows have the same number of column elements
## except for the first row. The latter also contains the model info in its
## last column. Isolate the serialized model from the end of first row.
#modelSerB64 = inputData[0][-1]
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
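# For example (illustrative): a raw numeric field arriving as "1.000E 000" is made
# parseable by removing the embedded blank before conversion:
#   "".join("1.000E 000".split())  ->  "1.000E000", which pd.to_numeric() reads as 1.0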
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
#df = pd.DataFrame.from_records(inputData, exclude=['nRow', 'model'], columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = pd.to_numeric(df['tot_children'])
df['female_ind'] = pd.to_numeric(df['female_ind'])
df['single_ind'] = pd.to_numeric(df['single_ind'])
df['married_ind'] = pd.to_numeric(df['married_ind'])
df['separated_ind'] = pd.to_numeric(df['separated_ind'])
df['statecode'] = df['statecode'].apply(lambda x: x.replace('"', ''))
df['ck_acct_ind'] = pd.to_numeric(df['ck_acct_ind'])
df['sv_acct_ind'] = pd.to_numeric(df['sv_acct_ind'])
df['cc_acct_ind'] = pd.to_numeric(df['cc_acct_ind'])
df['ck_avg_bal'] = df['ck_avg_bal'].apply(lambda x: "".join(x.split()))
df['ck_avg_bal'] = pd.to_numeric(df['ck_avg_bal'])
df['sv_avg_bal'] = df['sv_avg_bal'].apply(lambda x: "".join(x.split()))
df['sv_avg_bal'] = pd.to_numeric(df['sv_avg_bal'])
df['cc_avg_bal'] = df['cc_avg_bal'].apply(lambda x: "".join(x.split()))
df['cc_avg_bal'] = pd.to_numeric(df['cc_avg_bal'])
df['ck_avg_tran_amt'] = df['ck_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['ck_avg_tran_amt'] = pd.to_numeric(df['ck_avg_tran_amt'])
df['sv_avg_tran_amt'] = df['sv_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['sv_avg_tran_amt'] = pd.to_numeric(df['sv_avg_tran_amt'])
df['cc_avg_tran_amt'] = df['cc_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['cc_avg_tran_amt'] = pd.to_numeric(df['cc_avg_tran_amt'])
df['q1_trans_cnt'] = | pd.to_numeric(df['q1_trans_cnt']) | pandas.to_numeric |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from worker_functions import num_to_month
import pandas as pd
import numpy as np
import react_buttons
# import data
df2 = | pd.read_csv("activities_5_correct.csv") | pandas.read_csv |
import math
import json
import os
import joblib
import re
import shutil
import pandas as pd
from multiprocessing import Pool
import traceback
from tqdm import tqdm
import numpy as np
from collections import OrderedDict, Counter
import logging
from pathlib import Path
from corpus.corpus import Corpus
def id2stem(study_id, day, rep):
filename = 'study{:03d}_day{:03d}_rep{:01d}'.format(study_id, day, rep)
return filename
def id2filename(id):
return id2stem(*id)
def parse_filename(filename):
stem = Path(filename).stem
study_id, day, rep = tuple(stem.split('_'))
study_id = int(re.sub('study', '', study_id))
day = int(re.sub('day', '', day))
rep = int(re.sub('rep', '', rep))
return (study_id, day, rep)
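# Usage sketch (assumes file names follow the id2stem() convention above):
#   id2filename((1, 2, 1))                      ->  'study001_day002_rep1'
#   parse_filename('study001_day002_rep1.txt')  ->  (1, 2, 1)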
def range2set(indices):
if indices is None:
return None
else:
return set(list(range(indices[0], indices[1] + 1)))
class CorpusXray(Corpus):
'''
Corpus container (collection of documents)
'''
def __init__(self):
Corpus.__init__(self)
#@override
def doc_filter(self, day_range=None, rep_range=None, out_type='list', include=None, exclude=None):
'''
Get filtered set of documents
'''
# include is only included for compatibility but is not used here
assert include is None
# exclude is only included for compatibility but is not used here
assert exclude is None
day_set = range2set(day_range)
rep_set = range2set(rep_range)
# Loop on documents
docs = OrderedDict()
for (study_id, day, rep), doc in self.docs_.items():
# Check day/rep
if ((day_range is None) or (day in day_set)) and \
((rep_range is None) or (rep in rep_set)) :
assert (study_id, day, rep) not in docs
docs[(study_id, day, rep)] = doc
return docs
#@override
def id2stem(self, id):
'''
Convert document ID to filename stem
'''
study_id, day, rep = id
stem = id2stem(study_id, day, rep)
return stem
def accessions(self, **kwargs):
'''
Get the accession IDs for the filtered set of documents
'''
accessions = []
for doc in self.docs(out_type='list', **kwargs):
accessions.append(doc.accession)
return accessions
#@override
def counts(self, **kwargs):
pc = self.patient_count(**kwargs)
dc = self.doc_count(**kwargs)
sc = self.sent_count(**kwargs)
wc = self.word_count(**kwargs)
columns = ['patient count', 'doc count', 'sent count', 'word count']
df = | pd.DataFrame([[pc, dc, sc, wc]], columns=columns) | pandas.DataFrame |
# link: https://github.com/zhiyongc/Seattle-Loop-Data
import json
import util
import pandas as pd
import numpy as np
# Directory where the output files are written
output_dir = 'output/LOOP_SEATTLE'
util.ensure_dir(output_dir)
#
data_url = "input/Loop Seattle/Seattle_Loop_Dataset/"
# Prefix for the output file names
output_name = output_dir + "/LOOP_SEATTLE"
dataset = | pd.read_csv(data_url + "nodes_loop_mp_list.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 09:02:16 2017
@author: hp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
import stats
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn import model_selection, metrics
train_df=pd.read_csv('...train.csv')
test_df=pd.read_csv('...test.csv')
df=pd.concat([train_df.loc[:,'MSSubClass':'SaleCondition'],test_df.loc[:,'MSSubClass':'SaleCondition']])
'''
df.reset_index(inplace=True)
df.drop('index',axis=1,inplace=True)
df=df.reindex_axis(train_df.columns,axis=1)
'''
# Data transformation
p=train_df.loc[:,'SalePrice']
train_df.drop('SalePrice',axis=1,inplace=True)
for col in train_df.columns:
if train_df[col].dtype!=np.object:
if train_df[col].dropna().skew()>0.75:
train_df[col]=np.log(train_df[col]+1)
else:
pass
else:
pass
for col in test_df.columns:
if test_df[col].dtype!=np.object:
if test_df[col].dropna().skew()>0.75:
test_df[col]=np.log(test_df[col]+1)
else:
pass
else:
pass
# Initial data exploration
train_df['SalePrice'].describe()
#sns.distplot(pd.DataFrame(train_df['SalePrice']))
# Check category value counts
def cat_num(df,columns):
print(df[columns].value_counts())
def cat_null(df,columns,value):
df.loc[df[columns].isnull(),columns]=value
#MZ
cat_num(test_df,'MSZoning')
cat_num(train_df,'MSSubClass')
test_df['MSZoning'].groupby(test_df['MSSubClass']).agg('count')
pd.crosstab(test_df['MSZoning'],test_df['MSSubClass'])
test_df.loc[test_df['MSZoning'].isnull(),'MSZoning']
print(test_df[test_df['MSZoning'].isnull() == True])
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==20),'MSZoning']='RL'
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==30),'MSZoning']='RM'
test_df.loc[(test_df['MSZoning'].isnull())&(test_df['MSSubClass']==70),'MSZoning']='RM'
#Utilities
cat_num(test_df,'Utilities')
cat_num(train_df,'Utilities')
test_df.drop(['Utilities'],axis=1,inplace=True)
train_df.drop(['Utilities'],axis=1,inplace=True)
#Exterior
cat_num(test_df,'Exterior1st')
cat_num(test_df,'Exterior2nd')
pd.crosstab(test_df['Exterior1st'],test_df['Exterior2nd'])
print(test_df[test_df['Exterior1st'].isnull()==True])
test_df['Exterior1st'][test_df['Exterior1st'].isnull()]='VinylSd'
test_df['Exterior2nd'][test_df['Exterior2nd'].isnull()]='VinylSd'
# MasVnrType & MasVnrArea
print(test_df[['MasVnrType','MasVnrArea']][test_df['MasVnrType'].isnull()==True])
print(train_df[['MasVnrType','MasVnrArea']][train_df['MasVnrType'].isnull()==True])
cat_num(test_df, 'MasVnrType')
cat_num(train_df, 'MasVnrType')
test_df['MasVnrType'][test_df['MasVnrType'].isnull()]='None'
train_df['MasVnrType'][train_df['MasVnrType'].isnull()]='None'
test_df['MasVnrArea'][test_df['MasVnrArea'].isnull()]=0
train_df['MasVnrArea'][train_df['MasVnrArea'].isnull()]=0
#Bsmt
columns=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2','BsmtUnfSF','BsmtFullBath','BsmtHalfBath']
cat_columns=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2']
print(test_df[columns][test_df['BsmtFinType2'].isnull()==True])
print(train_df[columns][train_df['BsmtFinType2'].isnull()==True])
cat_num(test_df,'BsmtQual')
cat_num(test_df,'BsmtCond')
cat_num(test_df,'BsmtExposure')
cat_num(test_df,'BsmtFinType1')
cat_num(test_df,'BsmtFinType2')
cat_num(train_df,'BsmtQual')
cat_num(train_df,'BsmtCond')
cat_num(train_df,'BsmtExposure')
cat_num(train_df,'BsmtFinType1')
cat_num(train_df,'BsmtFinType2')
cat_null(test_df,'BsmtFinSF1',0)
cat_null(test_df,'BsmtFinSF2',0)
cat_null(test_df,'BsmtUnfSF',0)
cat_null(test_df,'BsmtFullBath',0)
cat_null(test_df,'BsmtHalfBath',0)
for col in cat_columns:
cat_null(train_df,col,'None')
pd.crosstab(test_df['BsmtQual'],test_df['BsmtCond'])
test_df.loc[(test_df['BsmtQual'].isnull())&(test_df['BsmtCond']=='TA'),'BsmtQual']='TA'
test_df.loc[(test_df['BsmtQual'].isnull())&(test_df['BsmtCond']=='Fa'),'BsmtQual']='TA'
for col in cat_columns:
cat_null(test_df,col,'None')
test_df[test_df.columns[test_df.isnull().any()].tolist()].isnull().sum()
train_df[train_df.columns[train_df.isnull().any()].tolist()].isnull().sum()
#df['BsmtFinType2'].value_counts()
#TotalBsmtSF
TB=pd.concat([train_df.TotalBsmtSF,train_df.SalePrice],axis=1)
TB.plot.scatter(x='TotalBsmtSF',y='SalePrice',ylim=(0,800000),xlim=(0,7000))
test_df.loc[test_df['TotalBsmtSF'].isnull(),'TotalBsmtSF']=0
#KitchenQual
test_df['KitchenQual'].value_counts()
pd.crosstab(train_df['KitchenQual'],train_df['KitchenAbvGr'])
test_df.loc[test_df['KitchenQual'].isnull(),'KitchenQual']='TA'
test_df.drop(['Alley','PoolQC','Fence','MiscFeature'],axis=1,inplace=True)
train_df.drop(['Alley','PoolQC','Fence','MiscFeature'],axis=1,inplace=True)
#lotarea
test_df['SqrtLotArea'] = np.sqrt(test_df['LotArea'])
train_df['SqrtLotArea'] = np.sqrt(train_df['LotArea'])
test_df['LotFrontage'].corr(test_df['LotArea'])#0.64
train_df['LotFrontage'].corr(train_df['LotArea'])#0.42
test_df['LotFrontage'].corr(test_df['SqrtLotArea'])#0.7
train_df['LotFrontage'].corr(train_df['SqrtLotArea'])#0.6
test_df['LotFrontage'][test_df['LotFrontage'].isnull()]=test_df['SqrtLotArea'][test_df['LotFrontage'].isnull()]
train_df['LotFrontage'][train_df['LotFrontage'].isnull()]=train_df['SqrtLotArea'][train_df['LotFrontage'].isnull()]
#Functional
test_df['Functional'].value_counts()
test_df['Functional'][test_df['Functional'].isnull()]='Typ'
#FireplaceQu
train_df['GarageFinish'].value_counts()
test_df['GarageFinish'].value_counts()
pd.crosstab(test_df['FireplaceQu'],test_df['Fireplaces'])
test_df['Fireplaces'][test_df['FireplaceQu'].isnull()==True].describe()
train_df['Fireplaces'][train_df['FireplaceQu'].isnull()==True].describe()
test_df['FireplaceQu'][test_df['FireplaceQu'].isnull()]='None'
train_df['FireplaceQu'][train_df['FireplaceQu'].isnull()]='None'
#Garage
col=['GarageType','GarageYrBlt','GarageFinish','GarageCars','GarageArea','GarageQual','GarageCond']
print(test_df[col][test_df['GarageType'].isnull()==True])
for columns in col:
if test_df[columns].dtype==np.object:
test_df[columns][test_df[columns].isnull()==True]='None'
else:
test_df[columns][test_df[columns].isnull()==True]=0
for columns in col:
if train_df[columns].dtype==np.object:
train_df[columns][train_df[columns].isnull()==True]='None'
else:
train_df[columns][train_df[columns].isnull()==True]=0
#SaleType
test_df['SaleType'].value_counts()
test_df['SaleType'][test_df['SaleType'].isnull()==True]='WD'
#Electrical
train_df['Electrical'].value_counts()
train_df['Electrical'][train_df['Electrical'].isnull()==True]='SBrkr'
for col in test_df.columns:
if test_df[col].dtype!=train_df[col].dtype:
print(col,test_df[col].dtype,train_df[col].dtype)
cols=['BsmtFinSF1','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF','BsmtFullBath','BsmtHalfBath','GarageCars','GarageArea']
for col in cols:
tm=test_df[col].astype(np.int64)
tm=pd.DataFrame({col:tm})
test_df.drop(col,axis=1,inplace=True)
test_df=pd.concat([test_df,tm],axis=1)
for col in cols:
tm=train_df[col].astype(np.int64)
tm=pd.DataFrame({col:tm})
train_df.drop(col,axis=1,inplace=True)
train_df=pd.concat([train_df,tm],axis=1)
test_df = test_df.replace({"MSSubClass": {20: "A", 30: "B", 40: "C", 45: "D", 50: "E",
60: "F", 70: "G", 75: "H", 80: "I", 85: "J",
90: "K", 120: "L", 150: "M", 160: "N", 180: "O", 190: "P"}})
train_df = train_df.replace({"MSSubClass": {20: "A", 30: "B", 40: "C", 45: "D", 50: "E",
60: "F", 70: "G", 75: "H", 80: "I", 85: "J",
90: "K", 120: "L", 150: "M", 160: "N", 180: "O", 190: "P"}})
test_df=test_df.replace({'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'GarageFinish':{'Fin':3,'RFn':2,'Unf':1, 'None':0}})
train_df=train_df.replace({'GarageFinish':{'Fin':3,'RFn':2,'Unf':1,'None':0}})
#heatingqc
test_df=test_df.replace({'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
train_df=train_df.replace({'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2, 'Po': 1,'None':0}})
test_df=test_df.replace({'BsmtExposure':{'Gd':5,'Av':4,'Mn':3,'No':2, 'NA': 1,'None':0}})
train_df=train_df.replace({'BsmtExposure':{'Gd':5,'Av':4,'Mn':3,'No':2, 'NA': 1,'None':0}})
test_df=test_df.replace({'BsmtFinType2':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
train_df=train_df.replace({'BsmtFinType2':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
test_df=test_df.replace({'BsmtFinType1':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
train_df=train_df.replace({'BsmtFinType1':{'GLQ':6,'ALQ':5,'BLQ':4,'Rec':3, 'LwQ': 2,'Unf':1,'None':0}})
# Quantitative variables
sns.distplot(test_df['SalePrice'], fit='norm')
sns.distplot(test_df['GrLivArea'], fit='norm')
sns.distplot(test_df['LotArea'], fit='norm')
sns.distplot(test_df['MasVnrArea'], fit='norm')  ###### drop?
sns.distplot(test_df['2ndFlrSF'], fit='norm')  ## drop
sns.distplot(test_df['WoodDeckSF'], fit='norm')  ## drop
sns.distplot(test_df['OpenPorchSF'], fit='norm')  ## drop
sns.distplot(test_df['EnclosedPorch'], fit='norm')  ## drop
sns.distplot(test_df['3SsnPorch'], fit='norm')  ## drop
sns.distplot(test_df['ScreenPorch'], fit='norm')  ## drop
sns.distplot(test_df['PoolArea'], fit='norm')  ## drop
plt.scatter(train_df['Heating'],train_df['SalePrice'])
sns.boxplot(x=train_df['Heating'],y=train_df['SalePrice'])
train_df['Heating'].value_counts()
sns.distplot(test_df['MiscVal'], fit='norm')  ## drop
sns.distplot(test_df['BsmtFinSF1'], fit='norm')  ## drop
sns.distplot(test_df['BsmtFinSF2'], fit='norm')  ## drop
sns.distplot(test_df['BsmtUnfSF'], fit='norm')
sns.distplot(train_df['TotalBsmtSF'], fit='norm')
sns.distplot(test_df['GarageArea'], fit='norm')
#TotalBsmtSF
'''
for n in train_df['TotalBsmtSF'].values:
if n>0:
train_df.loc[train_df['TotalBsmtSF']==n,'Bsmt_has']=1
else:
train_df.loc[train_df['TotalBsmtSF']==n,'Bsmt_has']=0
train_df['TotalBsmtSF']=np.log(train_df['TotalBsmtSF'])
for n in test_df['TotalBsmtSF'].values:
if n>0:
test_df.loc[test_df['TotalBsmtSF']==n,'Bsmt_has']=1
else:
test_df.loc[test_df['TotalBsmtSF']==n,'Bsmt_has']=0
'''
#
var='OverallQual'
f,ax=plt.subplots(figsize=(16,8))
data=pd.concat([train_df['KitchenQual'],train_df['SalePrice']],axis=1)
fig=sns.boxplot(x='KitchenQual',y='SalePrice',data=data)
plt.xticks(rotation=90)  # rotation: angle by which tick labels are rotated
#train_df['SalePrice'].skew()  # 1.88, right-skewed
#train_df['SalePrice'].kurt()  # 6.54, leptokurtic (sharp peak)
#train_df['logprice']=np.log(train_df['SalePrice'])
#data=pd.concat([train_df['GrLivArea'],train_df['logprice']],axis=1)
#data.plot.scatter(x='GrLivArea',y='logprice')
train_df[train_df.columns[train_df.isnull().any()].tolist()].isnull().sum()#Alley PoolQC Fence MiscFeature
test_df[test_df.columns[test_df.isnull().any()].tolist()].isnull().sum()#Alley PoolQC Fence MiscFeature
#sns.distplot(pd.DataFrame(train_df['logprice']))
#train_df['logprice'].skew()  # 0.12, slightly right-skewed
#train_df['logprice'].kurt()#0.8
# Dummy-variable encoding and continuous-value transformation
test_df.drop(['Id','Street','LandSlope','Condition2','RoofMatl','Heating','3SsnPorch','ScreenPorch','PoolArea','MiscVal'],axis=1,inplace=True)
train_df.drop(['Id','Street','LandSlope','Condition2','RoofMatl','Heating','3SsnPorch','ScreenPorch','PoolArea','MiscVal'],axis=1,inplace=True)
n=0
for col in train_df.columns:
if train_df[col].dtype==np.object:
print(col,cat_num(train_df,col))
n+=1
m=0
for col in test_df.columns:
if test_df[col].dtype==np.object:
print(col,cat_num(test_df,col))
m+=1
# Categorical features may have different levels in train vs. test, so the dummy columns may not align
df=pd.concat([train_df,test_df],axis=1)
df.reset_index(inplace=True)
# one-hot encoding
dm=pd.DataFrame()
pm=pd.DataFrame()
for col in train_df.columns:
if train_df[col].dtype==np.object:
dm=pd.get_dummies(train_df[col]).rename(columns=lambda x:col+'_'+str(x))
train_df=pd.concat([train_df,dm],axis=1)
train_df.drop(col,axis=1,inplace=True)
pm=pd.concat([pm,dm],axis=1)
dm_test=pd.DataFrame()
pm_test=pd.DataFrame()
for col in test_df.columns:
if test_df[col].dtype==np.object:
dm_test=pd.get_dummies(test_df[col]).rename(columns=lambda x:col+'_'+str(x))
test_df= | pd.concat([test_df,dm_test],axis=1) | pandas.concat |
from inferelator_prior.processor.gtf import GTF_GENENAME, GTF_CHROMOSOME, SEQ_START, SEQ_STOP
from inferelator_prior.motifs.motif_scan import MotifScan
from inferelator_prior.motifs import INFO_COL, MOTIF_COL, LEN_COL, SCAN_SCORE_COL, MOTIF_NAME_COL, SCORE_PER_BASE
import pandas as pd
import numpy as np
import pathos.multiprocessing as multiprocessing
from sklearn.cluster import DBSCAN
PRIOR_TF = 'regulator'
PRIOR_GENE = 'target'
PRIOR_COUNT = 'count'
PRIOR_SCORE = 'score'
PRIOR_MOTIF_IC = 'motif_ic'
PRIOR_PVAL = 'pvalue'
PRIOR_SEQ = 'sequence'
PRIOR_START = 'start'
PRIOR_STOP = 'stop'
PRIOR_CHR = 'chromosome'
PRIOR_COLS = [PRIOR_TF, PRIOR_GENE, PRIOR_COUNT, PRIOR_SCORE, PRIOR_MOTIF_IC, PRIOR_START, PRIOR_STOP, PRIOR_CHR]
PRIOR_FDR = 'qvalue'
PRIOR_SIG = 'significance'
MINIMUM_MOTIF_IC_BITS = None
MAXIMUM_TANDEM_DISTANCE = 100
class MotifScorer:
min_binding_ic = MINIMUM_MOTIF_IC_BITS
max_dist = MAXIMUM_TANDEM_DISTANCE
@classmethod
def set_information_criteria(cls, min_binding_ic=None, max_dist=None):
"""
Set the thresholds used for motif scoring.
:param min_binding_ic: Minimum information content (bits) required to keep a binding site
:param max_dist: Maximum distance between motif hits for them to be grouped into a tandem array
:return:
"""
cls.min_binding_ic = cls.min_binding_ic if min_binding_ic is None else min_binding_ic
cls.max_dist = cls.max_dist if max_dist is None else max_dist
@classmethod
def score_tf(cls, tf_motifs):
"""
Score a single TF
:param tf_motifs: Motif binding sites from FIMO/HOMER
:type tf_motifs: pd.DataFrame
:return: Score if the TF should be kept, None otherwise
"""
assert isinstance(tf_motifs, pd.DataFrame)
# Drop sites that don't meet threshold
if cls.min_binding_ic is not None:
tf_motifs = tf_motifs.loc[tf_motifs[SCAN_SCORE_COL] >= cls.min_binding_ic, :]
n_sites = tf_motifs.shape[0]
# If there's no data return None
if n_sites == 0:
return None
# Sort and check for overlapping motifs
tf_motifs = tf_motifs.sort_values(by=MotifScan.start_col)
overlap = tf_motifs[MotifScan.start_col] < tf_motifs[MotifScan.stop_col].shift()
# Collapse together any overlapping motifs to the maximum score on a per-base basis
if overlap.any():
tf_motifs["GROUP"] = (~overlap).cumsum()
tf_motifs = pd.concat([cls._agg_per_base(group) for _, group in tf_motifs.groupby("GROUP")])
n_sites = tf_motifs.shape[0]
# If there's only one site check it and then return
if n_sites == 1:
return cls._top_hit(tf_motifs)
# If there's only two sites check it and then return
if n_sites == 2:
consider_tandem = tf_motifs.iloc[0, :][MotifScan.stop_col] - tf_motifs.iloc[1, :][MotifScan.start_col]
if consider_tandem > cls.max_dist:
return cls._top_hit(tf_motifs)
else:
start = tf_motifs.iloc[0, :][MotifScan.start_col]
stop = tf_motifs.iloc[1, :][MotifScan.stop_col]
score = tf_motifs[SCAN_SCORE_COL].sum()
return score, 2, start, stop
# If there's more than two sites do the complicated tandem checking stuff
else:
# Find things that are in tandems
consider_tandem = (tf_motifs[MotifScan.start_col] - tf_motifs[MotifScan.stop_col].shift(1))
consider_tandem = consider_tandem <= cls.max_dist
# Skip the rest if nothing is close enough to matter
if not consider_tandem.any():
return cls._top_hit(tf_motifs)
# Ffill the tandem group to have the same start
tandem_starts = tf_motifs[MotifScan.start_col].copy()
tandem_starts.loc[consider_tandem] = pd.NA
tandem_starts = tandem_starts.ffill()
# Backfill the tandem group to have the same stop
tandem_stops = tf_motifs[MotifScan.stop_col].copy()
tandem_stops.loc[consider_tandem.shift(-1, fill_value=False)] = pd.NA
tandem_stops = tandem_stops.bfill()
# Concat, group by start/stop, and then sum IC scores
tandem_peaks = pd.concat([tandem_starts, tandem_stops, tf_motifs[SCAN_SCORE_COL]], axis=1)
tandem_peaks.columns = [PRIOR_START, PRIOR_STOP, PRIOR_SCORE]
tandem_peaks = tandem_peaks.groupby(by=[PRIOR_START, PRIOR_STOP]).agg('sum').reset_index()
# Return the highest tandem array group
peak = tandem_peaks.loc[tandem_peaks[PRIOR_SCORE].argmax(), :]
return peak[PRIOR_SCORE], peak.shape[0], peak[PRIOR_START], peak[PRIOR_STOP]
@classmethod
def preprocess_motifs(cls, gene_motif_data, motif_information):
if cls.min_binding_ic is not None:
motif_information = motif_information.loc[motif_information[INFO_COL] >= cls.min_binding_ic, :]
keeper_motifs = motif_information[MOTIF_COL].unique().tolist()
keeper_idx = (gene_motif_data[MotifScan.name_col].isin(keeper_motifs))
keeper_idx &= (gene_motif_data[SCAN_SCORE_COL] >= cls.min_binding_ic)
return gene_motif_data.loc[keeper_idx, :], motif_information
else:
return gene_motif_data, motif_information
@staticmethod
def _top_hit(tf_motifs):
if tf_motifs.shape[0] == 0:
return None
elif tf_motifs.shape[0] == 1:
top_hit = tf_motifs.iloc[0, :]
else:
top_hit = tf_motifs.iloc[tf_motifs[SCAN_SCORE_COL].values.argmax(), :]
start = MotifScorer._first_value(top_hit[MotifScan.start_col])
stop = MotifScorer._first_value(top_hit[MotifScan.stop_col])
score = MotifScorer._first_value(top_hit[SCAN_SCORE_COL])
return score, 1, start, stop
@staticmethod
def _first_value(series):
try:
return series.iloc[0]
except AttributeError:
return series
@staticmethod
def _agg_per_base(overlap_df):
"""
Aggregate an overlapping set of motif peaks by summing the maximum per-base IC for each base
:param overlap_df:
:return:
"""
if len(overlap_df) == 1:
return overlap_df[[MotifScan.start_col, MotifScan.stop_col, SCAN_SCORE_COL, MOTIF_NAME_COL]]
overlap_df.reset_index(inplace=True)
# Melt the per-base information contents for each matching motif into a new dataframe
# Base number ["B"] and float score ["S"]
new_df = pd.DataFrame([(a, b) for i in overlap_df.index
for a, b in zip(range(overlap_df.loc[i, MotifScan.start_col],
overlap_df.loc[i, MotifScan.stop_col] + 1),
overlap_df.loc[i, SCORE_PER_BASE])], columns=["B", "S"])
# Return a new dataframe with the maximum per-base scores aggregated
return pd.DataFrame({MotifScan.start_col: [overlap_df[MotifScan.start_col].min()],
MotifScan.stop_col: [overlap_df[MotifScan.stop_col].max()],
SCAN_SCORE_COL: new_df.groupby("B").agg('max').sum(),
MOTIF_NAME_COL: [overlap_df[MOTIF_NAME_COL].unique()[0]]})
def summarize_target_per_regulator(genes, motif_peaks, motif_information, num_workers=None, debug=False,
by_chromosome=True, silent=False):
"""
Process a large dataframe of motif hits into a dataframe with the best hit for each regulator-target pair
:param genes: pd.DataFrame [G x n]
:param motif_peaks: pd.DataFrame
Motif search data loaded from FIMO or HOMER
:param motif_information: pd.DataFrame [n x 5]
Motif characteristics loaded from a MEME file
:param num_workers: int
Number of cores to use
:return summarized_data: pd.DataFrame [G x K]
An information matrix connecting genes and regulators
"""
pfunc = print if not silent else lambda *x: None
motif_ids = motif_information[MOTIF_COL].unique()
motif_names = motif_information[MOTIF_NAME_COL].unique()
pfunc("Building prior from {g} genes and {k} Motifs ({t} TFs)".format(g=genes.shape[0], k=len(motif_ids),
t=len(motif_names)))
motif_peaks, motif_information = MotifScorer.preprocess_motifs(motif_peaks, motif_information)
pfunc("Preliminary search identified {n} binding sites".format(n=motif_peaks.shape[0]))
# Trim down the motif dataframe and put it into a dict by chromosome
motif_peaks = motif_peaks.reindex([MotifScan.name_col, MotifScan.chromosome_col, MotifScan.start_col,
MotifScan.stop_col, SCAN_SCORE_COL, SCORE_PER_BASE], axis=1)
motif_id_to_name = motif_information.reindex([MOTIF_COL, MOTIF_NAME_COL], axis=1)
invalid_names = (pd.isnull(motif_id_to_name[MOTIF_NAME_COL]) |
(motif_id_to_name[MOTIF_NAME_COL] == "") |
(motif_id_to_name is None))
motif_id_to_name.loc[invalid_names, MOTIF_NAME_COL] = motif_id_to_name.loc[invalid_names, MOTIF_COL]
motif_peaks = motif_peaks.join(motif_id_to_name.set_index(MOTIF_COL, verify_integrity=True), on=MotifScan.name_col)
motif_peaks = {chromosome: df for chromosome, df in motif_peaks.groupby(MotifScan.chromosome_col)}
_gen_func = _gene_gen if by_chromosome else _gene_gen_no_chromosome
if num_workers == 1:
prior_data = list(map(lambda x: _build_prior_for_gene(*x),
_gen_func(genes, motif_peaks, motif_information, debug=debug, silent=silent)))
else:
with multiprocessing.Pool(num_workers, maxtasksperchild=1000) as pool:
prior_data = pool.starmap(_build_prior_for_gene,
_gen_func(genes, motif_peaks, motif_information, debug=debug, silent=silent),
chunksize=20)
# Combine priors for all genes
prior_data = pd.concat(prior_data).reset_index(drop=True)
prior_data[PRIOR_START] = prior_data[PRIOR_START].astype(int)
prior_data[PRIOR_STOP] = prior_data[PRIOR_STOP].astype(int)
# Pivot to a matrix, extend to all TFs, and fill with 0s
summarized_data = prior_data.pivot(index=PRIOR_GENE, columns=PRIOR_TF, values=PRIOR_SCORE)
summarized_data = summarized_data.reindex(motif_names, axis=1).reindex(genes[GTF_GENENAME], axis=0).fillna(0)
summarized_data.index.name = PRIOR_GENE
return summarized_data, prior_data
def build_prior_from_motifs(raw_matrix, num_workers=None, seed=42, do_threshold=True, debug=False, silent=False):
"""
Construct a prior [G x K] interaction matrix
:param raw_matrix: pd.DataFrame [G x K]
Scored matrix between targets and regulators
:param num_workers: int
Number of cores to use
:param seed: int
Random seed for numpy random pool
:param do_threshold: bool
Threshold using DBSCAN if true; retain all non-zero edges if false
:return prior_matrix: pd.DataFrame [G x K]
An interaction matrix data frame
"""
np.random.seed(seed)
pfunc = print if not silent else lambda *x: None
if do_threshold:
# Threshold per-TF using DBSCAN
pfunc("Selecting edges to retain with DBSCAN")
prior_matrix = pd.DataFrame(False, index=raw_matrix.index, columns=raw_matrix.columns)
if num_workers == 1:
prior_matrix_idx = list(map(lambda x: _prior_clusterer(*x),
_prior_gen(raw_matrix, debug=debug, silent=silent)))
else:
with multiprocessing.Pool(num_workers, maxtasksperchild=1) as pool:
prior_matrix_idx = pool.starmap(_prior_clusterer, _prior_gen(raw_matrix, debug=debug, silent=silent),
chunksize=1)
pfunc("Completed edge selection with DBSCAN")
for reg, reg_idx in prior_matrix_idx:
prior_matrix.loc[reg_idx, reg] = True
return prior_matrix
else:
pfunc("Retaining all edges")
return raw_matrix != 0
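# Sketch of the intended two-step use (the input variable names here are illustrative,
# not defined in this module):
#   scores, raw_hits = summarize_target_per_regulator(genes_df, motif_hits_df, motif_info_df, num_workers=4)
#   prior = build_prior_from_motifs(scores, num_workers=4, seed=42, do_threshold=True)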
def _prior_gen(prior_matrix, debug=False, silent=False):
n = len(prior_matrix.columns)
for i, col_name in enumerate(prior_matrix.columns):
yield i, col_name, prior_matrix[col_name], n, debug, silent
def _prior_clusterer(i, col_name, col_data, n, debug=False, silent=False):
pfunc = print if not silent else lambda *x: None
if not debug and (i % 50 == 0):
pfunc("Clustering {col} [{i} / {n}]".format(i=i, n=n, col=col_name))
keep_idx = _find_outliers_dbscan(col_data)
if debug:
pfunc("Keeping {ed} edges for gene {col} [{i} / {n}]".format(ed=keep_idx.sum(), i=i, n=n, col=col_name))
return col_name, keep_idx
def _gene_gen_no_chromosome(genes, motif_peaks, motif_information, debug=False, silent=False):
"""
Yield the peaks for each group by seqname (which should be the gene promoter)
:param genes:
:param motif_peaks:
:param motif_information:
:param debug:
:return:
"""
for i, gene in enumerate(motif_peaks.keys()):
gene_loc = {GTF_GENENAME: gene, GTF_CHROMOSOME: None}
if i % 100 == 0 and not silent:
print("Processing gene {i} [{gn}]".format(i=i, gn=gene))
yield gene_loc, motif_peaks[gene], motif_information
def _gene_gen(genes, motif_peaks, motif_information, debug=False, silent=False):
"""
Yield the peaks for each gene
:param genes:
:param motif_peaks:
:param motif_information:
:param debug:
:return:
"""
gene_names = genes[GTF_GENENAME].unique().tolist()
bad_chr = {}
pfunc = print if not silent else lambda *x: None
for i, gene in enumerate(gene_names):
gene_data = genes.loc[genes[GTF_GENENAME] == gene, :]
gene_loc = {GTF_GENENAME: gene, GTF_CHROMOSOME: gene_data.iloc[0, :][GTF_CHROMOSOME]}
if i % 100 == 0:
pfunc("Processing gene {i} [{gn}]".format(i=i, gn=gene))
gene_motifs = []
for _, row in gene_data.iterrows():
gene_chr, gene_start, gene_stop = row[GTF_CHROMOSOME], row[SEQ_START], row[SEQ_STOP]
try:
motif_data = motif_peaks[gene_chr]
except KeyError:
# If this chromosome is some weird scaffold or not in the genome, skip it
pfunc("Chromosome {c} not found; skipping gene {g}".format(c=gene_chr, g=gene)) if debug else None
if gene_chr not in bad_chr.keys():
bad_chr[gene_chr] = 1
else:
bad_chr[gene_chr] += 1
continue
motif_mask = motif_data[MotifScan.stop_col] >= gene_start
motif_mask &= motif_data[MotifScan.start_col] <= gene_stop
gene_motifs.append(motif_data.loc[motif_mask, :])
if len(gene_motifs) == 0:
continue
gene_motifs = pd.concat(gene_motifs)
yield gene_loc, gene_motifs, motif_information
for chromosome, bad_genes in bad_chr.items():
pfunc("{n} genes annotated to chromosome {c} have been skipped".format(n=bad_genes, c=chromosome))
def _find_outliers_dbscan(tf_data, max_sparsity=0.05):
scores, weights = np.unique(tf_data.values, return_counts=True)
labels = DBSCAN(min_samples=max(int(scores.size * 0.001), 10), eps=1, n_jobs=None)\
.fit_predict(scores.reshape(-1, 1), sample_weight=weights)
# Short circuit if all the labels are outliers
# This shouldn't happen real-world unless there aren't many genes in the network
if np.all(labels == -1):
return | pd.Series(tf_data.values > 0, index=tf_data.index) | pandas.Series |
import pandas as pd
import numpy as np
import requests
import json
from dotenv import load_dotenv, find_dotenv
import os
from os.path import abspath, join, dirname
import pygsheets
from geopy.distance import great_circle
import time
import datetime
from datetime import date
def get_google_api_key():
# load api keys file
dotenv_path = join(dirname(__file__), 'settings.env')
load_dotenv(dotenv_path)
# get api key
google_api_key = os.getenv("google_api_key")
# return api key
return google_api_key
def get_realtor_api_key():
# load api keys file
dotenv_path = join(dirname(__file__), 'settings.env')
load_dotenv(dotenv_path)
# get api key
realtor_api_key = os.getenv("realtor_api_key")
# return api key
return realtor_api_key
def api_property_list_for_sale(api_key, postal_code, prop_type, limit):
# url for api
url = "https://realtor.p.rapidapi.com/properties/v2/list-for-sale"
# enter parameters
querystring = {
"offset":"0",
"limit":limit,
"postal_code": postal_code,
"prop_type":prop_type
}
# header
headers = {
'x-rapidapi-host': "realtor.p.rapidapi.com",
'x-rapidapi-key': api_key
}
# response
response = requests.request("GET", url, headers=headers, params=querystring)
return response.json() # json format
def process_list_for_sale_response(response_json):
"""
Process the list for sale API response.
Convert each listing to a dataframe, append to a list, and concatenate to one dataframe.
Parameters
----------
@response_json [dictionary]: API response for list for sale
Returns
-------
[dataframe] Dataframe of all list for sale responses
"""
# empty dataframe
dataframe_list = []
# iterate through each for sale listing
for l in response_json['properties']:
# convert each listing to dataframe
_temp_df = pd.DataFrame.from_dict(l, orient='index').T
# append to dataframe list for all listings
dataframe_list.append(_temp_df)
# concatenate all dataframes, for missing col values enter null value
return pd.concat(dataframe_list, axis=0, ignore_index=True, sort=False)
def post_to_sheets():
client = pygsheets.authorize(service_file='Realtor-viz-data-b5a9fbcd94bf.json')
print("-----------------Authorized--------------------")
sheet = client.open('MLS ' + todaysDate)
print("-----------------Sheet Opened------------------")
fullList_wks = sheet[2]
buyList_wks = sheet[3]
compareList = sheet[4]
print("-----------------All Tabs Accessed----------")
fullList_wks.set_dataframe(df_properties_for_sale_raw,(1,1))
buyList_wks.set_dataframe(df_buy_list,(1,1))
compareList.set_dataframe(df_compare_list,(1,1))
print("-----------------Data Updated------------------")
def post_to_opportunity_tab(spacer):
client = pygsheets.authorize(service_file='Realtor-viz-data-b5a9fbcd94bf.json')
print("-----------------Authorized--------------------")
sheet = client.open('MLS ' + todaysDate)
print("-----------------Sheet Opened------------------")
opporList_wks = sheet[0]
print("-----------------Opportunity Tab Accessed----------")
opporList_wks.set_dataframe(df_opportunity_list,(spacer,1))
print("-----------------Data Updated------------------")
def post_to_neighbor_tab(neighbor_spacing):
client = pygsheets.authorize(service_file='Realtor-viz-data-b5a9fbcd94bf.json')
print("-----------------Authorized--------------------")
sheet = client.open('MLS ' + todaysDate)
print("-----------------Sheet Opened------------------")
opporList_wks = sheet[5]
print("-----------------Opportunity Tab Accessed----------")
opporList_wks.set_dataframe(df_neighbor_list,(neighbor_spacing,1))
print("-----------------Data Updated------------------")
def post_to_comparables_tab(group_spacing):
client = pygsheets.authorize(service_file='Realtor-viz-data-b5a9fbcd94bf.json')
print("-----------------Authorized--------------------")
sheet = client.open('MLS ' + todaysDate)
print("-----------------Sheet Opened------------------")
analyzedList_wks = sheet[1]
print("-----------------Analyzed Tab Accessed----------")
analyzedList_wks.set_dataframe(df_analyzed_list,(group_spacing,1))
print("-----------------Data Updated------------------")
def is_similar():
def group_interior_sqft():
if buyable_building_size - compared_building_size <= 500 and buyable_building_size - compared_building_size >= -500:
return True
def group_bed():
if buyable_beds - compared_beds <= 1 and buyable_beds - compared_beds >= -1:
return True
def group_bath():
if buyable_baths - compared_baths <= 1 and buyable_baths - compared_baths >= -1:
return True
def group_lot_sqft():
if buyable_lot_size - compared_lot_size <= 500 and buyable_lot_size - compared_lot_size >= -500:
return True
def group_distance():
if great_circle(buyable_address, compared_address).miles <= 1:
return True
if group_interior_sqft() and group_bed() and group_bath() and group_lot_sqft() and group_distance()== True:
return True
def is_opportunity():
if compared_price - buyable_price >= 10000:
return True
#---------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------------------
now = datetime.datetime.now()
todaysDate = str(now.month) + '/' + str(now.day) + '/' + str(now.year)
realtor_api_key = get_realtor_api_key() # api key to access data
prop_type = "single_family"
postal_code = "60647" #Searches Logan Square, Chicago
limit = 100000
#store full list in a variable
property_list_for_sale_response = api_property_list_for_sale(api_key=realtor_api_key, postal_code=postal_code, prop_type=prop_type, limit=limit)
#convert full list json into a dataframe
df_properties_for_sale_raw = process_list_for_sale_response(response_json=property_list_for_sale_response)
df_buy_list = | pd.DataFrame() | pandas.DataFrame |
"""Sinks for the event loop."""
import logging
try:
import pandas as pd
import numpy as np
NO_PANDAS = False
except ImportError:
NO_PANDAS = True
import paho.mqtt.client as mqtt
LOGGER = logging.getLogger(__name__)
class Sink:
def process_value(self, param_name, timestamp, value):
"""Handle a single datapoint."""
raise NotImplementedError('subclass to implement.')
def finalise(self):
"""Tidy-up, handle any needed serialisation, etc."""
pass
class PrintingSink(Sink):
"""Sink that prints all values."""
def process_value(self, param_name, timestamp, value):
"""Handle a single datapoint."""
print(param_name, timestamp, value)
class LoggingSink(Sink):
"""Sink that logs all values."""
def __init__(self):
self.logger = logging.getLogger('sink')
def process_value(self, param_name, timestamp, value):
"""Handle a single datapoint."""
        self.logger.warning('%s - %s - %f', timestamp, param_name, value)
class MQTTSink(Sink):
"""Sink that logs all values."""
def __init__(self, broker=None, topic_root=''):
self.client = mqtt.Client()
self.client.connect(broker, 1883, 60)
self.topic_root = topic_root + '/'
def process_value(self, param_name, timestamp, value):
"""Handle a single datapoint."""
topic = self.topic_root + param_name
self.client.publish(topic, value)
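# Minimal usage sketch for MQTTSink (broker host and topic root are assumed values):
#   sink = MQTTSink(broker='localhost', topic_root='sensors')
#   sink.process_value('temperature', None, 21.5)  # publishes to 'sensors/temperature'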
def round_datetime(dtime, freq):
"""Rounds datetime to freq."""
freq = pd.tseries.frequencies.to_offset(freq).nanos / 1000
dtime = np.datetime64(dtime)
return (np.round(dtime.astype('i8') /
float(freq)) * freq).astype(
'datetime64[us]'
).astype('datetime64[ns]')
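# Rough usage sketch for round_datetime (values are illustrative):
#   round_datetime('2020-01-01T12:34:56', '15min')
#   -> numpy.datetime64('2020-01-01T12:30:00.000000000')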
class DataFrameSink(Sink):
"""Sink that logs all values."""
def __init__(self):
self.df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import sqlalchemy
from directories import print_log, engine
N_WINDOWS = 48
def main():
with engine.connect() as connection:
vaso_episodes = | pd.read_sql("pressors_by_icustay", con=connection, index_col="ICUSTAY_ID") | pandas.read_sql |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/9/23 15:38
Desc: Drewry World Container Index
https://www.drewry.co.uk/supply-chain-advisors/supply-chain-expertise/world-container-index-assessed-by-drewry
https://infogram.com/world-container-index-1h17493095xl4zj
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.utils import demjson
def drewry_wci_index(symbol: str = "composite") -> pd.DataFrame:
"""
    Drewry World Container Index
    https://infogram.com/world-container-index-1h17493095xl4zj
    :param symbol: choice of {"composite", "shanghai-rotterdam", "rotterdam-shanghai", "shanghai-los angeles", "los angeles-shanghai", "shanghai-genoa", "new york-rotterdam", "rotterdam-new york"}
    :type symbol: str
    :return: Drewry World Container Index data
:rtype: pandas.DataFrame
"""
symbol_map = {
"composite": 0,
"shanghai-rotterdam": 1,
"rotterdam-shanghai": 2,
"shanghai-los angeles": 3,
"los angeles-shanghai": 4,
"shanghai-genoa": 5,
"new york-rotterdam": 6,
"rotterdam-new york": 7,
}
url = "https://infogram.com/world-container-index-1h17493095xl4zj"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
data_text = soup.find_all("script")[-5].string.strip("window.infographicData=")[:-1]
data_json = demjson.decode(data_text)
temp_df = | pd.DataFrame(data_json["elements"][2]["data"][symbol_map[symbol]]) | pandas.DataFrame |
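# Hedged usage sketch for drewry_wci_index (the function body is truncated at the
# dataset-row boundary above, so this assumes it returns the index as a DataFrame):
#   import akshare as ak
#   wci_df = ak.drewry_wci_index(symbol="shanghai-rotterdam")
#   print(wci_df.head())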
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_categorial_assigning_ops(self):
orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_take(self):
s = Series([-1, 5, 6, 2, 4])
actual = s.take([1, 3, 4])
expected = Series([5, 2, 4], index=[1, 3, 4])
tm.assert_series_equal(actual, expected)
actual = s.take([-1, 3, 4])
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
pytest.raises(IndexError, s.take, [1, 10])
pytest.raises(IndexError, s.take, [2, 5])
with tm.assert_produces_warning(FutureWarning):
s.take([-1, 3, 4], convert=False)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_setitem_na(self):
# these induce dtype changes
expected = | Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]) | pandas.Series |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from unittest import TestCase
from parameterized import parameterized
from numpy import nan
from pandas import (
Series,
DataFrame,
date_range,
MultiIndex,
Int64Index,
Index,
DatetimeIndex,
Timedelta
)
from pandas.tseries.offsets import (BDay, Day, CDay)
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
from .. performance import (factor_information_coefficient,
mean_information_coefficient,
mean_return_by_quantile,
quantile_turnover,
factor_rank_autocorrelation,
factor_returns, factor_alpha_beta,
cumulative_returns, factor_weights,
common_start_returns,
average_cumulative_return_by_quantile)
from .. utils import (get_forward_returns_columns,
get_clean_factor_and_forward_returns)
class PerformanceTestCase(TestCase):
dr = date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = DataFrame()
factor_data['factor'] = factor
factor_data['group'] = Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],
dtype="category")
@parameterized.expand([(factor_data, [4, 3, 2, 1, 1, 2, 3, 4],
False, False,
dr,
[-1., -1.],
),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1],
False, False,
dr,
[1., 1.],
),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1],
False, True,
MultiIndex.from_product(
[dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.],
),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1],
True, True,
MultiIndex.from_product(
[dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.],
)])
def test_information_coefficient(self,
factor_data,
forward_returns,
group_adjust,
by_group,
expected_ix,
expected_ic_val):
factor_data['1D'] = Series(index=factor_data.index,
data=forward_returns)
ic = factor_information_coefficient(factor_data=factor_data,
group_adjust=group_adjust,
by_group=by_group)
expected_ic_df = DataFrame(index=expected_ix,
columns=Index(['1D'], dtype='object'),
data=expected_ic_val)
assert_frame_equal(ic, expected_ic_df)
@parameterized.expand([(factor_data,
[4, 3, 2, 1, 1, 2, 3, 4],
False,
False,
'D',
dr,
[-1., -1.]),
(factor_data,
[1, 2, 3, 4, 4, 3, 2, 1],
False,
False,
'W',
DatetimeIndex(['2015-01-04'],
name='date',
freq='W-SUN'),
[1.]),
(factor_data,
[1, 2, 3, 4, 4, 3, 2, 1],
False,
True,
None,
Int64Index([1, 2], name='group'),
[1., 1.]),
(factor_data,
[1, 2, 3, 4, 4, 3, 2, 1],
False,
True,
'W',
MultiIndex.from_product(
[DatetimeIndex(['2015-01-04'],
name='date',
freq='W-SUN'),
[1, 2]], names=['date', 'group']),
[1., 1.])])
def test_mean_information_coefficient(self,
factor_data,
forward_returns,
group_adjust,
by_group,
by_time,
expected_ix,
expected_ic_val):
factor_data['1D'] = Series(index=factor_data.index,
data=forward_returns)
ic = mean_information_coefficient(factor_data,
group_adjust=group_adjust,
by_group=by_group,
by_time=by_time)
expected_ic_df = DataFrame(index=expected_ix,
columns=Index(['1D'], dtype='object'),
data=expected_ic_val)
assert_frame_equal(ic, expected_ic_df)
@parameterized.expand([([1.1, 1.2, 1.1, 1.2, 1.1, 1.2],
[[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]],
2, False,
[0.1, 0.2]),
([1.1, 1.2, 1.1, 1.2, 1.1, 1.2],
[[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]],
2, True,
[0.1, 0.1, 0.2, 0.2]),
([1.1, 1.1, 1.1, 1.2, 1.2, 1.2],
[[1, 2, 3, 1, 2, 3],
[1, 2, 3, 1, 2, 3],
[1, 2, 3, 1, 2, 3]],
3, False,
[0.15, 0.15, 0.15]),
([1.1, 1.1, 1.1, 1.2, 1.2, 1.2],
[[1, 2, 3, 1, 2, 3],
[1, 2, 3, 1, 2, 3],
[1, 2, 3, 1, 2, 3]],
3, True,
[0.1, 0.2, 0.1, 0.2, 0.1, 0.2]),
([1.5, 1.5, 1.2, 1.0, 1.0, 1.0],
[[1, 1, 2, 2, 2, 2],
[2, 2, 1, 2, 2, 2],
[2, 2, 1, 2, 2, 2]],
2, False,
[0.3, 0.15]),
([1.5, 1.5, 1.2, 1.0, 1.0, 1.0],
[[1, 1, 3, 2, 2, 2],
[3, 3, 1, 2, 2, 2],
[3, 3, 1, 2, 2, 2]],
3, False,
[0.3, 0.0, 0.4]),
([1.6, 1.6, 1.0, 1.0, 1.0, 1.0],
[[1, 1, 2, 2, 2, 2],
[2, 2, 1, 1, 1, 1],
[2, 2, 1, 1, 1, 1]],
2, False,
[0.2, 0.4]),
([1.6, 1.6, 1.0, 1.6, 1.6, 1.0],
[[1, 1, 2, 1, 1, 2],
[2, 2, 1, 2, 2, 1],
[2, 2, 1, 2, 2, 1]],
2, True,
[0.2, 0.2, 0.4, 0.4])])
def test_mean_return_by_quantile(self,
daily_rets,
factor,
bins,
by_group,
expected_data):
"""
Test mean_return_by_quantile
"""
tickers = ['A', 'B', 'C', 'D', 'E', 'F']
factor_groups = {'A': 1, 'B': 1, 'C': 1, 'D': 2, 'E': 2, 'F': 2}
price_data = [[daily_rets[0]**i, daily_rets[1]**i, daily_rets[2]**i,
daily_rets[3]**i, daily_rets[4]**i, daily_rets[5]**i]
for i in range(1, 5)] # 4 days
start = '2015-1-11'
factor_end = '2015-1-13'
price_end = '2015-1-14' # 1D fwd returns
price_index = date_range(start=start, end=price_end)
price_index.name = 'date'
prices = DataFrame(index=price_index, columns=tickers, data=price_data)
factor_index = date_range(start=start, end=factor_end)
factor_index.name = 'date'
factor = DataFrame(index=factor_index, columns=tickers,
data=factor).stack()
factor_data = get_clean_factor_and_forward_returns(
factor, prices,
groupby=factor_groups,
quantiles=None,
bins=bins,
periods=(1,))
mean_quant_ret, std_quantile = \
mean_return_by_quantile(factor_data,
by_date=False,
by_group=by_group,
demeaned=False,
group_adjust=False)
expected = DataFrame(index=mean_quant_ret.index.copy(),
columns=mean_quant_ret.columns.copy(),
data=expected_data)
expected.index.name = 'factor_quantile'
assert_frame_equal(mean_quant_ret, expected)
@parameterized.expand([([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 4.0, 1,
[nan, 1.0, 1.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 4.0, 1,
[nan, 1.0, 1.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 4.0, 2,
[nan, nan, 0.0, 1.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 4.0, 2,
[nan, nan, 0.0, 1.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 4.0, 3,
[nan, nan, nan, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 4.0, 3,
[nan, nan, nan, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 3.0, 1,
[nan, 0.0, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 3.0, 1,
[nan, 0.0, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 3.0, 2,
[nan, nan, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 3.0, 2,
[nan, nan, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 3.0, 3,
[nan, nan, nan, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 3.0, 3,
[nan, nan, nan, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0]],
'1B', 2.0, 1,
[nan, 1.0, 1.0, 1.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0]],
'1D', 2.0, 1,
[nan, 1.0, 1.0, 1.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0]],
'1B', 3.0, 4,
[nan, nan, nan, nan,
0., 0., 0., 0.,
0., 0., 0., 0.]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0]],
'1D', 3.0, 4,
[nan, nan, nan, nan,
0., 0., 0., 0.,
0., 0., 0., 0.]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 3.0, 10,
[nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan,
0., 1.]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 3.0, 2.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 3.0, 10,
[nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan,
0., 1.])
])
def test_quantile_turnover(self, quantile_values, freq, test_quantile,
period, expected_vals):
dr = date_range(start='2015-1-1', periods=len(quantile_values),
freq=freq)
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
quantized_test_factor = Series(DataFrame(index=dr,
columns=tickers,
data=quantile_values)
.stack())
quantized_test_factor.index = quantized_test_factor.index.set_names(
['date', 'asset'])
to = quantile_turnover(quantized_test_factor, test_quantile, period)
expected = Series(
index=quantized_test_factor.index.levels[0], data=expected_vals)
expected.name = test_quantile
assert_series_equal(to, expected)
@parameterized.expand([([[3, 4, 2, 1, nan],
[3, 4, -2, -1, nan],
[3, nan, nan, 1, 4]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
False, False, False,
[0.30, 0.40, 0.20, 0.10,
0.30, 0.40, -0.20, -0.10,
0.375, 0.125, 0.50]),
([[3, 4, 2, 1, nan],
[3, 4, -2, -1, nan],
[3, nan, nan, 1, 4]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
True, False, False,
[0.125, 0.375, -0.125, -0.375,
0.20, 0.30, -0.30, -0.20,
0.10, -0.50, 0.40]),
([[3, 4, 2, 1, nan],
[-3, 4, -2, 1, nan],
[2, 2, 2, 3, 1]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
False, True, False,
[0.30, 0.40, 0.20, 0.10,
-0.30, 0.40, -0.20, 0.10,
0.20, 0.20, 0.20, 0.30, 0.10]),
([[3, 4, 2, 1, nan],
[3, 4, -2, -1, nan],
[3, nan, nan, 1, 4]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
True, True, False,
[0.25, 0.25, -0.25, -0.25,
0.25, 0.25, -0.25, -0.25,
-0.50, nan, 0.50]),
([[3, 4, 2, 1, 5],
[3, 4, -2, -1, 5],
[3, nan, nan, 1, nan]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
False, False, True,
[0.20, 0.20, 0.20, 0.20, 0.20,
0.20, 0.20, -0.20, -0.20, 0.20,
0.50, 0.50]),
([[1, 4, 2, 3, nan],
[1, 4, -2, -3, nan],
[3, nan, nan, 2, 7]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
True, False, True,
[-0.25, 0.25, -0.25, 0.25,
0.25, 0.25, -0.25, -0.25,
0., -0.50, 0.50]),
([[3, 4, 2, 1, nan],
[-3, 4, -2, 1, nan],
[3, nan, nan, 1, 4],
[3, nan, nan, -1, 4],
[3, nan, nan, 1, -4]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
False, True, True,
[0.25, 0.25, 0.25, 0.25,
-0.25, 0.25, -0.25, 0.25,
0.25, 0.50, 0.25,
0.25, -0.50, 0.25,
0.25, 0.50, -0.25]),
([[1, 4, 2, 3, nan],
[3, 4, -2, -1, nan],
[3, nan, nan, 2, 7],
[3, nan, nan, 2, -7]],
['A', 'B', 'C', 'D', 'E'],
{'A': 'Group1', 'B': 'Group2', 'C': 'Group1',
'D': 'Group2', 'E': 'Group1'},
True, True, True,
[-0.25, 0.25, 0.25, -0.25,
0.25, 0.25, -0.25, -0.25,
-0.50, nan, 0.50,
0.50, nan, -0.50]),
])
def test_factor_weights(self,
factor_vals,
tickers,
groups,
demeaned,
group_adjust,
equal_weight,
expected_vals):
index = date_range('1/12/2000', periods=len(factor_vals))
factor = DataFrame(index=index,
columns=tickers,
data=factor_vals).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = DataFrame()
factor_data['factor'] = factor
groups = Series(groups)
factor_data['group'] = \
Series(index=factor.index,
data=groups[factor.index.get_level_values('asset')].values)
weights = \
factor_weights(factor_data, demeaned, group_adjust, equal_weight)
expected = Series(data=expected_vals,
index=factor_data.index,
name='factor')
assert_series_equal(weights, expected)
@parameterized.expand([([1, 2, 3, 4, 4, 3, 2, 1],
[4, 3, 2, 1, 1, 2, 3, 4],
False,
[-1.25000, -1.25000]),
([1, 1, 1, 1, 1, 1, 1, 1],
[4, 3, 2, 1, 1, 2, 3, 4],
False,
[nan, nan]),
([1, 2, 3, 4, 4, 3, 2, 1],
[4, 3, 2, 1, 1, 2, 3, 4],
True,
[-0.5, -0.5]),
([1, 2, 3, 4, 1, 2, 3, 4],
[1, 4, 1, 2, 1, 2, 2, 1],
True,
[1.0, 0.0]),
([1, 1, 1, 1, 1, 1, 1, 1],
[4, 3, 2, 1, 1, 2, 3, 4],
True,
[nan, nan])
])
def test_factor_returns(self,
factor_vals,
fwd_return_vals,
group_adjust,
expected_vals):
factor_data = self.factor_data.copy()
factor_data['1D'] = fwd_return_vals
factor_data['factor'] = factor_vals
factor_returns_s = factor_returns(factor_data=factor_data,
demeaned=True,
group_adjust=group_adjust)
expected = DataFrame(
index=self.dr,
data=expected_vals,
columns=get_forward_returns_columns(
factor_data.columns))
assert_frame_equal(factor_returns_s, expected)
@parameterized.expand([([1, 2, 3, 4, 1, 1, 1, 1],
-1,
5. / 6.)])
def test_factor_alpha_beta(self, fwd_return_vals, alpha, beta):
factor_data = self.factor_data.copy()
factor_data['1D'] = fwd_return_vals
ab = factor_alpha_beta(factor_data=factor_data)
expected = DataFrame(columns=['1D'],
index=['Ann. alpha', 'beta'],
data=[alpha, beta])
assert_frame_equal(ab, expected)
@parameterized.expand([
(
[1.0, 0.5, 1.0, 0.5, 0.5],
'1D',
'1D',
[2.0, 3.0, 6.0, 9.0, 13.50],
),
(
[0.1, 0.1, 0.1, 0.1, 0.1],
'1D',
'1D',
[1.1, 1.21, 1.331, 1.4641, 1.61051],
),
(
[-0.1, -0.1, -0.1, -0.1, -0.1],
'1D',
'1D',
[0.9, 0.81, 0.729, 0.6561, 0.59049],
),
(
[1.0, 0.5, 1.0, 0.5, 0.5],
'1B',
'1D',
[2.0, 3.0, 6.0, 9.0, 13.50],
),
(
[0.1, 0.1, 0.1, 0.1, 0.1],
'1B',
'1D',
[1.1, 1.21, 1.331, 1.4641, 1.61051],
),
(
[-0.1, -0.1, -0.1, -0.1, -0.1],
'1B',
'1D',
[0.9, 0.81, 0.729, 0.6561, 0.59049],
),
(
[1.0, 0.5, 1.0, 0.5, 0.5],
'1CD',
'1D',
[2.0, 3.0, 6.0, 9.0, 13.50],
),
(
[0.1, 0.1, 0.1, 0.1, 0.1],
'1CD',
'1D',
[1.1, 1.21, 1.331, 1.4641, 1.61051],
),
(
[-0.1, -0.1, -0.1, -0.1, -0.1],
'1CD',
'1D',
[0.9, 0.81, 0.729, 0.6561, 0.59049],
),
])
def test_cumulative_returns(self,
returns,
ret_freq,
period_len,
expected_vals):
if 'CD' in ret_freq:
ret_freq_class = CDay(weekmask='Tue Wed Thu Fri Sun')
ret_freq = ret_freq_class
elif 'B' in ret_freq:
ret_freq_class = BDay()
else:
ret_freq_class = Day()
period_len = Timedelta(period_len)
index = date_range('1/1/1999', periods=len(returns), freq=ret_freq)
returns = Series(returns, index=index)
cum_ret = cumulative_returns(returns)
expected = Series(expected_vals, index=cum_ret.index)
assert_series_equal(cum_ret, expected, check_less_precise=True)
@parameterized.expand([([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 1,
[nan, 1.0, 1.0, 1.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 1,
[nan, 1.0, 1.0, 1.0]),
([[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0]],
'1B', 1,
[nan, -1.0, -1.0, -1.0]),
([[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0]],
'1D', 1,
[nan, -1.0, -1.0, -1.0]),
([[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0]],
'1B', 3,
[nan, nan, nan, 1.0, 1.0,
1.0, 0.6, -0.6, -1.0, 1.0,
-0.6, -1.0]),
([[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0]],
'1D', 3,
[nan, nan, nan, 1.0, 1.0,
1.0, 0.6, -0.6, -1.0, 1.0,
-0.6, -1.0])
])
def test_factor_rank_autocorrelation(self,
factor_values,
freq,
period,
expected_vals):
dr = date_range(start='2015-1-1', periods=len(factor_values),
freq=freq)
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = DataFrame(index=dr,
columns=tickers,
data=factor_values).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor_df = DataFrame()
factor_df['factor'] = factor
fa = factor_rank_autocorrelation(factor_df, period)
expected = Series(index=dr, data=expected_vals)
expected.name = period
assert_series_equal(fa, expected)
@parameterized.expand([
(
2, 3, False, False,
[[4.93048307, 8.68843922], [6.60404312, 12.22369139],
[8.92068367, 17.1794088], [12.1275523, 24.12861778],
[16.5694159, 33.8740100], [22.7273233, 47.53995233]],
),
(
3, 2, False, True,
[[0.0, 5.63219176], [0.0, 7.96515233],
[0.0, 11.2420646], [0.0, 15.8458720],
[0.0, 22.3134160], [0.0, 31.3970961]],
),
(
3, 5, True, False,
[[3.7228318, 2.6210478], [4.9304831, 3.6296796], [6.6040431, 5.0193734], # noqa
[8.9206837, 6.9404046], [12.127552, 9.6023405], [16.569416, 13.297652], # noqa
[22.727323, 18.434747], [31.272682, 25.584180], [34.358565, 25.497254]], # noqa
),
(
1, 4, True, True,
[[0., 0.], [0., 0.], [0., 0.],
[0., 0.], [0., 0.], [0., 0.]],
),
(
6, 6, False, False,
[[2.02679565, 2.38468223], [2.38769454, 3.22602748],
[2.85413029, 4.36044469], [3.72283181, 6.16462715],
[4.93048307, 8.68843922], [6.60404312, 12.2236914],
[8.92068367, 17.1794088], [12.1275523, 24.1286178],
[16.5694159, 33.8740100], [22.7273233, 47.5399523],
[31.2726821, 66.7013483], [34.3585654, 70.1828776],
[37.9964585, 74.3294620]],
),
(
6, 6, False, True,
[[0.0, 2.20770299], [0.0, 2.95942924], [0.0, 3.97022414],
[0.0, 5.63219176], [0.0, 7.96515233], [0.0, 11.2420646],
[0.0, 15.8458720], [0.0, 22.3134160], [0.0, 31.3970962],
[0.0, 44.1512888], [0.0, 62.0533954], [0.0, 65.8668371],
[0.0, 70.4306483]],
),
(
6, 6, True, False,
[[2.0267957, 0.9562173], [2.3876945, 1.3511898], [2.8541303, 1.8856194], # noqa
[3.7228318, 2.6210478], [4.9304831, 3.6296796], [6.6040431, 5.0193734], # noqa
[8.9206837, 6.9404046], [12.127552, 9.6023405], [16.569416, 13.297652], # noqa
[22.727323, 18.434747], [31.272682, 25.584180], [34.358565, 25.497254], # noqa
[37.996459, 25.198051]],
),
(
6, 6, True, True,
[[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
[0., 0.], [0., 0.], [0., 0.]],
),
])
def test_common_start_returns(self,
before,
after,
mean_by_date,
demeaned,
expected_vals):
dr = date_range(start='2015-1-17', end='2015-2-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
r1, r2, r3, r4 = (1.20, 1.40, 0.90, 0.80)
data = [[r1**i, r2**i, r3**i, r4**i] for i in range(1, 18)]
returns = DataFrame(data=data, index=dr, columns=tickers)
dr2 = date_range(start='2015-1-21', end='2015-1-29')
factor = DataFrame(index=dr2, columns=tickers,
data=[[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
cmrt = common_start_returns(
factor,
returns,
before,
after,
cumulative=True,
mean_by_date=mean_by_date,
demean_by=factor if demeaned else None,
)
cmrt = DataFrame({'mean': cmrt.mean(axis=1), 'std': cmrt.std(axis=1)})
expected = DataFrame(index=range(-before, after + 1),
columns=['mean', 'std'], data=expected_vals)
assert_frame_equal(cmrt, expected)
@parameterized.expand([
(
1, 2, False, 4,
[[0.00512695, 0.00256348, 0.00128174, 6.40869e-4],
[0.00579185, 0.00289592, 0.00144796, 7.23981e-4],
[1.00000000, 1.00000000, 1.00000000, 1.00000000],
[0.00000000, 0.00000000, 0.00000000, 0.00000000],
[7.15814531, 8.94768164, 11.1846020, 13.9807526],
[2.93784787, 3.67230984, 4.59038730, 5.73798413],
[39.4519043, 59.1778564, 88.7667847, 133.150177],
[28.3717330, 42.5575995, 63.8363992, 95.7545989]],
),
(
1, 2, True, 4,
[[-11.898667, -17.279462, -25.236885, -37.032252],
[7.82587034, 11.5529583, 17.0996881, 25.3636472],
[-10.903794, -16.282025, -24.238167, -36.032893],
[7.82140124, 11.5507268, 17.0985737, 25.3630906],
[-4.7456488, -8.3343438, -14.053565, -23.052140],
[4.91184665, 7.91180853, 12.5481552, 19.6734224],
[27.5481102, 41.8958311, 63.5286176, 96.1172844],
[20.5510133, 31.0075980, 46.7385910, 70.3923129]],
),
(
3, 0, False, 4,
[[7.0, 3.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.488, -0.36, -0.2, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.703704, -0.55555555, -0.333333333, 0.0],
[0.0, 0.0, 0.0, 0.0]],
),
(
0, 3, True, 4,
[[-17.279462, -25.236885, -37.032252, -54.550061],
[11.5529583, 17.0996881, 25.3636472, 37.6887906],
[-16.282025, -24.238167, -36.032893, -53.550382],
[11.5507268, 17.0985737, 25.3630906, 37.6885125],
[-8.3343438, -14.053565, -23.052140, -37.074441],
[7.91180853, 12.5481552, 19.6734224, 30.5748605],
[41.8958311, 63.5286176, 96.1172844, 145.174884],
[31.0075980, 46.7385910, 70.3923129, 105.944230]]),
(
3, 3, False, 2,
[[0.5102539, 0.50512695, 0.50256348, 0.50128174, 0.50064087, 0.50032043, 0.50016022], # noqa
[0.0115837, 0.00579185, 0.00289592, 1.44796e-3, 7.23981e-4, 3.61990e-4, 1.80995e-4], # noqa
[11.057696, 16.0138929, 23.3050248, 34.0627690, 49.9756934, 73.5654648, 108.600603], # noqa
[7.2389454, 10.6247239, 15.6450367, 23.1025693, 34.1977045, 50.7264595, 75.3771641]], # noqa
),
(
3, 3, True, 2,
[[-5.273721, -7.754383, -11.40123, -16.78074, -24.73753, -36.53257, -54.05022], # noqa
[3.6239580, 5.3146000, 7.8236356, 11.551843, 17.099131, 25.363369, 37.688652], # noqa
[5.2737212, 7.7543830, 11.401231, 16.780744, 24.737526, 36.532572, 54.050221], # noqa
[3.6239580, 5.3146000, 7.8236356, 11.551843, 17.099131, 25.363369, 37.688652]], # noqa
),
])
def test_average_cumulative_return_by_quantile(self,
before,
after,
demeaned,
quantiles,
expected_vals):
dr = date_range(start='2015-1-15', end='2015-2-1')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
r1, r2, r3, r4 = (1.25, 1.50, 1.00, 0.50)
data = [[r1**i, r2**i, r3**i, r4**i] for i in range(1, 19)]
returns = | DataFrame(index=dr, columns=tickers, data=data) | pandas.DataFrame |
# Dash dependencies import
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output,State
import dash_table
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
px.defaults.template = "ggplot2"
plt.style.use('ggplot')
import pathlib
# End Dash dependencies import
from sklearn.model_selection import train_test_split
# Lifelimes libraries
from lifelines import KaplanMeierFitter,CoxPHFitter, WeibullAFTFitter
from io import BytesIO
import base64
# End Lifelimes libraries
from app import app, server
PATH=pathlib.Path(__file__).parent
DATA_PATH=PATH.joinpath("../datasets").resolve()
df=pd.read_csv(DATA_PATH.joinpath("telco-customer-churn.csv"))
def process_data(df):
df['TotalCharges']=pd.to_numeric(df['TotalCharges'], errors='coerce')
df['SeniorCitizen'] = df['SeniorCitizen'].astype(str)
df['Churn']=df['Churn'].apply(lambda x: 1 if x == 'Yes' else 0 )
df=df.dropna()
return df
df=process_data(df)
time = df['tenure']
event= df['Churn']
# Train Kaplan Meier model
kmf = KaplanMeierFitter()
def train_kmf(time,event,kmf):
kmf.fit(time, event,label='Kaplan Meier Estimate')
return kmf
kmf_model=train_kmf(time,event,kmf)
# Kaplan Meier overall population visualization
def kmf_survival_function(kmf):
kmf_survival_func_df=pd.DataFrame(kmf.survival_function_).reset_index()
kmf_confidence_df= | pd.DataFrame(kmf.confidence_interval_) | pandas.DataFrame |
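# Hedged continuation sketch: the row above is truncated at the dataset boundary, but
# a typical next step with the already-imported plotly.express would be to plot the
# fitted curve (column names follow the 'Kaplan Meier Estimate' label used above):
#   km_df = pd.DataFrame(kmf_model.survival_function_).reset_index()
#   fig = px.line(km_df, x='timeline', y='Kaplan Meier Estimate',
#                 title='Customer churn survival curve')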