prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars) |
---|---|---|
import pandas as pd
from py2neo import Graph
from py2neo import ClientError
import sroka.config.config as config
def neo4j_query_data(cypher, parameters=None, **kwparameters):
if type(cypher) != str:
print('Cypher query needs to be a string')
return | pd.DataFrame([]) | pandas.DataFrame |
#!/usr/bin/env python3
"""
File: datasets.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/lgalke
Description: Parsing and loading for all the data sets.
"""
import pandas as pd
import numpy as np
import os
from html.parser import HTMLParser
from abc import abstractmethod, ABC
from collections import defaultdict
from .thesaurus_reader import ThesaurusReader
from .base import harvest
import csv
# NTCIR_ROOT_PATH = # think about this
DEFAULT_CACHEDIR = os.path.expanduser("~/.cache")
class IRDataSetBase(ABC):
@property
@abstractmethod
def docs(self):
pass
@property
@abstractmethod
def rels(self):
pass
@property
@abstractmethod
def topics(self):
pass
def load(self, verbose=False):
labels, docs = self.docs
if verbose:
print(len(docs), "documents.")
queries = self.topics
if verbose:
n_queries = len(queries)
print(n_queries, "queries.")
rels = self.rels
if verbose:
n_rels = np.asarray([len([r for r in harvest(rels, qid) if r > 0])
for qid, __ in queries])
print("{:2f} ({:2f}) relevant documents per query."
.format(n_rels.mean(), n_rels.std()))
return docs, labels, queries, rels
def mine_gold(path, verify_integrity=False):
""" returns a dict of dicts label -> docid -> 1"""
def zero_default():
return defaultdict(int)
gold = defaultdict(zero_default)
with open(path, 'r') as f:
rd = csv.reader(f, delimiter='\t')
for line in rd:
doc_id = int(line[0])
labels = line[1:]
for label in labels:
gold[label][doc_id] = 1
return gold
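# Editor's sketch (not part of the original module): a minimal, self-contained
# illustration of the mapping mine_gold produces. The temp file and the labels
# "physics"/"optics" are hypothetical.
def _example_mine_gold():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
        tmp.write("42\tphysics\toptics\n43\tphysics\n")
    gold = mine_gold(tmp.name)
    assert gold["physics"] == {42: 1, 43: 1}
    assert gold["optics"] == {42: 1}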
def _first_preflabel(node):
return node['prefLabel'][0]
def synthesize_topics(gold, thesaurus, accessor=_first_preflabel):
""" Returns a list of (query_id, querystring) pairs"""
topics = [(label, accessor(thesaurus[label])) for label in
set(gold.keys())]
return topics
def harvest_docs(path, verify_integrity):
if os.path.isdir(path):
fnames = os.listdir(path)
data = dict()
for fname in fnames:
with open(os.path.join(path, fname), 'r') as f:
label, __ = os.path.splitext(fname)
data[int(label)] = f.read()
# fulltext documents
docs = pd.DataFrame.from_dict(data, orient='index')
labels, docs = docs.index.values, docs.iloc[:, 0].values
elif os.path.isfile(path):
# title documents
docs = pd.read_csv(path, sep='\t', names=["title"], index_col=0)
labels, docs = docs.index.values, docs["title"].values
else:
raise UserWarning("No symlinks allowed.")
print("labels of type {}, docs of type {}".format(labels.dtype,
docs.dtype))
return labels, docs
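# Editor's note (inferred from the two branches above; the path is made up):
# harvest_docs accepts either a directory of "<int_label>.<ext>" fulltext files
# or a single tab-separated file with "<int_label>\t<title>" rows.
def _example_harvest_docs():
    labels, docs = harvest_docs("titles.tsv", verify_integrity=False)
    assert len(labels) == len(docs)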
class QuadflorLike(IRDataSetBase):
"""The famous quadflor-like dataset specification"""
def __init__(self,
y=None,
thes=None,
X=None, # Path to dir of documents
verify_integrity=False):
self.__docs = None
self.__rels = None
self.__topics = None
self.gold_path = y
self.thesaurus_reader = ThesaurusReader(thes, normalize=False)
self.doc_path = X
self.verify_integrity = verify_integrity
@property
def docs(self):
# in memory cache
if self.__docs is not None:
return self.__docs
path = self.doc_path
labels, docs = harvest_docs(path,
verify_integrity=self.verify_integrity)
# cache the full (labels, docs) tuple so the cached branch above returns
# the same shape as a first call
self.__docs = (labels, docs)
return self.__docs
@property
def rels(self):
if self.__rels is not None:
return self.__rels
# acquire rels
path = self.gold_path
rels = mine_gold(path)
self.__rels = rels
return rels
@property
def topics(self):
""" Synthesizes the topics for the dataset, rels will be computed
first."""
if self.__topics is not None:
return self.__topics
rels, thesaurus = self.rels, self.thesaurus_reader.thesaurus
# acquire topics
topics = synthesize_topics(rels, thesaurus)
self.__topics = topics
return topics
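# Hypothetical usage sketch (editor's addition; all paths are made up):
# QuadflorLike needs a gold TSV (y), a thesaurus file readable by
# ThesaurusReader (thes) and either a document directory or a titles TSV (X).
def _example_quadflorlike():
    dataset = QuadflorLike(y="gold.tsv", thes="thesaurus.nt", X="titles.tsv")
    docs, labels, queries, rels = dataset.load(verbose=True)
    return docs, labels, queries, rels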
class NTCIRTopicParser(HTMLParser):
def __init__(self, *args, record_tag="topic", tags=["title"], **kwargs):
self.tags = tags
self.record_tag = record_tag
self.records = []
super().__init__(*args, **kwargs)
def handle_starttag(self, tag, attrs):
self.current_tag = tag
if tag == self.record_tag:
self.current_record = {}
if len(attrs) > 1:
raise ValueError
self.current_record["qid"] = int(attrs[0][1])
def handle_data(self, data):
ctag = self.current_tag
if ctag is None:
return
if ctag in self.tags:
self.current_record[ctag] = data.strip()
def handle_endtag(self, tag):
if tag == self.record_tag:
self.records.append(self.current_record)
self.current_tag = None
class NTCIRParser(HTMLParser):
def __init__(self,
*args,
paragraph_sep="\n",
record_tag="rec",
id_tag="accn",
title_tag="tite",
content_tag="abse",
paragraph_tag="abse.p",
**kwargs):
self.records = []
self.record_tag = record_tag
self.id_tag = id_tag
self.title_tag = title_tag
self.content_tag = content_tag
self.paragraph_tag = paragraph_tag
self.paragraph_sep = paragraph_sep
super().__init__(*args, **kwargs)
def handle_starttag(self, tag, attrs):
if tag == self.record_tag:
self.current_record = {}
elif tag == self.content_tag:
self.current_paragraphs = []
self.current_tag = tag
def handle_endtag(self, tag):
if tag == self.content_tag:
s = self.paragraph_sep
self.current_record['content'] = s.join(self.current_paragraphs)
elif tag == self.record_tag:
self.records.append(self.current_record)
self.current_tag = None
def handle_data(self, data):
if self.current_tag is None: # we are not inside any tag
return
elif self.current_tag == self.paragraph_tag:
self.current_paragraphs.append(data)
elif self.current_tag == self.id_tag:
self.current_record['docid'] = data
elif self.current_tag == self.title_tag:
self.current_record['title'] = data
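# Editor's sketch (not part of the original module): feeding NTCIRParser a tiny
# record that uses its default tag names (rec/accn/tite/abse/abse.p); the
# document id and text are invented.
def _example_ntcir_parser():
    parser = NTCIRParser()
    parser.feed("<rec><accn>doc-0001</accn><tite>Some title</tite>"
                "<abse><abse.p>First paragraph.</abse.p>"
                "<abse.p>Second paragraph.</abse.p></abse></rec>")
    assert parser.records == [{"docid": "doc-0001",
                               "title": "Some title",
                               "content": "First paragraph.\nSecond paragraph."}]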
class NTCIR(IRDataSetBase):
def __init__(self,
root_path,
kaken=True,
gakkai=True,
rels=2,
topic="title",
field="title",
verify_integrity=False,
cache_dir=os.path.join(DEFAULT_CACHEDIR, "vec4ir", "ntcir"),
verbose=0):
self.__gakkai = gakkai
self.__kaken = kaken
self.__rels = int(rels)
self.__topic = topic
self.__verify_integrity = verify_integrity
self.__verbose = verbose
self.__field = field
self.root_path = root_path
if not cache_dir:
print(UserWarning("No cachedir specified"))
else:
print("Using cache:", cache_dir)
os.makedirs(cache_dir, exist_ok=True)
self.cache_dir = cache_dir
def _read_docs(path, title_tag, verify_integrity=False):
parser = NTCIRParser(title_tag=title_tag)
with open(path, 'r') as f:
parser.feed(f.read())
df = pd.DataFrame(parser.records)
df.set_index("docid", inplace=True, verify_integrity=verify_integrity)
return df
def _read_rels(path, verify_integrity=False):
df = pd.read_csv(path,
names=["qid", "rating", "docid", "relevance"],
sep='\t')
df.set_index(["qid", "docid"], inplace=True, drop=True,
verify_integrity=verify_integrity)
return df
def _read_topics(path, names, verify_integrity=False):
parser = NTCIRTopicParser(tags=names)
with open(path, 'r') as f:
parser.feed(f.read())
df = pd.DataFrame(parser.records)
df.set_index("qid", inplace=True, verify_integrity=verify_integrity)
return df
@property
def docs(self):
""" Method to access NTCIR documents with caching """
kaken = self.__kaken
gakkai = self.__gakkai
verify_integrity = self.__verify_integrity
verbose = self.__verbose
field = self.__field
if not kaken and not gakkai:
raise ValueError("So... you call me and want no documents?")
if self.cache_dir:
identifier = {
(False, True): "kaken.pkl",
(True, False): "gakkai.pkl",
(True, True): "gakkeikaken.pkl"
}[(gakkai, kaken)]
cache = os.path.join(self.cache_dir, identifier)
else:
cache = None
if cache:
try:
if verbose > 0:
print("Cache hit:", cache)
df = | pd.read_pickle(cache) | pandas.read_pickle |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
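    # Editor's sketch (standalone illustration of the behaviour exercised
    # above; not part of the original suite): a numeric Index times a
    # Timedelta scalar gives timedelta64[ns] values.
    def _example_numeric_mul_tdscalar(self):
        result = pd.Index(range(1, 11)) * Timedelta(days=1)
        assert list(result) == list(timedelta_range('1 days', '10 days'))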
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
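    # Editor's sketch (standalone illustration, not part of the original
    # suite): dividing a Timedelta scalar by a numeric Index scales it down.
    def _example_tdscalar_rdiv_numeric(self):
        result = Timedelta(days=1) / pd.Index([1, 2])
        assert list(result) == list(TimedeltaIndex(['1 Day', '12 Hours']))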
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = | tm.box_expected(rng, box) | pandas.util.testing.box_expected |
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
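# Editor's sketch of the relabelling exercised above (a hypothetical stand-in,
# not pymove's actual implementation): bank-related tags collapse to 'banks',
# everything else is left untouched.
def _union_poi_bank_sketch(df, label_column=TYPE_POI):
    bank_tags = {'bank', 'banks', 'bancos_postos', 'bancos_PAE',
                 'bancos_agencias', 'bancos_filiais'}
    df.loc[df[label_column].isin(bank_tags), label_column] = 'banks'
    return df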
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_collective_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
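# Editor's note (worked arithmetic for the window used above): 45000 s is
# 12 h 30 min, so for the first point (2008-10-23 05:53:05) the window runs
# from 2008-10-22 17:23:05 to 2008-10-23 18:23:05, matching list_win_start[0]
# and list_win_end[0].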
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, | Timestamp('2008-10-23 11:58:33') | pandas.Timestamp |
import os
import pandas as pd
import numpy as np
from autumn.settings import PROJECTS_PATH
from autumn.settings import INPUT_DATA_PATH
from autumn.tools.utils.utils import update_timeseries
from autumn.models.covid_19.constants import COVID_BASE_DATETIME
from autumn.tools.utils.utils import create_date_index
from autumn.settings import PASSWORD_ENVAR
from getpass import getpass
from autumn.tools.utils import secrets
COVID_AU_DIRPATH = os.path.join(INPUT_DATA_PATH, "covid_au")
CHRIS_CSV = os.path.join(COVID_AU_DIRPATH, "monitoringreport.secret.csv")
COVID_DHHS_DEATH_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_deaths.secret.csv")
COVID_DHHS_CASE_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_cases.secret.csv")
COVID_DHHS_ADMN_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_admissions.secret.csv")
COVID_DHHS_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_vaccination.secret.csv")
COVID_VIDA_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "vida_vac.secret.csv")
COVID_VIDA_POP_CSV = os.path.join(COVID_AU_DIRPATH, "vida_pop.csv")
COVID_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "vac_cov.csv")
COVID_DHHS_POSTCODE_LGA_CSV = os.path.join(COVID_AU_DIRPATH, "postcode lphu concordance.csv")
COVID_VICTORIA_TARGETS_CSV = os.path.join(
PROJECTS_PATH, "covid_19", "victoria", "victoria", "targets.secret.json"
)
# Two different mappings
LGA_TO_CLUSTER = os.path.join(
INPUT_DATA_PATH, "mobility", "LGA to Cluster mapping dictionary with proportions.csv"
)
LGA_TO_HSP = os.path.join(INPUT_DATA_PATH, "covid_au", "LGA_HSP map_v2.csv")
COVID_DHHS_MAPING = LGA_TO_HSP # This is the new mapping
TODAY = ( | pd.to_datetime("today") | pandas.to_datetime |
import os
import glob
import torch
import numpy as np
import pandas as pd
import librosa as lr
import soundfile as sf
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, ConcatDataset, random_split
from asteroid.data import TimitDataset, TimitLegacyDataset
from asteroid.data.utils import CachedWavSet
from tqdm import tqdm
from torch import optim
from pytorch_lightning import Trainer, loggers as pl_loggers
from asteroid_filterbanks.transforms import mag
from asteroid.losses import singlesrc_neg_sisdr
from asteroid.data.utils import find_audio_files
from asteroid.metrics import get_metrics
TIMIT_CACHE_DIR = '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache'
TIMIT_DIR_8kHZ = '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/TIMIT_8kHZ'
SAMPLE_RATE = 8000
csv_path_dict = {
'-30': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-30db/test-drones_68.csv',
'-25': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-25db/test-drones_69.csv',
'-20': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-20db/test-drones_70.csv',
'-15': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-15db/test-drones_71.csv',
'-10': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-10db/test-drones_72.csv',
'-5': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/-5db/test-drones_73.csv',
'0': '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/test_noisy_drone_cache/0db/test-drones_74.csv',
}
noises = CachedWavSet('/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/noises-test-drones', sample_rate=SAMPLE_RATE, precache=True, with_path=True)
test_snrs = [-30, -25, -20, -15, -10, -5, 0]
test_sets = {}
i = 0
for snr in tqdm(test_snrs, 'Load datasets'):
test_sets[snr] = TimitLegacyDataset(
TIMIT_DIR_8kHZ, noises, sample_rate=SAMPLE_RATE,
cache_dir=TIMIT_CACHE_DIR, snr=snr, dset_name='test-drones',
subset='test', random_seed=68 + i, with_path=True)
i += 1
# directory to save the denoised audio in
save_enhanced_dir = "/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/drone_noise_out/"
# run each test set through the model: save enhanced audio, collect PESQ and metadata
def get_all_metrics_from_model(model, test_sets, model_name=None):
series_list = []
torch.no_grad().__enter__()
model = model.cuda()
for snr, test_set in test_sets.items():
# makde dirs for each models and separate dir for each snr
os.makedirs(f'{save_enhanced_dir}/{str(model_name)}/{snr}dB/data/', exist_ok=True)
denoised_file_paths = []
print(f'SNR: {snr}db')
loader = DataLoader(test_set, num_workers=0)
for i, (mix, clean, path) in tqdm(enumerate(loader)):
mix = mix.cuda()
estimate = model(mix).detach().flatten().cpu().numpy()
denoised_file_name = path[0].split('/')[-1]
#add a "_" in front of the denoised fie
denoised_file_path = f'{save_enhanced_dir}/{str(model_name)}/{snr}dB/data/{model_name}_{denoised_file_name}'
denoised_file_paths.append(denoised_file_path)
sf.write(denoised_file_path, estimate, samplerate=SAMPLE_RATE)
# compute only PESQ here; the enhanced (separated) audio plus its metadata are what we mainly save
metrics_dict = get_metrics(mix.cpu().numpy(), clean.numpy(), estimate, sample_rate=SAMPLE_RATE, metrics_list=["pesq"])
metrics_dict["mix_path"] = path
metrics_dict["snr"] = snr
series_list.append(pd.Series(metrics_dict))
all_metrics_df = pd.DataFrame(series_list)
if i == 30 : break
csv_path_tmp = csv_path_dict[str(snr)]
df = pd.read_csv(csv_path_tmp)
denoised_file_paths = | pd.Series(denoised_file_paths) | pandas.Series |
import os
import shutil
import pandas as pd
from numpy import linspace
from functions.helpers import _format_header, _process_data_transposed, _process_data
#IMPORT_FOLDER = '20201224/matrix_1mic/'
#EXPORT_FOLDER = '20201224/matrix_1mic_exported/' #'exported'
IMPORT_FOLDER = '20201224/matrix_beamforming/'
EXPORT_FOLDER = '20201224/matrix_beamforming_exported/' #'exported'
IMPORT_FOLDER = '20201224/respeaker_4mic/'
EXPORT_FOLDER = '20201224/respeaker_4mic_exported/' #'exported'
#"20202112/raw_data_verification_test/matrix_fix_speech_level/testset_55db_level_21dec_v2.bag_NOT RECOGNIZED.csv"
EXPORT_DATA = False # if false show on screen the pictures
#TEST_PREFIX = "testset_spk80_75db_level_21dec_v3" # "testset_spk80_75db_level_21dec_v2" #"testset_quiet_room_21dec_v2" #"testset_70db_level_21dec_v2" # "testset_quiet_room_21dec_v2"
# TEST_PREFIX = "1mic_spk80_75db_level"
OUTCOME_TO_ANALYSE = ["WAKEDUP",
"NOT WAKEDUP",
"NOT RECOGNIZED",
"RECOGNIZED",
"WRONG RECOGNIZED",
"TEXT ACQUIRED"]
#TEST_PREFIX = TEST_PREFIX + ".bag" # this remains from the export of csv
Z_SCORE = 1.96 # 95 percent of the value
###################
### IMPORT DATA ###
###################
print(_format_header("IMPORT data"))
FILE_TO_TEST = []
for file in os.listdir(IMPORT_FOLDER):
if file.endswith(".bag"):
FILE_TO_TEST.append(os.path.join(IMPORT_FOLDER, file))
if IMPORT_FOLDER == EXPORT_FOLDER:
print("you dont wanna do this, trust me....")
exit(1)
if not os.path.exists(EXPORT_FOLDER):
os.makedirs(EXPORT_FOLDER)
else:
if os.path.exists(EXPORT_FOLDER) and os.path.isdir(EXPORT_FOLDER):
shutil.rmtree(EXPORT_FOLDER)
os.makedirs(EXPORT_FOLDER)
for TEST_PREFIX in FILE_TO_TEST:
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print(_format_header(TEST_PREFIX))
pd_dict = {}
for o in OUTCOME_TO_ANALYSE:
#pd_dict[o] = pd.read_csv(IMPORT_FOLDER + '/' + TEST_PREFIX + "_" + o + ".csv")
pd_dict[o] = pd.read_csv(TEST_PREFIX + "_" + o + ".csv")
pd_dict[o].index = pd_dict[o].iloc[:, 0] # use the first column as the row index, then drop it below
pd_dict[o] = pd_dict[o].drop(columns=pd_dict[o].columns[0])
if not pd_dict[o].empty:
pd_dict[o] = pd_dict[o].drop(['outcome'])
pd_dict[o]=pd_dict[o].astype(float)
#print("\n----------------\n", o)
#print("COL\n", pd_dict[o].columns)
#print("ROWS\n", pd_dict[o].index)
#print(pd_dict[o].to_string())
#######################
### DATA PROCESSING ###
#######################
print(_format_header("PROCESSING data"))
descr_dict = {}
df_all_dict = {}
pd_all = pd.DataFrame() # Prepare a df with all the values not aggreagated
for k, v in pd_dict.items():
if len(v.columns) and len(v.index): # empty df are discarded
#print("----------------------------------------------")
#print("key dict", k)
descr_dict[k] = _process_data(v.T, z_score=Z_SCORE)
df_all_dict[k] = v.T.dropna()
pd_all = pd_all.append(v.T)
#print(descr_dict[k])
#print("----------------------------------------------")
else:
print("Discarding empty df: ", k)
results_count_dict = {}
for k, v in pd_dict.items():
results_count_dict[k] = len(v.columns)
results_count = pd.DataFrame(results_count_dict, index=[0])
total_tests = float(results_count.sum(axis=1))
#results_count = results_count / total_tests
total_possible_intents = float(results_count['RECOGNIZED'] + results_count['NOT RECOGNIZED'] + results_count['WRONG RECOGNIZED'] + results_count['TEXT ACQUIRED'])
if total_possible_intents == 0:
INTENTS_ER = 1.0
else:
INTENTS_ER = float(results_count['NOT RECOGNIZED'] + results_count['WRONG RECOGNIZED'] + results_count['TEXT ACQUIRED'])/total_possible_intents
if total_tests==0:
WAKEUP_ER = 1.0
else:
WAKEUP_ER = float(results_count['WAKEDUP'] + results_count['NOT WAKEDUP']) / total_tests
df_all = {}
df_all_describe = {}
for c in pd_all.columns:
s = pd.DataFrame(pd_all[c].dropna())
df_all[c] = s
df_all_describe[c] = s.describe()
df_all_dict["ALL"] = df_all
#print(df_all_dict)
#################
### PLOT DATA ###
#################
print(_format_header("Prepare plots"))
from functions.helpers import plot_boxs, plot_boxs_2,plot_boxs_3, props_1, props_2, props_3, BOX_WIDTH, _show, plot_items_descr, _export_all_figures_2 , _write_to_excel # data plotting
plot_descr = plot_items_descr(title="All non aggregated", xlabel="Operation times",
ylabel="Sec.",
ylim=[0.0, 3.0],
bkg_color=None)
# plot all the data
props = [props_1, props_2, props_3]
props = dict(zip(df_all.keys(), props))
plot_boxs_2(df_all, plot_descr, props)
plot_descr_2 = plot_items_descr(title="All aggregated", xlabel="Operation times",
ylabel="Sec.",
ylim=[0.0, 3.0],
bkg_color=None)
# plot the data by key
props_2 = [props_3, props_2, props_1, props_3]
#props_2 = dict(zip(descr_dict.keys(), props_2))
plot_boxs_3(descr_dict, plot_descr_2, props_2, showmean=False)
HEIGHT = 9
sizes = [(5,HEIGHT), (2.5,HEIGHT), (4,HEIGHT), (4,HEIGHT), (3,HEIGHT)]
print("INTENT ERROR RATE:", INTENTS_ER)
print("WAKEUP ERROR RATE:", WAKEUP_ER)
print("TEST PERFORMED:", total_tests)
print("AVAILABLE INTENTS:", total_possible_intents)
for k,v in df_all_describe.items():
print(k, v)
if EXPORT_DATA:
fname = os.path.basename(TEST_PREFIX)
_export_all_figures_2(EXPORT_FOLDER, fname + "_verification_evaluation", size=sizes)
with | pd.ExcelWriter(EXPORT_FOLDER +'/' + fname+'_verification.xlsx') | pandas.ExcelWriter |
import numpy as np
from numpy import where
from flask import Flask, request, jsonify, render_template
import pandas as pd
from sklearn.ensemble import IsolationForest
from pyod.models.knn import KNN
import json
from flask import send_from_directory
from flask import current_app
app = Flask(__name__)
class Detect:
def __init__(self, file, non_num):
self.file = file
self.non_num = non_num
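    # Note (inferred from the methods below): self.file holds the numeric columns to screen,
    # self.non_num the identifier columns joined back onto the flagged rows before each method
    # writes its outliers to a CSV in the working directory.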
def IQR(self):
# anomaly=pd.DataFrame()
data = pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
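        # Tukey's rule: flag any row with a value below Q1 - 1.5*IQR or above Q3 + 1.5*IQR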
IQR_Out = data[((data < (Q1 - 1.5 * IQR)) |(data > (Q3 + 1.5 * IQR))).any(axis=1)]
IQR_Out = non_num.join(IQR_Out, how='inner')
IQR_Out.to_csv(r'IQR_Outlier.csv')
# IQR Method
def isolation(self):
anomaly=pd.DataFrame()
data_n=pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data_n.dropna(axis=0,inplace=True)
# data_n=data_n.select_dtypes(include=['float64','int64'])
model = IsolationForest(n_estimators=50, max_samples=500, contamination=.01, bootstrap=False, n_jobs=1, random_state=1, verbose=0, warm_start=False).fit(data_n)
data_n['anomaly_score'] = model.predict(data_n)
anomaly=data_n[data_n['anomaly_score']==-1]
anomaly = non_num.join(anomaly, how='inner')
anomaly.to_csv("outlierss_isolation.csv")
# Isolation forest Method
def mcd(self):
anomaly=pd.DataFrame()
data=pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
from sklearn.covariance import EllipticEnvelope
model = EllipticEnvelope(contamination=0.01).fit(data)
data['anomaly_score'] = model.predict(data)
anomaly=data[data['anomaly_score']==-1]
anomaly = non_num.join(anomaly, how='inner')
anomaly.to_csv("outlierss_mcd.csv")
# Minimum covariance determinant Method
def local(self):
from numpy import quantile, where, random
import pandas as pd
anomaly = pd.DataFrame()
df = pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
df.dropna(axis = 0, inplace = True)
# x = df.select_dtypes(include = ['float64','int64'])
from sklearn.neighbors import LocalOutlierFactor
lof = LocalOutlierFactor()
clusters = lof.fit_predict(df)
anom_index = where(clusters == -1)
values = df.iloc[anom_index]
values = non_num.join(values, how='inner')
values.to_csv(r'LOF_outliers.csv' ,index=True,header=True)
# Local Outlier Factor Method
def SVM(self):
from sklearn.svm import OneClassSVM
from numpy import quantile, where, random
anomaly=pd.DataFrame()
data= | pd.DataFrame(self.file) | pandas.DataFrame |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
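# Most entries above are the 5-year range covered by the commodity's workbook and are consumed by
# usgs_myb_year(); the list-valued entries (e.g. "sodaash_t4") enumerate individual years instead.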
def usgs_myb_year(years, current_year_str):
"""
    Returns the column name ("year_N") for the requested year, after checking
    that the year is within the range covered by the file.
    :param years: string, with hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return:
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string:
:return:
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
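# Each commodity below follows the same two-step pattern: a *_call function reads the relevant
# rows of the Excel tab (usually T1) and keeps only the column for the requested year, and a
# *_parse function maps those rows onto Flow-By-Activity records.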
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = | pd.DataFrame(df_raw_data.loc[27:28]) | pandas.DataFrame |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
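        # 20 years x 4 quarters, trimmed by two entries at each end -> 1990Q3 through 2009Q2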
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
        # passing a period dtype should construct a PeriodIndex with that freq
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
        # if we already have a freq and it's not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
| tm.assert_index_equal(res, exp) | pandas.util.testing.assert_index_equal |
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import os
import sys
import numpy as np
import pandas as pd
from .Error import DemandInputError
from .Logger import FastTripsLogger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
class Passenger(object):
"""
Passenger class.
One instance represents all of the households and persons that could potentially make transit trips.
Stores household information in :py:attr:`Passenger.households_df` and person information in
:py:attr:`Passenger.persons_df`, which are both :py:class:`pandas.DataFrame` instances.
"""
#: File with households
INPUT_HOUSEHOLDS_FILE = "household.txt"
#: Households column: Household ID
HOUSEHOLDS_COLUMN_HOUSEHOLD_ID = 'hh_id'
#: File with persons
INPUT_PERSONS_FILE = "person.txt"
#: Persons column: Household ID
PERSONS_COLUMN_HOUSEHOLD_ID = HOUSEHOLDS_COLUMN_HOUSEHOLD_ID
#: Persons column: Person ID (string)
PERSONS_COLUMN_PERSON_ID = 'person_id'
# ========== Added by fasttrips =======================================================
#: Persons column: Person ID number
PERSONS_COLUMN_PERSON_ID_NUM = 'person_id_num'
#: File with trip list
INPUT_TRIP_LIST_FILE = "trip_list.txt"
#: Trip list column: Person ID
TRIP_LIST_COLUMN_PERSON_ID = PERSONS_COLUMN_PERSON_ID
#: Trip list column: Person Trip ID
TRIP_LIST_COLUMN_PERSON_TRIP_ID = "person_trip_id"
#: Trip list column: Origin TAZ ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID = "o_taz"
#: Trip list column: Destination TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID = "d_taz"
#: Trip list column: Mode
TRIP_LIST_COLUMN_MODE = "mode"
#: Trip list column: Departure Time. DateTime.
TRIP_LIST_COLUMN_DEPARTURE_TIME = 'departure_time'
#: Trip list column: Arrival Time. DateTime.
TRIP_LIST_COLUMN_ARRIVAL_TIME = 'arrival_time'
#: Trip list column: Time Target (either 'arrival' or 'departure')
TRIP_LIST_COLUMN_TIME_TARGET = 'time_target'
# ========== Added by fasttrips =======================================================
#: Trip list column: Unique numeric ID for this passenger/trip
TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM = "trip_list_id_num"
#: Trip list column: Origin TAZ Numeric ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM = "o_taz_num"
#: Trip list column: Destination Numeric TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM = "d_taz_num"
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN = 'departure_time_min'
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN = 'arrival_time_min'
#: Trip list column: Transit Mode
TRIP_LIST_COLUMN_TRANSIT_MODE = "transit_mode"
#: Trip list column: Access Mode
TRIP_LIST_COLUMN_ACCESS_MODE = "access_mode"
#: Trip list column: Egress Mode
TRIP_LIST_COLUMN_EGRESS_MODE = "egress_mode"
#: Trip list column: Outbound (bool), true iff time target is arrival
TRIP_LIST_COLUMN_OUTBOUND = "outbound"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (arrival time)
TIME_TARGET_ARRIVAL = "arrival"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (departure time)
TIME_TARGET_DEPARTURE = "departure"
#: Generic transit. Specify this for mode when you mean walk, any transit modes, walk
#: TODO: get rid of this? Maybe user should always specify.
MODE_GENERIC_TRANSIT = "transit"
#: Generic transit - Numeric mode number
MODE_GENERIC_TRANSIT_NUM = 1000
    #: Minimum Value of Time: 1 dollar shouldn't be worth 180 minutes
MIN_VALUE_OF_TIME = 60.0/180.0
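    #: 60.0/180.0 = 0.333..., i.e. the floor implied by "a dollar is worth at most 180 minutes"
    #: (assuming vot is expressed in dollars per hour)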
#: Trip list column: User class. String.
TRIP_LIST_COLUMN_USER_CLASS = "user_class"
#: Trip list column: Purpose. String.
TRIP_LIST_COLUMN_PURPOSE = "purpose"
#: Trip list column: Value of time. Float.
TRIP_LIST_COLUMN_VOT = "vot"
#: Trip list column: Trace. Boolean.
TRIP_LIST_COLUMN_TRACE = "trace"
#: Column names from pathfinding
PF_COL_PF_ITERATION = 'pf_iteration' #: 0.01*pathfinding_iteration + iteration during which this path was found
PF_COL_PAX_A_TIME = 'pf_A_time' #: time path-finder thinks passenger arrived at A
PF_COL_PAX_B_TIME = 'pf_B_time' #: time path-finder thinks passenger arrived at B
PF_COL_LINK_TIME = 'pf_linktime' #: time path-finder thinks passenger spent on link
PF_COL_LINK_FARE = 'pf_linkfare' #: fare path-finder thinks passenger spent on link
PF_COL_LINK_COST = 'pf_linkcost' #: cost (generalized) path-finder thinks passenger spent on link
PF_COL_LINK_DIST = 'pf_linkdist' #: dist path-finder thinks passenger spent on link
PF_COL_WAIT_TIME = 'pf_waittime' #: time path-finder thinks passenger waited for vehicle on trip links
PF_COL_PATH_NUM = 'pathnum' #: path number, starting from 0
PF_COL_LINK_NUM = 'linknum' #: link number, starting from access
PF_COL_LINK_MODE = 'linkmode' #: link mode (Access, Trip, Egress, etc)
PF_COL_MODE = TRIP_LIST_COLUMN_MODE #: supply mode
PF_COL_ROUTE_ID = Trip.TRIPS_COLUMN_ROUTE_ID #: link route ID
PF_COL_TRIP_ID = Trip.TRIPS_COLUMN_TRIP_ID #: link trip ID
PF_COL_DESCRIPTION = 'description' #: path text description
#: todo replace/rename ??
PF_COL_PAX_A_TIME_MIN = 'pf_A_time_min'
#: pathfinding results
PF_PATHS_CSV = r"enumerated_paths.csv"
PF_LINKS_CSV = r"enumerated_links.csv"
#: results - PathSets
PATHSET_PATHS_CSV = r"pathset_paths.csv"
PATHSET_LINKS_CSV = r"pathset_links.csv"
def __init__(self, input_dir, output_dir, today, stops, routes, capacity_constraint):
"""
Constructor from dictionary mapping attribute to value.
"""
# if no demand dir, nothing to do
if input_dir == None:
self.trip_list_df = pd.DataFrame()
return
FastTripsLogger.info("-------- Reading demand --------")
FastTripsLogger.info("Capacity constraint? %x" % capacity_constraint )
self.trip_list_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_TRIP_LIST_FILE),
skipinitialspace=True, ##LMZ
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID :'S',
Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID :'S',
Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID:'S',
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME :'S',
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME :'S',
Passenger.TRIP_LIST_COLUMN_PURPOSE :'S'})
trip_list_cols = list(self.trip_list_df.columns.values)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_TIME_TARGET in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_VOT in trip_list_cols)
FastTripsLogger.debug("=========== TRIP LIST ===========\n" + str(self.trip_list_df.head()))
FastTripsLogger.debug("\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.trip_list_df), "person trips", Passenger.INPUT_TRIP_LIST_FILE))
# Error on missing person ids or person_trip_ids
missing_person_ids = self.trip_list_df[pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])]
if len(missing_person_ids)>0:
error_msg = "Missing person_id or person_trip_id fields:\n%s\n" % str(missing_person_ids)
error_msg += "Use 0 for person_id for trips without corresponding person."
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Drop (warn) on missing origins or destinations
missing_ods = self.trip_list_df[ pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ]
if len(missing_ods)>0:
FastTripsLogger.warn("Missing origin or destination for the following trips. Dropping.\n%s" % str(missing_ods))
self.trip_list_df = self.trip_list_df.loc[ pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID ])&
pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ].reset_index(drop=True)
FastTripsLogger.warn("=> Have %d person trips" % len(self.trip_list_df))
non_zero_person_ids = len(self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID]!="0"])
if non_zero_person_ids > 0 and os.path.exists(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE)):
self.persons_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE),
skipinitialspace=True,
dtype={Passenger.PERSONS_COLUMN_PERSON_ID:'S'})
self.persons_id_df = Util.add_numeric_column(self.persons_df[[Passenger.PERSONS_COLUMN_PERSON_ID]],
id_colname=Passenger.PERSONS_COLUMN_PERSON_ID,
numeric_newcolname=Passenger.PERSONS_COLUMN_PERSON_ID_NUM)
self.persons_df = pd.merge(left=self.persons_df, right=self.persons_id_df,
how="left")
persons_cols = list(self.persons_df.columns.values)
FastTripsLogger.debug("=========== PERSONS ===========\n" + str(self.persons_df.head()))
FastTripsLogger.debug("\n"+str(self.persons_df.index.dtype)+"\n"+str(self.persons_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.persons_df), "persons", Passenger.INPUT_PERSONS_FILE))
self.households_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_HOUSEHOLDS_FILE), skipinitialspace=True)
household_cols = list(self.households_df.columns.values)
FastTripsLogger.debug("=========== HOUSEHOLDS ===========\n" + str(self.households_df.head()))
FastTripsLogger.debug("\n"+str(self.households_df.index.dtype)+"\n"+str(self.households_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.households_df), "households", Passenger.INPUT_HOUSEHOLDS_FILE))
else:
self.persons_df = pd.DataFrame()
self.households_df = pd.DataFrame()
# make sure that each tuple TRIP_LIST_COLUMN_PERSON_ID, TRIP_LIST_COLUMN_PERSON_TRIP_ID is unique
self.trip_list_df["ID_dupes"] = self.trip_list_df.duplicated(subset=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID],
keep=False)
if self.trip_list_df["ID_dupes"].sum() > 0:
error_msg = "Duplicate IDs (%s, %s) found:\n%s" % \
(Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
self.trip_list_df.loc[self.trip_list_df["ID_dupes"]==True].to_string())
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Create unique numeric index
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM] = self.trip_list_df.index + 1
# datetime version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: Util.read_time(x))
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: Util.read_time(x))
# float version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
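        # e.g. a departure_time of 08:30:00 becomes 510.0 minutes after midnight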
# TODO: validate fields?
        # value of time must be greater than a threshold or any fare becomes prohibitively expensive
low_vot = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME ]
if len(low_vot) > 0:
FastTripsLogger.warn("These trips have value of time lower than the minimum threshhhold (%f): raising to minimum.\n%s" %
(Passenger.MIN_VALUE_OF_TIME, str(low_vot) ))
self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME,
Passenger.TRIP_LIST_COLUMN_VOT] = Passenger.MIN_VALUE_OF_TIME
if len(self.persons_df) > 0:
# Join trips to persons
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.persons_df,
how='left',
on=Passenger.TRIP_LIST_COLUMN_PERSON_ID)
# are any null?
no_person_ids = self.trip_list_df.loc[ pd.isnull(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM])&
(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID]!="0")]
if len(no_person_ids) > 0:
error_msg = "Even though a person list is given, failed to find person information for %d trips" % len(no_person_ids)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\n%s\n" % no_person_ids.to_string())
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# And then to households
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.households_df,
how='left',
on=Passenger.PERSONS_COLUMN_HOUSEHOLD_ID)
else:
# Give each passenger a unique person ID num
self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM] = self.trip_list_df.index + 1
# add TAZ numeric ids (stored in the stop mapping)
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as origins in demand file are not found in the network")
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as destinations in demand file are not found in the network")
# trips with invalid TAZs have been dropped
FastTripsLogger.debug("Have %d person trips" % len(self.trip_list_df))
# figure out modes:
if Passenger.TRIP_LIST_COLUMN_MODE not in trip_list_cols:
# default to generic walk-transit-walk
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df['mode_dash_count'] = 0
else:
# count the dashes in the mode
self.trip_list_df['mode_dash_count'] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x.count('-'))
# The only modes allowed are access-transit-egress or MODE_GENERIC_TRANSIT
bad_mode_df = self.trip_list_df.loc[((self.trip_list_df['mode_dash_count']!=2)&
((self.trip_list_df['mode_dash_count']!=0)|
(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]!=Passenger.MODE_GENERIC_TRANSIT)))]
if len(bad_mode_df) > 0:
FastTripsLogger.fatal("Could not understand column '%s' in the following: \n%s" %
(Passenger.TRIP_LIST_COLUMN_MODE,
bad_mode_df[[Passenger.TRIP_LIST_COLUMN_MODE,'mode_dash_count']].to_string()))
sys.exit(2)
# Take care of the transit generic
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
# Take care of the access-transit-egress
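        # e.g. a mode string like "walk-local_bus-walk" (hypothetical) splits on the first and last
        # dash into access "walk", transit "local_bus", egress "walk"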
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[:x.find('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x[x.find('-')+1:x.rfind('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[x.rfind('-')+1:])
# We're done with mode_dash_count, thanks for your service
self.trip_list_df.drop('mode_dash_count', axis=1, inplace=True) # replace with cumsum
# validate time_target
invalid_time_target = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET].isin(
[Passenger.TIME_TARGET_ARRIVAL, Passenger.TIME_TARGET_DEPARTURE])==False ]
if len(invalid_time_target) > 0:
error_msg = "Invalid value in column %s:\n%s" % (Passenger.TRIP_LIST_COLUMN_TIME_TARGET, str(invalid_time_target))
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# set outbound
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_OUTBOUND] = (self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL)
# Set the user class for each trip
from .PathSet import PathSet
PathSet.set_user_class(self.trip_list_df, Passenger.TRIP_LIST_COLUMN_USER_CLASS)
# Verify that PathSet has all the configuration for these user classes + transit modes + access modes + egress modes
# => Figure out unique user class + mode combinations
self.modes_df = self.trip_list_df[[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE]].set_index([Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE])
# stack - so before we have three columns: transit_mode, access_mode, egress_mode
# after, we have two columns: demand_mode_type and the value, demand_mode
self.modes_df = self.modes_df.stack().to_frame()
self.modes_df.index.names = [Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE, PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]
self.modes_df.columns = [PathSet.WEIGHTS_COLUMN_DEMAND_MODE]
self.modes_df.reset_index(inplace=True)
self.modes_df.drop_duplicates(inplace=True)
# fix demand_mode_type since transit_mode is just transit, etc
self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE] = self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE].apply(lambda x: x[:-5])
FastTripsLogger.debug("Demand mode types by class & purpose: \n%s" % str(self.modes_df))
# Make sure we have all the weights required for these user_class/mode combinations
self.trip_list_df = PathSet.verify_weight_config(self.modes_df, output_dir, routes, capacity_constraint, self.trip_list_df)
# add column trace
from .Assignment import Assignment
if len(Assignment.TRACE_IDS) > 0:
trace_df = pd.DataFrame.from_records(data=Assignment.TRACE_IDS,
columns=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]).astype(object)
trace_df[Passenger.TRIP_LIST_COLUMN_TRACE] = True
# combine
self.trip_list_df = pd.merge(left=self.trip_list_df,
right=trace_df,
how="left",
on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# make nulls into False
self.trip_list_df.loc[pd.isnull(
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE]), Passenger.TRIP_LIST_COLUMN_TRACE] = False
else:
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE] = False
FastTripsLogger.info("Have %d person trips" % len(self.trip_list_df))
FastTripsLogger.debug("Final trip_list_df\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.debug("\n"+self.trip_list_df.head().to_string())
#: Maps trip_list_id to :py:class:`PathSet` instance. Use trip_list_id instead of (person_id, person_trip_id) for simplicity and to iterate sequentially
#: in setup_passenger_pathsets()
self.id_to_pathset = collections.OrderedDict()
def add_pathset(self, trip_list_id, pathset):
"""
Stores this path set for the trip_list_id.
"""
self.id_to_pathset[trip_list_id] = pathset
def get_pathset(self, trip_list_id):
"""
Retrieves a stored path set for the given trip_list_id
"""
return self.id_to_pathset[trip_list_id]
def get_person_id(self, trip_list_id):
to_ret = self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM]==trip_list_id,
[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]]
return(to_ret.iloc[0,0], to_ret.iloc[0,1])
def read_passenger_pathsets(self, pathset_dir, stops, modes_df, include_asgn=True):
"""
Reads the dataframes described in :py:meth:`Passenger.setup_passenger_pathsets` and returns them.
:param pathset_dir: Location of csv files to read
:type pathset_dir: string
:param include_asgn: If true, read from files called :py:attr:`Passenger.PF_PATHS_CSV` and :py:attr:`Passenger.PF_LINKS_CSV`.
Otherwise read from files called :py:attr:`Passenger.PATHSET_PATHS_CSV` and :py:attr:`Passenger.PATHSET_LINKS_CSV` which include assignment results.
:return: See :py:meth:`Assignment.setup_passengers`
for documentation on the passenger paths :py:class:`pandas.DataFrame`
:rtype: a tuple of (:py:class:`pandas.DataFrame`, :py:class:`pandas.DataFrame`)
"""
# read existing paths
paths_file = os.path.join(pathset_dir, Passenger.PATHSET_PATHS_CSV if include_asgn else Passenger.PF_PATHS_CSV)
pathset_paths_df = pd.read_csv(paths_file,
skipinitialspace=True,
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S'})
FastTripsLogger.info("Read %s" % paths_file)
FastTripsLogger.debug("pathset_paths_df.dtypes=\n%s" % str(pathset_paths_df.dtypes))
from .Assignment import Assignment
date_cols = [Passenger.PF_COL_PAX_A_TIME, Passenger.PF_COL_PAX_B_TIME]
if include_asgn:
date_cols.extend([Assignment.SIM_COL_PAX_BOARD_TIME,
Assignment.SIM_COL_PAX_ALIGHT_TIME,
Assignment.SIM_COL_PAX_A_TIME,
Assignment.SIM_COL_PAX_B_TIME])
links_dtypes = {Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S',
Trip.TRIPS_COLUMN_TRIP_ID :'S',
"A_id" :'S',
"B_id" :'S',
Passenger.PF_COL_ROUTE_ID :'S',
Passenger.PF_COL_TRIP_ID :'S'}
# read datetimes as string initially
for date_col in date_cols:
links_dtypes[date_col] = 'S'
links_file = os.path.join(pathset_dir, Passenger.PATHSET_LINKS_CSV if include_asgn else Passenger.PF_LINKS_CSV)
pathset_links_df = pd.read_csv(links_file, skipinitialspace=True, dtype=links_dtypes)
# convert time strings to datetimes
for date_col in date_cols:
if date_col in pathset_links_df.columns.values:
pathset_links_df[date_col] = pathset_links_df[date_col].map(lambda x: Util.read_time(x))
# convert time duration columns to time durations
link_cols = list(pathset_links_df.columns.values)
if Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_LINK_TIME] = pd.to_timedelta(pathset_links_df[Passenger.PF_COL_LINK_TIME])
elif "%s min" % Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_LINK_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Passenger.PF_COL_LINK_TIME], unit='m')
if Passenger.PF_COL_WAIT_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_WAIT_TIME] = pd.to_timedelta(pathset_links_df[Passenger.PF_COL_WAIT_TIME])
elif "%s min" % Passenger.PF_COL_WAIT_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_WAIT_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Passenger.PF_COL_WAIT_TIME], unit='m')
# if simulation results are available
if Assignment.SIM_COL_PAX_LINK_TIME in link_cols:
pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME] = | pd.to_timedelta(pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME]) | pandas.to_timedelta |
# coding: utf8
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import abc
from clinicadl.tools.inputs.filename_types import FILENAME_TYPE
import os
import nibabel as nib
import torch.nn.functional as F
from scipy import ndimage
import socket
from utils import get_dynamic_image
from .batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, BrightnessTransform, \
GammaTransform, BrightnessGradientAdditiveTransform, LocalSmoothingTransform
from .batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform, RandomCropTransform, \
RandomShiftTransform
from .batchgenerators.transforms.noise_transforms import RicianNoiseTransform, GaussianNoiseTransform, \
GaussianBlurTransform
from .batchgenerators.transforms.spatial_transforms import Rot90Transform, MirrorTransform, SpatialTransform
from .batchgenerators.transforms.abstract_transforms import Compose
from .batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from .data_tool import hilbert_2dto3d_cut, hilbert_3dto2d_cut, hilbert_2dto3d, hilbert_3dto2d, linear_2dto3d_cut, \
linear_3dto2d_cut, linear_2dto3d, linear_3dto2d
#################################
# Datasets loaders
#################################
class MRIDataset(Dataset):
"""Abstract class for all derived MRIDatasets."""
def __init__(self, caps_directory, data_file,
preprocessing, transformations=None):
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {
'CN': 0,
'AD': 1,
'sMCI': 0,
'pMCI': 1,
'MCI': 2,
'unlabeled': -1}
self.preprocessing = preprocessing
self.num_fake_mri = 0
if not hasattr(self, 'elem_index'):
raise ValueError(
"Child class of MRIDataset must set elem_index attribute.")
if not hasattr(self, 'mode'):
raise ValueError(
"Child class of MRIDataset must set mode attribute.")
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument data_file is not of correct type.')
mandatory_col = {"participant_id", "session_id", "diagnosis"}
if self.elem_index == "mixed":
mandatory_col.add("%s_id" % self.mode)
if not mandatory_col.issubset(set(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include %s" % mandatory_col)
self.elem_per_image = self.num_elem_per_image()
def __len__(self):
return len(self.df) * self.elem_per_image
def _get_path(self, participant, session, mode="image", fake_caps_path=None):
if self.preprocessing == "t1-linear":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# temp_path = path.join(self.caps_directory, 'subjects', participant, session,
# 't1_linear')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
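            # Lookup order (as implemented below): fake .pt tensor, then fake .nii.gz (converted and
            # cached as .pt), then the real .pt, then the real .nii.gz; misses are only reported.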
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # exist real pt file
None
elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print(
'Can not find:{} and {} and {} in both real and fake folder'.format(image_path, fake_image_path,
fake_nii_path))
else:
if os.path.exists(image_path): # exist real pt file
None
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-extensive":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_extensive',
participant + '_' + session
+ FILENAME_TYPE['skull_stripped'] + '.pt')
elif self.preprocessing == "t1-spm-graymatter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
temp_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # exist real pt file
None
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print(
'Can not find:{} and {} and {} in both real and fake folder'.format(image_path, fake_image_path,
fake_nii_path))
else:
if os.path.exists(image_path): # exist real pt file
None
                elif os.path.exists(origin_nii_path): # exist real nii file
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
print('Can not find:{}'.format(origin_nii_path))
elif self.preprocessing == "t1-spm-whitematter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
                # use the fake image first, because some images lacking in the tsv are present in the CAPS
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
                elif os.path.exists(image_path):  # a cached real .pt file already exists
                    pass
                elif os.path.exists(origin_nii_path):  # real .nii.gz exists: convert it and cache as .pt
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
                if os.path.exists(image_path):  # a cached real .pt file already exists
                    pass
                elif os.path.exists(origin_nii_path):  # real .nii.gz exists: convert it and cache as .pt
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-spm-csf":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
                # use the fake image first, because some images lacking in the tsv are present in the CAPS
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
                elif os.path.exists(image_path):  # a cached real .pt file already exists
                    pass
                elif os.path.exists(origin_nii_path):  # real .nii.gz exists: convert it and cache as .pt
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
                if os.path.exists(image_path):  # a cached real .pt file already exists
                    pass
                elif os.path.exists(origin_nii_path):  # real .nii.gz exists: convert it and cache as .pt
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
return image_path
def _get_meta_data(self, idx):
image_idx = idx // self.elem_per_image
participant = self.df.loc[image_idx, 'participant_id']
session = self.df.loc[image_idx, 'session_id']
if self.elem_index is None:
elem_idx = idx % self.elem_per_image
elif self.elem_index == "mixed":
elem_idx = self.df.loc[image_idx, '%s_id' % self.mode]
else:
elem_idx = self.elem_index
diagnosis = self.df.loc[image_idx, 'diagnosis']
label = self.diagnosis_code[diagnosis]
return participant, session, elem_idx, label
def _get_full_image(self):
from ..data.utils import find_image_path as get_nii_path
import nibabel as nib
if self.preprocessing in ["t1-linear", "t1-extensive"]:
participant_id = self.df.loc[0, 'participant_id']
session_id = self.df.loc[0, 'session_id']
try:
image_path = self._get_path(participant_id, session_id, "image")
image = torch.load(image_path)
except FileNotFoundError:
try:
image_path = get_nii_path(
self.caps_directory,
participant_id,
session_id,
preprocessing=self.preprocessing)
image_nii = nib.load(image_path)
image_np = image_nii.get_fdata()
image = ToTensor()(image_np)
except:
                # if the CAPS folder only contains extracted slices/patches, the whole image cannot be
                # found on disk, so fall back to a tensor of the expected full-image size
# image_path = os.path.join(self.caps_directory,'sub-ADNI002S0295_ses-M00_T1w_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.nii.gz')
# image_nii = nib.load(image_path)
# image_np = image_nii.get_fdata()
# image = ToTensor()(image_np)
image = torch.zeros([169, 208, 179]) # in those segm data, size : [169, 208, 179]
        elif self.preprocessing in ["t1-spm-graymatter", "t1-spm-whitematter", "t1-spm-csf"]:
image = torch.zeros([121, 145, 121]) # in those segm data, size : [121, 145, 121]
return image
@abc.abstractmethod
def __getitem__(self, idx):
pass
@abc.abstractmethod
def num_elem_per_image(self):
pass
class MRIDatasetImage(MRIDataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(self, caps_directory, data_file,
preprocessing='t1-linear', transformations=None, crop_padding_to_128=False, resample_size=None,
fake_caps_path=None, roi=False, roi_size=32, model=None, data_preprocess='MinMax',
data_Augmentation=False, method_2d=None):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
"""
self.elem_index = None
self.mode = "image"
self.model = model
self.data_preprocess = data_preprocess
self.data_Augmentation = data_Augmentation
self.crop_padding_to_128 = crop_padding_to_128
self.resample_size = resample_size
self.fake_caps_path = fake_caps_path
self.roi = roi
self.roi_size = roi_size
self.method_2d = method_2d
# if self.roi:
# if socket.gethostname() == 'zkyd':
# aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_128.npy'
# elif socket.gethostname() == 'tian-W320-G10':
# aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_128.npy'
# self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
super().__init__(caps_directory, data_file, preprocessing, transformations)
print('crop_padding_to_128 type:{}'.format(self.crop_padding_to_128))
def __getitem__(self, idx):
participant, session, _, label = self._get_meta_data(idx)
image_path = self._get_path(participant, session, "image", fake_caps_path=self.fake_caps_path)
if self.preprocessing == 't1-linear':
ori_name = 't1_linear'
else:
ori_name = 't1_spm'
resampled_image_path = image_path.replace(ori_name, '{}_{}_resample_{}'.format(ori_name, self.data_preprocess,
self.resample_size))
CNN2020_DEEPCNN_image_path = image_path.replace(ori_name,
'{}_{}_model_{}'.format(ori_name, self.data_preprocess,
self.model))
roi_image_path = resampled_image_path.replace('image_based',
'AAL_roi_based_{}'.format(self.roi_size))
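        # Derived cache paths: a resampled copy of the tensor, a model-specific copy for
        # CNN2020/DeepCNN, and a per-ROI copy; each is written on first access and reloaded afterwards.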
# delate_image_path = image_path.replace('image_based',
# 'AAL_roi_based_{}'.format(self.roi_size))
# if os.path.exists(delate_image_path):
# os.remove(delate_image_path)
# print('delating:{}'.format(delate_image_path))
if not self.data_Augmentation: # No data_Augmentation, 1. check local disk whether have saved data. 2. If not, process data and save to desk
if self.roi and 'ROI' in self.model:
# Get resampled_image
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
# Get roi image
if os.path.exists(roi_image_path):
try:
ROI_image = torch.load(roi_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(roi_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
data = self.roi_extract(data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
# sample = {'image': image, 'roi_image': ROI_image, 'label': label, 'participant_id': participant,
# 'session_id': session,
# 'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
dir, file = os.path.split(roi_image_path)
if not os.path.exists(dir):
os.makedirs(dir)
torch.save(ROI_image, roi_image_path)
print('Save roi image: {}'.format(roi_image_path))
sample = {'image': ROI_image, 'label': label, 'participant_id': participant,
'session_id': session, 'all_image': resampled_image,
'image_path': roi_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
if os.path.exists(CNN2020_DEEPCNN_image_path):
CNN2020_DEEPCNN_image_image = torch.load(CNN2020_DEEPCNN_image_path)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
try:
CNN2020_DEEPCNN_image_image = data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
                    torch.save(CNN2020_DEEPCNN_image_image, CNN2020_DEEPCNN_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
if os.path.exists(path_2d):
try:
data_2d = torch.load(path_2d)
except:
print('Wrong file:{}'.format(path_2d))
else:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'hilbert_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model not in [
"Conv5_FC3",
'DeepCNN',
'CNN2020',
'CNN2020_gcn',
'DeepCNN_gcn',
"Dynamic2D_net_Alex",
"Dynamic2D_net_Res34",
"Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16",
"Dynamic2D_net_Vgg11",
"Dynamic2D_net_Mobile",
'ROI_GCN']:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
except:
raise FileExistsError('file error:{}'.format(resampled_image_path))
# if self.data_Augmentation and self.transformations:
# dict = {}
# dict['data'] = resampled_image
# begin_trans_indx = 0
# for i in range(len(self.transformations.transforms)):
# if self.transformations.transforms[i].__class__.__name__ in ['ItensityNormalizeNonzeorVolume',
# 'ItensityNormalizeNonzeorVolume',
# 'MinMaxNormalization']:
# begin_trans_indx = i + 1
# resampled_image = self.transformations(begin_trans_indx=begin_trans_indx, **dict)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
                    torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
sample = {'image': resampled_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': resampled_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
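                # Collapse the 3D volume into a single 2D "dynamic image" and replicate it on three
                # channels so the 2D ImageNet-style backbones listed above can consume it.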
image_np = np.array(resampled_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
return sample
else: # Use data_Augmentation, 1. Just load original data and process it
# 1. load original data
image = torch.load(image_path)
if self.transformations: # Augmentation
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
augmentation_data = image.squeeze() # [128, 128, 128]
# print(self.transformations)
# print(self.transformations[0])
# if self.crop_padding_to_128 and image.shape[1] != 128:
# image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
# image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
# pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
# image = pad(image) # [1, 1, 128, 128, 128]
# image = image.squeeze(0) # [1, 128, 128, 128]
# if self.resample_size is not None:
# assert self.resample_size > 0, 'resample_size should be a int positive number'
# image = image.unsqueeze(0)
# image = F.interpolate(image,
# size=self.resample_size) # resize to resample_size * resample_size * resample_size
# print('resample before trans shape:{}'.format(image.shape))
# print('resample before trans mean:{}'.format(image.mean()))
# print('resample before trans std:{}'.format(image.std()))
# print('resample before trans max:{}'.format(image.max()))
# print('resample before trans min:{}'.format(image.min()))
# # image = self.transformations(image)
# # print('resample after trans shape:{}'.format(image.shape))
# # print('resample after trans mean:{}'.format(image.mean()))
# # print('resample after trans std:{}'.format(image.std()))
# # print('resample after trans max:{}'.format(image.max()))
# # print('resample after trans min:{}'.format(image.min()))
# image = image.squeeze(0)
#
# if self.model in ['DeepCNN', 'DeepCNN_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[49, 39, 38])
# image = image.squeeze(0)
# elif self.model in ['CNN2020', 'CNN2020_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[139, 177, 144])
# image = image.squeeze(0)
# # preprocessing data
# data = image.squeeze() # [128, 128, 128]
# # print(data.shape)
# input_W, input_H, input_D = data.shape
# if self.model not in ["ConvNet3D", "ConvNet3D_gcn", "VoxCNN", "Conv5_FC3", 'DeepCNN', 'CNN2020', 'CNN2020_gcn',
# "VoxCNN_gcn", 'DeepCNN_gcn', "ConvNet3D_v2", "ConvNet3D_ori", "Dynamic2D_net_Alex",
# "Dynamic2D_net_Res34", "Dynamic2D_net_Res18", "Dynamic2D_net_Vgg16",
# "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
# # drop out the invalid range
# # if self.preprocessing in ['t1-spm-graymatter', 't1-spm-whitematter', 't1-spm-csf']:
# data = self.__drop_invalid_range__(data)
# print('drop_invalid_range shape:{}'.format(data.shape))
# print('drop_invalid_range mean:{}'.format(data.mean()))
# print('drop_invalid_range std:{}'.format(data.std()))
# print('drop_invalid_range max:{}'.format(data.max()))
# print('drop_invalid_range min:{}'.format(data.min()))
# # resize data
# data = self.__resize_data__(data, input_W, input_H, input_D)
# print('resize_data shape:{}'.format(data.shape))
# print('resize_data mean:{}'.format(data.mean()))
# print('resize_data std:{}'.format(data.std()))
# print('resize_data max:{}'.format(data.max()))
# print('resize_data min:{}'.format(data.min()))
# # normalization datas
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data)
# print('itensity_normalize shape:{}'.format(data.shape))
# print('itensity_normalize mean:{}'.format(data.mean()))
# print('itensity_normalize std:{}'.format(data.std()))
# print('itensity_normalize max:{}'.format(data.max()))
# print('itensity_normalize min:{}'.format(data.min()))
# # if self.transformations and self.model in ["ConvNet3D", "VoxCNN"]:
# # data = self.transformations(data)
# data = torch.from_numpy(data)
# if self.model in ['CNN2020', 'CNN2020_gcn']:
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data, normalize_all=True)
# data = torch.from_numpy(data)
if self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image_np = np.array(augmentation_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
if self.roi and 'ROI' in self.model:
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
augmentation_data = self.roi_extract(augmentation_data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = augmentation_data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
sample = {'image': ROI_image, 'all_image': resampled_image, 'label': label,
'participant_id': participant,
'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
elif self.method_2d == 'hilbert_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
elif self.method_2d == 'linear_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
try:
CNN2020_DEEPCNN_image_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(augmentation_data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(image, CNN2020_DEEPCNN_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
else:
try:
image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
image = np.expand_dims(augmentation_data, 0)
sample = {'image': image, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
def __drop_invalid_range__(self, volume):
"""
Cut off the invalid area
"""
zero_value = volume[0, 0, 0]
# print('zero:{}'.format(zero_value))
non_zeros_idx = np.where(volume != zero_value)
# print('zero idx:{}'.format(non_zeros_idx))
try:
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
except:
print(zero_value)
print(non_zeros_idx)
return volume[min_z:max_z + 1, min_h:max_h + 1, min_w:max_w + 1]
    def __resize_data__(self, data, input_W=None, input_H=None, input_D=None, target_size=None):
        """
        Resize the data to the requested spatial size (nearest-neighbour interpolation).
        The target can be given either as three positional dimensions or as ``target_size``.
        """
        if target_size is not None:
            input_W, input_H, input_D = target_size
        [depth, height, width] = data.shape
        scale = [input_W * 1.0 / depth, input_H * 1.0 / height, input_D * 1.0 / width]
        data = ndimage.interpolation.zoom(data, scale, order=0)
        return data
def __itensity_normalize_one_volume__(self, volume, normalize_all=False):
"""
        normalize the intensity of an nd volume based on the mean and std of the nonzero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
if normalize_all:
pixels = volume
else:
pixels = volume[volume > 0]
mean = pixels.mean()
std = pixels.std()
out = (volume - mean) / std
if not normalize_all:
out_random = np.random.normal(0, 1, size=volume.shape)
out[volume == 0] = out_random[volume == 0]
return out
def num_elem_per_image(self):
return 1
def roi_extract(self, MRI, roi_size=32, sub_id=None, preprocessing=None, session=None, save_nii=False):
roi_data_list = []
roi_label_list = []
if 'slave' in socket.gethostname():
aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'tian-W320-G10':
aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'zkyd':
aal_mask_dict_dir = '/data/fanchenchen/atlas/aal_mask_dict_right.npy'
self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
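        # For each AAL region (only the first 90 are used, see the break below): resize the MRI to
        # the atlas grid (181, 217, 181), apply the region mask, crop to the non-background bounding
        # box, resize the crop to roi_size^3 and finally stack all regions into a single tensor.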
for i, key in enumerate(self.aal_mask_dict.keys()):
# useful_data = self.__drop_invalid_range__(self.aal_mask_dict[key])
# useful_data = resize_data(useful_data, target_size=[128, 128, 128])
# useful_data = useful_data[np.newaxis, np.newaxis, :, :, :] # 1,1,128,128,128
# roi_batch_data = MRI.cpu().numpy() * useful_data # batch, 1, 128,128,128
mask = self.aal_mask_dict[key]
# print('mask min:{}'.format(mask.min()))
# print('mask max:{}'.format(mask.max()))
# print('mask:{}'.format(mask))
ww, hh, dd = MRI.shape
MRI = self.__resize_data__(MRI, 181, 217, 181)
# MRI = (MRI - MRI.min()) / (MRI.max() - MRI.min())
roi_data = MRI * mask.squeeze() # batch, 1, 128,128,128
# print('roi_data min:{}'.format(roi_data.min()))
# print('roi_data max:{}'.format(roi_data.max()))
roi_label_list.append(key)
# save nii to Visualization
# print(image_np.max())
# print(image_np.min())
# print(roi_data.shape)
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_ori_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
try:
roi_data = self.__drop_invalid_range__(roi_data) # xx,xx,xx
            except Exception:
                # cropping can fail (e.g. an empty ROI); keep the uncropped region and report it
                print(sub_id)
                print(session)
# roi_data = self.__drop_invalid_range__(mask.squeeze()) # xx,xx,xx
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_drop_invalid_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
# print(roi_data.shape)
roi_data = self.__resize_data__(roi_data, roi_size, roi_size, roi_size) # roi_size, roi_size, roi_size
# print(roi_data.shape)
roi_data = torch.from_numpy(roi_data)
roi_data_list.append(roi_data) # roi_size, roi_size, roi_size
# save nii to Visualization
if save_nii:
image_np = roi_data.numpy()
image_nii = nib.Nifti1Image(image_np, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_resize_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
if i >= 89:
break
roi_batch = torch.stack(roi_data_list).type(torch.float32) # num_roi, roi_size, roi_size, roi_size
return roi_batch
class MRIDatasetPatch(MRIDataset):
def __init__(self, caps_directory, data_file, patch_size, stride_size, transformations=None, prepare_dl=False,
patch_index=None, preprocessing="t1-linear"):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
patch_index (int, optional): If a value is given the same patch location will be extracted for each image.
else the dataset will load all the patches possible for one image.
patch_size (int): size of the regular cubic patch.
stride_size (int): length between the centers of two patches.
"""
self.patch_size = patch_size
self.stride_size = stride_size
self.elem_index = patch_index
self.mode = "patch"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, patch_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
patch_path = path.join(self._get_path(participant, session, "patch")[0:-7]
+ '_patchsize-' + str(self.patch_size)
+ '_stride-' + str(self.stride_size)
+ '_patch-' + str(patch_idx) + '_T1w.pt')
image = torch.load(patch_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_patch_from_mri(full_image, patch_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session, 'patch_id': patch_idx}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
image = self._get_full_image()
patches_tensor = image.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
num_patches = patches_tensor.shape[0]
return num_patches
def extract_patch_from_mri(self, image_tensor, index_patch):
patches_tensor = image_tensor.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
extracted_patch = patches_tensor[index_patch, ...].unsqueeze_(
0).clone()
return extracted_patch
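# Note on patch counts: unfold produces floor((dim - patch_size) / stride_size) + 1 windows per
# axis, so a 128^3 volume with patch_size=50 and stride_size=50 yields 2 * 2 * 2 = 8 patches.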
class MRIDatasetRoi(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, prepare_dl=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
"""
self.elem_index = None
self.mode = "roi"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, roi_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
raise NotImplementedError(
'The extraction of ROIs prior to training is not implemented.')
else:
image_path = self._get_path(participant, session, "image")
image = torch.load(image_path)
patch = self.extract_roi_from_mri(image, roi_idx)
if self.transformations:
patch = self.transformations(patch)
sample = {'image': patch, 'label': label,
'participant_id': participant, 'session_id': session,
'roi_id': roi_idx}
return sample
def num_elem_per_image(self):
return 2
def extract_roi_from_mri(self, image_tensor, left_is_odd):
"""
:param image_tensor: (Tensor) the tensor of the image.
:param left_is_odd: (int) if 1 the left hippocampus is extracted, else the right one.
:return: Tensor of the extracted hippocampus
"""
if self.preprocessing == "t1-linear":
if left_is_odd == 1:
# the center of the left hippocampus
crop_center = (61, 96, 68)
else:
# the center of the right hippocampus
crop_center = (109, 96, 68)
else:
raise NotImplementedError("The extraction of hippocampi was not implemented for "
"preprocessing %s" % self.preprocessing)
crop_size = (50, 50, 50) # the output cropped hippocampus size
extracted_roi = image_tensor[
:,
crop_center[0] - crop_size[0] // 2: crop_center[0] + crop_size[0] // 2:,
crop_center[1] - crop_size[1] // 2: crop_center[1] + crop_size[1] // 2:,
crop_center[2] - crop_size[2] // 2: crop_center[2] + crop_size[2] // 2:
].clone()
return extracted_roi
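# The hippocampus crop centers above, (61, 96, 68) and (109, 96, 68), are voxel coordinates in the
# cropped t1-linear space (169 x 208 x 179), which is why only the "t1-linear" preprocessing is
# supported here.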
class MRIDatasetSlice(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, mri_plane=0, prepare_dl=False,
discarded_slices=20, mixed=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
mri_plane (int): Defines which mri plane is used for slice extraction.
discarded_slices (int or list): number of slices discarded at the beginning and the end of the image.
If one single value is given, the same amount is discarded at the beginning and at the end.
mixed (bool): If True will look for a 'slice_id' column in the input DataFrame to load each slice
independently.
"""
# Rename MRI plane
self.mri_plane = mri_plane
self.direction_list = ['sag', 'cor', 'axi']
if self.mri_plane >= len(self.direction_list):
raise ValueError(
"mri_plane value %i > %i" %
(self.mri_plane, len(
self.direction_list)))
# Manage discarded_slices
if isinstance(discarded_slices, int):
discarded_slices = [discarded_slices, discarded_slices]
if isinstance(discarded_slices, list) and len(discarded_slices) == 1:
discarded_slices = discarded_slices * 2
self.discarded_slices = discarded_slices
if mixed:
self.elem_index = "mixed"
else:
self.elem_index = None
self.mode = "slice"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, slice_idx, label = self._get_meta_data(idx)
slice_idx = slice_idx + self.discarded_slices[0]
if self.prepare_dl:
# read the slices directly
slice_path = path.join(self._get_path(participant, session, "slice")[0:-7]
+ '_axis-%s' % self.direction_list[self.mri_plane]
+ '_channel-rgb_slice-%i_T1w.pt' % slice_idx)
image = torch.load(slice_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_slice_from_mri(full_image, slice_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session,
'slice_id': slice_idx}
return sample
def num_elem_per_image(self):
if self.elem_index == "mixed":
return 1
image = self._get_full_image()
return image.size(self.mri_plane + 1) - \
self.discarded_slices[0] - self.discarded_slices[1]
def extract_slice_from_mri(self, image, index_slice):
"""
        This is a function to grab one slice in each view and create an RGB image for transfer learning: the slice is duplicated into the R, G, B channels.
:param image: (tensor)
:param index_slice: (int) index of the wanted slice
:return:
To note, for each view:
Axial_view = "[:, :, slice_i]"
Coronal_view = "[:, slice_i, :]"
Sagittal_view= "[slice_i, :, :]"
"""
image = image.squeeze(0)
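        # Build the index tuple for the requested plane, e.g. mri_plane=2 (axial) indexes
        # image[:, :, index_slice]; the selected slice is then repeated on three channels.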
simple_slice = image[(slice(None),) * self.mri_plane + (index_slice,)]
triple_slice = torch.stack((simple_slice, simple_slice, simple_slice))
return triple_slice
def return_dataset(mode, input_dir, data_df, preprocessing,
transformations, params, cnn_index=None):
"""
Return appropriate Dataset according to given options.
Args:
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
input_dir: (str) path to a directory containing a CAPS structure.
data_df: (DataFrame) List subjects, sessions and diagnoses.
preprocessing: (str) type of preprocessing wanted ('t1-linear' or 't1-extensive')
transformations: (transforms) list of transformations performed on-the-fly.
params: (Namespace) options used by specific modes.
cnn_index: (int) Index of the CNN in a multi-CNN paradigm (optional).
Returns:
(Dataset) the corresponding dataset.
"""
if cnn_index is not None and mode in ["image", "roi", "slice"]:
raise ValueError("Multi-CNN is not implemented for %s mode." % mode)
if params.model == "ROI_GCN":
use_roi = True
else:
use_roi = False
if mode == "image":
return MRIDatasetImage(
input_dir,
data_df,
preprocessing,
transformations=transformations,
crop_padding_to_128=params.crop_padding_to_128,
resample_size=params.resample_size,
fake_caps_path=params.fake_caps_path,
# only_use_fake=params.only_use_fake,
roi=use_roi,
roi_size=params.roi_size,
model=params.model,
data_preprocess=params.data_preprocess,
data_Augmentation=params.data_Augmentation,
method_2d=params.method_2d
)
if mode == "patch":
return MRIDatasetPatch(
input_dir,
data_df,
params.patch_size,
params.stride_size,
preprocessing=preprocessing,
transformations=transformations,
prepare_dl=params.prepare_dl,
patch_index=cnn_index
)
elif mode == "roi":
return MRIDatasetRoi(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations
)
elif mode == "slice":
return MRIDatasetSlice(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations,
mri_plane=params.mri_plane,
prepare_dl=params.prepare_dl,
discarded_slices=params.discarded_slices)
else:
raise ValueError("Mode %s is not implemented." % mode)
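# Illustrative call (caps_dir, data_df, transformations and params are assumed to be prepared by the
# caller, e.g. via get_transforms and the project's option parser):
#
#   dataset = return_dataset('image', caps_dir, data_df, 't1-linear', transformations, params)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)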
def compute_num_cnn(input_dir, tsv_path, options, data="train"):
transformations = get_transforms(options)
if data == "train":
example_df, _ = load_data(tsv_path, options.diagnoses, 0, options.n_splits, options.baseline)
elif data == "classify":
example_df = pd.read_csv(tsv_path, sep='\t')
else:
example_df = load_data_test(tsv_path, options.diagnoses)
full_dataset = return_dataset(options.mode, input_dir, example_df,
options.preprocessing, transformations, options)
return full_dataset.elem_per_image
##################################
# Transformations
##################################
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
from scipy.ndimage.filters import gaussian_filter
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
def __repr__(self):
return self.__class__.__name__ + '(sigma={})'.format(self.sigma)
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
def __repr__(self):
return self.__class__.__name__
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.min()) / (image.max() - image.min())
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeNonzeorVolume(object):
"""
    normalize the intensity of an nd volume based on the mean and std of the nonzero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze()
image = np.array(image)
pixels = image[image > 0]
mean = pixels.mean()
std = pixels.std()
out = (image - mean) / std
out_random = np.random.normal(0, 1, size=image.shape)
out[image == 0] = out_random[image == 0]
out = torch.from_numpy(out.copy())
data_dict['data'] = out.unsqueeze(0)
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeAllVolume(object):
"""
    normalize the intensity of an nd volume based on the mean and std of the whole volume
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.mean()) / image.std()
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class CropPpadding128(object):
"""
    Crop and pad a 1*121*145*121 volume to 1*128*128*128.
"""
def __call__(self, **data_dict):
image = data_dict['data']
if image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
3] == 121:
image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
image = pad(image) # [1, 1, 128, 128, 128]
image = image.squeeze(0) # [1, 128, 128, 128]
elif image.shape[1] == 128 and image.shape[2] == 128 and image.shape[
3] == 128:
pass
else:
assert image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
                3] == 121, "image shape must be 1*121*145*121 or 1*128*128*128, but given shape:{}".format(image.shape)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class Resize(torch.nn.Module):
"""
Resize data to target size
"""
def __init__(self, resample_size):
super().__init__()
# assert resample_size > 0, 'resample_size should be a int positive number'
self.resample_size = resample_size
def forward(self, **data_dict):
image = data_dict['data']
image = image.unsqueeze(0)
image = F.interpolate(image, size=self.resample_size)
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(resample_size={0})'.format(self.resample_size)
class CheckDictSize(object):
"""
    Ensure the image in the data dict has 5 dims: (1, 1, 128, 128, 128).
"""
def __call__(self, **dict):
image = dict['data']
if len(image.shape) == 4:
image = np.array(image.unsqueeze(0))
elif len(image.shape) == 3:
image = np.array(image.unsqueeze(0).unsqueeze(0))
assert len(image.shape) == 5
dict['data'] = image
return dict
def __repr__(self):
return self.__class__.__name__
class DictToImage(object):
"""
    Extract the image tensor from the data dict.
"""
def __call__(self, **dict):
image = dict['data']
if len(image.shape) == 5:
image = image.squeeze(0)
elif len(image.shape) == 3:
image = image.unsqueeze(0)
return image
def __repr__(self):
return self.__class__.__name__
class DropInvalidRange(torch.nn.Module):
"""
Cut off the invalid area
"""
def __init__(self, keep_size=True):
super().__init__()
self.keep_size = keep_size
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze(0)
zero_value = image[0, 0, 0]
z, h, w = image.shape
non_zeros_idx = np.where(image != zero_value)
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
image = image[min_z:max_z, min_h:max_h, min_w:max_w].unsqueeze(0)
if self.keep_size:
image = image.unsqueeze(0)
image = F.interpolate(image, size=[z, h, w])
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(keep_size={})'.format(self.keep_size)
def get_transforms(params, is_training=True):
if params.mode == 'image':
trans_list = []
trans_list.append(MinMaxNormalization())
if params.preprocessing != 't1-linear':
trans_list.append(CropPpadding128())
trans_list.append(DropInvalidRange(keep_size=True))
if params.resample_size is not None:
trans_list.append(Resize(params.resample_size))
if params.data_preprocess == 'MinMax':
trans_list.append(MinMaxNormalization())
elif params.data_preprocess == 'NonzeorZscore':
trans_list.append(ItensityNormalizeNonzeorVolume())
elif params.data_preprocess == 'AllzeorZscore':
trans_list.append(ItensityNormalizeAllVolume())
if is_training:
if params.ContrastAugmentationTransform > 0:
trans_list.append(CheckDictSize()) # for this code library, input data must be dim=5, 1,1,128,128,128
trans_list.append(ContrastAugmentationTransform((0.3, 3.), preserve_range=True,
p_per_sample=params.ContrastAugmentationTransform))
if params.BrightnessTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
BrightnessTransform(mu=0, sigma=1, per_channel=False, p_per_sample=params.BrightnessTransform))
if params.GammaTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GammaTransform(gamma_range=(0.5, 2), invert_image=False, per_channel=False, retain_stats=False,
p_per_sample=params.GammaTransform))
if params.BrightnessGradientAdditiveTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(BrightnessGradientAdditiveTransform(scale=(5, 5),
p_per_sample=params.BrightnessGradientAdditiveTransform))
if params.LocalSmoothingTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(LocalSmoothingTransform(scale=(5, 5),
p_per_sample=params.LocalSmoothingTransform))
if params.RandomShiftTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RandomShiftTransform(shift_mu=0, shift_sigma=3, p_per_sample=params.RandomShiftTransform))
if params.RicianNoiseTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RicianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.RicianNoiseTransform))
if params.GaussianNoiseTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
                    GaussianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.GaussianNoiseTransform))
if params.GaussianBlurTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GaussianBlurTransform(blur_sigma=(1, 5), different_sigma_per_channel=False,
                                          p_per_sample=params.GaussianBlurTransform))
if params.Rot90Transform > 0:
trans_list.append(CheckDictSize())
trans_list.append(Rot90Transform(num_rot=(1, 2, 3), axes=(0, 1, 2), p_per_sample=params.Rot90Transform))
if params.MirrorTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(MirrorTransform(axes=(0, 1, 2), p_per_sample=params.MirrorTransform))
if params.SpatialTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
SpatialTransform(patch_size=(params.resample_size, params.resample_size, params.resample_size),
p_el_per_sample=params.SpatialTransform,
p_rot_per_axis=params.SpatialTransform,
p_scale_per_sample=params.SpatialTransform,
p_rot_per_sample=params.SpatialTransform))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['DeepCNN', 'DeepCNN_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[49, 39, 38]))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['CNN2020', 'CNN2020_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[139, 177, 144]))
trans_list.append(ItensityNormalizeAllVolume())
trans_list.append(DictToImage())
transformations = Compose(trans_list)
elif params.mode in ["patch", "roi"]:
if params.minmaxnormalization:
transformations = Compose([MinMaxNormalization(), DictToImage()])
else:
transformations = None
elif params.mode == "slice":
trg_size = (224, 224)
if params.minmaxnormalization:
transformations = transforms.Compose([MinMaxNormalization(),
transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
transformations = transforms.Compose([transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
raise ValueError("Transforms for mode %s are not implemented." % params.mode)
    print('transformer:{}'.format(transformations))
return transformations
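# Note: get_transforms only reads attributes from params; for mode == 'image' it uses
# params.preprocessing, params.resample_size, params.data_preprocess, params.model and, when
# is_training is True, one probability attribute per augmentation transform listed above.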
################################
# tsv files loaders
################################
def load_data(train_val_path, diagnoses_list,
split, n_splits=None, baseline=True, fake_caps_path=None, only_use_fake=False):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
if n_splits is None:
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
else:
train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
'split-' + str(split))
valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
'split-' + str(split))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if isinstance(baseline, str):
if baseline in ['true', 'True']:
train_diagnosis_path = path.join(
train_path, diagnosis + '_baseline.tsv')
elif baseline in ['false', 'False']:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
else:
if baseline:
train_diagnosis_path = path.join(
train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_path = path.join(
valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = | pd.concat([valid_df, valid_diagnosis_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
code to combine data from different algorithms with different degrees and
coefficients
"""
import pandas as pd
print('Reading data...')
xgb1 = | pd.read_csv("../output/xgboost_1.csv") | pandas.read_csv |
import typing as t
import numpy as np
import pandas as pd
from .report import Report
def plot_performance(freq: str = '1h', **kwargs: t.Union[pd.Series, Report]) -> None:
comparison = pd.DataFrame(dtype=np.float64)
price = min([x.initial_aum for x in kwargs.values() if isinstance(x, Report)])
report_count = 0
for name, arg in kwargs.items():
if isinstance(arg, Report):
report_count += 1
aum = arg.holdings.resample(freq).asfreq().sum(axis=1)
comparison[name] = aum
if isinstance(arg, pd.Series):
factor = price / arg.dropna().iloc[0]
comparison[name] = (arg.resample(freq).ffill() * factor)
comparison.plot()
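# Example (illustrative; report and btc_price are assumed to exist in the calling code):
#
#   plot_performance('4h', strategy=report, buy_and_hold=btc_price)
#
# Report arguments are resampled from their holdings, while plain price series are rescaled so that
# every curve starts from the smallest initial AUM among the reports.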
def plot_holdings(report: Report, *, freq: str = '1h', **kwargs) -> None:
holding_sample = report.holdings.resample(freq).asfreq()
holding_sample.where(holding_sample > 0., 0.).plot.area(**kwargs)
def plot_cost_proceeds(report: Report, **kwargs) -> None:
df = | pd.DataFrame({'Cost': report.costs, 'Proceeds': report.proceeds}) | pandas.DataFrame |
"""
Spatial DataFrame Object developed off of the Panda's Dataframe object
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import arcgis
from six import string_types, integer_types
HAS_PANDAS = True
try:
import pandas as pd
from pandas import DataFrame, Series, Index
import numpy
from .base import BaseSpatialPandas
from .geoseries import GeoSeries
except:
HAS_PANDAS = False
class DataFrame:
pass
class BaseSpatialPandas:
pass
from arcgis.gis import GIS
from six import PY3
from six import string_types
from arcgis.geometry import _types
GEO_COLUMN_DEFAULT = "SHAPE"
GEOM_TYPES = (_types.Point, _types.MultiPoint,
_types.Polygon,_types.Geometry,
_types.Polyline,
_types.BaseGeometry)
try:
import arcpy
from arcpy import Geometry
HASARCPY = True
HAS_ARCPY = True
GEOM_TYPES = [arcpy.Point, arcpy.Polygon,
arcpy.Geometry, arcpy.PointGeometry,
arcpy.Polyline, arcpy.Multipatch,
arcpy.Multipoint] + list(GEOM_TYPES)
GEOM_TYPES = tuple(GEOM_TYPES)
except:
# warning.warn("Missing Pro will cause functionality to be limited")
HASARCPY = False
HAS_ARCPY = False
try:
import shapely
from shapely.geometry.base import BaseGeometry as _BaseGeometry
GEOM_TYPES = [_BaseGeometry] + list(GEOM_TYPES)
GEOM_TYPES = tuple(GEOM_TYPES)
HASSHAPELY = True
except:
HASSHAPELY = False
class SpatialDataFrame(BaseSpatialPandas, DataFrame):
"""
A Spatial Dataframe is an object to manipulate, manage and translate
data into new forms of information for users.
Functionality of the Spatial DataFrame is determined by the Geometry Engine
available to the object at creation. It will first leverage the arcpy
geometry engine, then shapely, then it will create the geometry objects
without any engine.
    **Scenarios**
================= ======================================================
**Engine Type** **Functionality**
----------------- ------------------------------------------------------
ArcPy Users will have the full functionality provided by the
API.
----------------- ------------------------------------------------------
Shapely Users get a sub-set of operations, and all properties.
:Valid Properties:
- JSON
- WKT
- WKB
- area
- centroid
- extent
- first_point
- hull_rectangle
- is_multipart
- label_point
- last_point
- length
- length3D
- part_count
- point_count
- true_centroid
:Valid Functions:
- boundary
- buffer
- contains
- convex_hull
- crosses
- difference
- disjoint
- distance_to
- equals
- generalize
- intersect
- overlaps
- symmetric_difference
- touches
- union
- within
Everything else will return None
----------------- ------------------------------------------------------
No Engine Values will return None by default
================= ======================================================
Required Parameters:
None
Optional:
:param data: panda's dataframe containing attribute information
:param geometry: list/array/geoseries of arcgis.geometry objects
:param sr: spatial reference of the dataframe. This can be the factory
code, WKT string, arcpy.SpatialReference object, or
               arcgis.SpatialReference object.
:param gis: passing a gis.GIS object set to Pro will ensure arcpy is
                installed and a full swath of functionality is available to
the end user.
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache',
'is_copy', '_subtyp', '_index',
'_default_kind', '_default_fill_value', '_metadata',
'__array_struct__', '__array_interface__']
_metadata = ['sr', '_geometry_column_name', '_gis']
_geometry_column_name = GEO_COLUMN_DEFAULT
#----------------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
.. deprecated:: 1.5.0
see :class:`GeoAccessor` and :class:`GeoSeriesAccessor`
.. note::
This class is no longer maintained past version 1.4.2
A Spatial Dataframe is an object to manipulate, manage and translate
data into new forms of information for users.
Required Parameters:
None
===================== ===============================================================
**optional argument** **Description**
--------------------- ---------------------------------------------------------------
data optional Panda's dataframe, object containing the attribute
information.
--------------------- ---------------------------------------------------------------
index optional Index or array-like
Index to use for resulting frame. Will default to np.arange(n)
if no indexing information part of input data and no index
provided
--------------------- ---------------------------------------------------------------
columns optional Index or array-like, Column labels to use for
resulting frame. Will default to np.arange(n) if no column
labels are provided
--------------------- ---------------------------------------------------------------
        dtype                 dtype, default None, Data type to force, otherwise infer
--------------------- ---------------------------------------------------------------
copy optional boolean, default False. Copy data from inputs.
--------------------- ---------------------------------------------------------------
geometry optional list, default None, list/array/geoseries of
arcgis.geometry objects
--------------------- ---------------------------------------------------------------
sr optional spatial reference of the dataframe.
--------------------- ---------------------------------------------------------------
        gis                   optional gis.GIS object, default None. The GIS object allows
users to use non-public GIS information.
===================== ===============================================================
Example: Creating SpatialDataFrame from a CSV
df = pd.read_csv(r'D:\ipython_working_folder\joel\store_locations.csv', index_col='OBJECTID')
geoms = []
for i in range(0, len(df)):
x = df.iloc[i]['X']
y = df.iloc[i]['Y']
geoms.append(Point({"x" : x, "y" : y, "spatialReference" : {"wkid" : 4326}}))
sdf = arcgis.features.SpatialDataFrame(data=df, geometry=geoms)
Example: Creating SpatialDataFrame Using List Comprehension
coords = [[1,2], [3,4]]
sdf = SpatialDataFrame(df,
geometry=[arcgis.geometry.Geometry({'x':r[0],
'y':r[1], 'spatialReference':{'wkid':4326}}) for r in coords])
.. Note: When passing in a geometry to the SpatialDataFrame, always assign it to the parameter geometry=<var>
Example: Creating From Feature Class
sdf = SpatialDataFrame.from_featureclass(r"c:\temp\data.gdb\cities")
Example: Create A SpatialDataFrame from a Service
gis = GIS(username="user1", password="<PASSWORD>")
item = gis.content.search("Roads")[0]
feature_layer = item.layers[0]
sdf = SpatialDataFrame.from_layer(feature_layer)
"""
warnings.warn("SpatialDataFrame has been deprecated. Please switch to the GeoAccessor/GeoSeriesAccessor.")
if not HAS_PANDAS:
warnings.warn("pandas and numpy are required for SpatialDataFrame.")
warnings.warn("Please install them.")
gis = kwargs.pop('gis', arcgis.env.active_gis)
self._gis = gis
sr = self._sr(kwargs.pop('sr', 4326))
geometry = kwargs.pop('geometry', None)
super(SpatialDataFrame, self).__init__(*args, **kwargs)
if isinstance(sr, _types.SpatialReference):
self.sr = sr
elif isinstance(sr, integer_types):
self.sr = _types.SpatialReference({'wkid' : sr})
elif isinstance(sr, string_types):
self.sr = _types.SpatialReference({'wkt' : sr})
elif hasattr(sr, 'factoryCode'):
self.sr = _types.SpatialReference({'wkid' : sr.factoryCode})
elif hasattr(sr, 'exportToString'):
self.sr = _types.SpatialReference({'wkt' : sr.exportToString()})
elif sr is not None:
raise ValueError("sr (spatial reference) must be a _types.SpatialReference object")
else:
self.sr = None
if geometry is not None:
self.set_geometry(geometry, inplace=True)
elif 'SHAPE' in self.columns:
if isinstance(self['SHAPE'], (GeoSeries, pd.Series)) and self['SHAPE'].dtype.name == 'object':
if not all(isinstance(x, _types.Geometry) for x in self[self._geometry_column_name]):
geometry = [_types.Geometry(g) for g in self['SHAPE'].tolist()]
del self['SHAPE']
self.set_geometry(geometry, inplace=True)
if self.sr is None:
try:
self.sr = self.geometry[self.geometry.first_valid_index()].spatialReference
except:
self.sr = self._sr(sr)
self._delete_index()
#----------------------------------------------------------------------
@property
def _constructor(self):
"""constructor for class as per Pandas' github page"""
return SpatialDataFrame
#----------------------------------------------------------------------
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
of True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
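Example (illustrative sketch; assumes an existing SpatialDataFrame named sdf):
sdf.info(memory_usage='deep', null_counts=True)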
"""
cols = list(self.columns)
cols.pop(cols.index("SHAPE"))
return self[cols].info(verbose, buf, max_cols,
memory_usage, null_counts)
#----------------------------------------------------------------------
def _sr(self, sr):
"""sets the spatial reference"""
if isinstance(sr, _types.SpatialReference):
return sr
elif isinstance(sr, dict):
return _types.SpatialReference(sr)
elif isinstance(sr, integer_types):
return _types.SpatialReference({'wkid' : sr})
elif isinstance(sr, string_types):
return _types.SpatialReference({'wkt' : sr})
elif hasattr(sr, 'factoryCode'):
return _types.SpatialReference({'wkid' : sr.factoryCode})
elif hasattr(sr, 'exportToString'):
return _types.SpatialReference({'wkt' : sr.exportToString()})
elif sr is not None:
raise ValueError("sr (spatial reference) must be a _types.SpatialReference object")
else:
return None
#----------------------------------------------------------------------
@property
def __feature_set__(self):
"""returns a dictionary representation of an Esri FeatureSet"""
import numpy as np
import datetime
import time
cols_norm = [col for col in self.columns]
cols_lower = [col.lower() for col in self.columns]
fields = []
features = []
date_fields = []
_geom_types = {
arcgis.geometry._types.Point : "esriGeometryPoint",
arcgis.geometry._types.Polyline : "esriGeometryPolyline",
arcgis.geometry._types.MultiPoint : "esriGeometryMultipoint",
arcgis.geometry._types.Polygon : "esriGeometryPolygon"
}
if self.sr is None:
sr = {'wkid' : 4326}
else:
sr = self.sr
fs = {
"objectIdFieldName" : "",
"globalIdFieldName" : "",
"displayFieldName" : "",
"geometryType" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],
"spatialReference" : sr,
"fields" : [],
"features" : []
}
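# Determine the object id field: prefer an existing OBJECTID, FID, or OID column;
# otherwise temporarily add an OBJECTID column, rebuild the feature set once, and return it.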
if 'objectid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]
elif 'fid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]
elif 'oid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]
else:
self['OBJECTID'] = list(range(1, self.shape[0] + 1))
res = self.__feature_set__
del self['OBJECTID']
return res
if 'objectIdFieldName' in fs:
fields.append({
"name" : fs['objectIdFieldName'],
"type" : "esriFieldTypeOID",
"alias" : fs['objectIdFieldName']
})
cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))
if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:
fields.append({
"name" : fs['globalIdFieldName'],
"type" : "esriFieldTypeGlobalID",
"alias" : fs['globalIdFieldName']
})
cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))
elif 'globalIdFieldName' in fs and \
len(fs['globalIdFieldName']) == 0:
del fs['globalIdFieldName']
if self._geometry_column_name in cols_norm:
cols_norm.pop(cols_norm.index(self._geometry_column_name))
for col in cols_norm:
try:
idx = self[col].first_valid_index()
col_val = self[col].loc[idx]
except:
col_val = ""
if isinstance(col_val, (str, np.str)):
l = self[col].str.len().max()
if str(l) == 'nan':
l = 255
fields.append({
"name" : col,
"type" : "esriFieldTypeString",
"length" : int(l),
"alias" : col
})
if fs['displayFieldName'] == "":
fs['displayFieldName'] = col
elif isinstance(col_val, (datetime.datetime,
pd.Timestamp,
np.datetime64,
pd.datetime)):
fields.append({
"name" : col,
"type" : "esriFieldTypeDate",
"alias" : col
})
date_fields.append(col)
elif isinstance(col_val, (np.int32, np.int16, np.int8)):
fields.append({
"name" : col,
"type" : "esriFieldTypeSmallInteger",
"alias" : col
})
elif isinstance(col_val, (int, np.int, np.int64)):
fields.append({
"name" : col,
"type" : "esriFieldTypeInteger",
"alias" : col
})
elif isinstance(col_val, (float, np.float64)):
fields.append({
"name" : col,
"type" : "esriFieldTypeDouble",
"alias" : col
})
elif isinstance(col_val, (np.float32)):
fields.append({
"name" : col,
"type" : "esriFieldTypeSingle",
"alias" : col
})
fs['fields'] = fields
for row in self.to_dict('records'):
geom = {}
if self._geometry_column_name in row:
geom = row[self._geometry_column_name]
del row[self._geometry_column_name]
for f in date_fields:
try:
row[f] = int(row[f].to_pydatetime().timestamp() * 1000)
except:
row[f] = None
features.append(
{
"geometry" : dict(geom),
"attributes" : row
}
)
del row
del geom
fs['features'] = features
return fs
#----------------------------------------------------------------------
@property
def __geo_interface__(self):
"""returns the object as an Feature Collection JSON string"""
if HASARCPY:
template = {
"type": "FeatureCollection",
"features": []
}
geom_type = self.geometry_type
if geom_type.lower() == "point":
geom_type = "Point"
elif geom_type.lower() == "polyline":
geom_type = "LineString"
elif geom_type.lower() == "polygon":
geom_type = "Polygon"
df_copy = self.copy(deep=True)
df_copy['geom_json'] = self.geometry.JSON
df_copy['SHAPE'] = df_copy['geom_json']
del df_copy['geom_json']
for index, row in df_copy.iterrows():
geom = row['SHAPE']
del row['SHAPE']
template['features'].append(
{"type" : geom_type,
"geometry" : pd.io.json.loads(geom),
"attributes":row}
)
return pd.io.json.dumps(template)
@property
def geoextent(self):
"""returns the extent of the spatial dataframe"""
return self.series_extent
#----------------------------------------------------------------------
def __getstate__(self):
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(_data=self._data, _typ=self._typ,
_metadata=self._metadata, **meta)
#----------------------------------------------------------------------
def __setattr__(self, attr, val):
if attr.lower() in ['geometry', 'shape', 'shape@']:
object.__setattr__(self, attr, val)
else:
super(SpatialDataFrame, self).__setattr__(attr, val)
#----------------------------------------------------------------------
def _get_geometry(self):
"""returns the geometry series"""
if self._geometry_column_name not in self.columns:
raise AttributeError("Geometry Column Not Present: %s" % self._geometry_column_name)
return self[self._geometry_column_name]
#----------------------------------------------------------------------
def _set_geometry(self, col):
"""sets the geometry for the panda's dataframe"""
if isinstance(col, (GeoSeries, list, numpy.ndarray, Series)):
self.set_geometry(col, inplace=True)
else:
raise ValueError("Must be a list, np.array, or GeoSeries")
#----------------------------------------------------------------------
geometry = property(fget=_get_geometry,
fset=_set_geometry,
fdel=None,
doc="Get/Set the geometry data for SpatialDataFrame")
#----------------------------------------------------------------------
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self """
# merge operation: using metadata of the left object
if method == 'merge':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.left, name, None))
# concat operation: using metadata of the first object
elif method == 'concat':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.objs[0], name, None))
else:
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
#----------------------------------------------------------------------
def copy(self, deep=True):
"""
Make a copy of this SpatialDataFrame object
Parameters:
:deep: boolean, default True
Make a deep copy, i.e. also copy data
Returns:
:copy: of SpatialDataFrame
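Example (illustrative sketch; assumes an existing SpatialDataFrame named sdf):
sdf_copy = sdf.copy(deep=True)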
"""
data = self._data
if deep:
data = data.copy()
return SpatialDataFrame(data, sr=self.sr).__finalize__(self)
#----------------------------------------------------------------------
def plot(self, *args, **kwargs):
"""
Plot draws the data on a web map. The user can describe in simple terms how to
render spatial data using symbols. To make the process simpler, a palette from
which colors are drawn can be used instead of explicit colors.
====================== =========================================================
**Explicit Argument** **Description**
---------------------- ---------------------------------------------------------
df required SpatialDataFrame or GeoSeries. This is the data
to map.
---------------------- ---------------------------------------------------------
map_widget optional WebMap object. This is the map to display the
data on.
---------------------- ---------------------------------------------------------
palette optional string/dict. Color mapping. For simple renderer,
just provide a string. For more robust renderers like
unique renderer, a dictionary can be given.
---------------------- ---------------------------------------------------------
renderer_type optional string. Determines the type of renderer to use
for the provided dataset. The default is 's' which is for
simple renderers.
Allowed values:
+ 's' - is a simple renderer that uses one symbol only.
+ 'u' - unique renderer symbolizes features based on one
or more matching string attributes.
+ 'c' - A class breaks renderer symbolizes based on the
value of some numeric attribute.
+ 'h' - heatmap renders point data into a raster
visualization that emphasizes areas of higher
density or weighted values.
---------------------- ---------------------------------------------------------
symbol_style optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
Allowed symbol types based on geometries:
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
---------------------- ---------------------------------------------------------
col optional string/list. Field or fields used for heatmap,
class breaks, or unique renderers.
---------------------- ---------------------------------------------------------
palette optional string. The color map to draw from in order to
visualize the data. The default palette is 'jet'. To
get a visual representation of the allowed color maps,
use the **display_colormaps** method.
---------------------- ---------------------------------------------------------
alpha optional float. This is a value between 0 and 1 with 1
being the default value. The alpha sets the transparency
of the renderer when applicable.
====================== =========================================================
**Render Syntax**
The render syntax allows for users to fully customize symbolizing the data.
**Simple Renderer**
A simple renderer is a renderer that uses one symbol only.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
symbol_style optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
---------------------- ---------------------------------------------------------
description Description of the renderer.
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets.
---------------------- ---------------------------------------------------------
rotation_type String value which controls the origin and direction of
rotation on point features. If the rotationType is
defined as arithmetic, the symbol is rotated from East in
a counter-clockwise direction where East is the 0 degree
axis. If the rotationType is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
visual_variables An array of objects used to set rendering properties.
====================== =========================================================
**Heatmap Renderer**
The HeatmapRenderer renders point data into a raster visualization that emphasizes
areas of higher density or weighted values.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
blur_radius The radius (in pixels) of the circle over which the
majority of each point's value is spread.
---------------------- ---------------------------------------------------------
field This is optional as this renderer can be created if no
field is specified. Each feature gets the same
value/importance/weight or with a field where each
feature is weighted by the field's value.
---------------------- ---------------------------------------------------------
max_intensity The pixel intensity value which is assigned the final
color in the color ramp.
---------------------- ---------------------------------------------------------
min_intensity The pixel intensity value which is assigned the initial
color in the color ramp.
---------------------- ---------------------------------------------------------
ratio A number between 0-1. Describes what portion along the
gradient the colorStop is added.
====================== =========================================================
**Unique Renderer**
This renderer symbolizes features based on one or more matching string attributes.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
background_fill_symbol A symbol used for polygon features as a background if the
renderer uses point symbols, e.g. for bivariate types &
size rendering. Only applicable to polygon layers.
PictureFillSymbols can also be used outside of the Map
Viewer for Size and Predominance and Size renderers.
---------------------- ---------------------------------------------------------
default_label Default label for the default symbol used to draw
unspecified values.
---------------------- ---------------------------------------------------------
default_symbol Symbol used when a value cannot be matched.
---------------------- ---------------------------------------------------------
col String or List of Strings. Attribute field(s) the
renderer uses to match values.
---------------------- ---------------------------------------------------------
field_delimiter String inserted between the values if multiple attribute
fields are specified.
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets. Rotation is set using a visual variable of type
rotation info with a specified field or value expression
property.
---------------------- ---------------------------------------------------------
rotation_type String property which controls the origin and direction
of rotation. If the rotation type is defined as
arithmetic the symbol is rotated from East in a
counter-clockwise direction where East is the 0 degree
axis. If the rotation type is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
arcade_expression An Arcade expression evaluating to either a string or a
number.
---------------------- ---------------------------------------------------------
arcade_title The title identifying and describing the associated
Arcade expression as defined in the valueExpression
property.
---------------------- ---------------------------------------------------------
visual_variables An array of objects used to set rendering properties.
====================== =========================================================
**Class Breaks Renderer**
A class breaks renderer symbolizes based on the value of some numeric attribute.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
background_fill_symbol A symbol used for polygon features as a background if the
renderer uses point symbols, e.g. for bivariate types &
size rendering. Only applicable to polygon layers.
PictureFillSymbols can also be used outside of the Map
Viewer for Size and Predominance and Size renderers.
---------------------- ---------------------------------------------------------
default_label Default label for the default symbol used to draw
unspecified values.
---------------------- ---------------------------------------------------------
default_symbol Symbol used when a value cannot be matched.
---------------------- ---------------------------------------------------------
method Determines the classification method that was used to
generate class breaks.
Must be one of the following values:
+ esriClassifyDefinedInterval
+ esriClassifyEqualInterval
+ esriClassifyGeometricalInterval
+ esriClassifyNaturalBreaks
+ esriClassifyQuantile
+ esriClassifyStandardDeviation
+ esriClassifyManual
---------------------- ---------------------------------------------------------
field Attribute field used for renderer.
---------------------- ---------------------------------------------------------
min_value The minimum numeric data value needed to begin class
breaks.
---------------------- ---------------------------------------------------------
normalization_field Used when normalizationType is field. The string value
indicating the attribute field by which the data value is
normalized.
---------------------- ---------------------------------------------------------
normalization_total Used when normalizationType is percent-of-total, this
number property contains the total of all data values.
---------------------- ---------------------------------------------------------
normalization_type Determine how the data was normalized.
Must be one of the following values:
+ esriNormalizeByField
+ esriNormalizeByLog
+ esriNormalizeByPercentOfTotal
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets.
---------------------- ---------------------------------------------------------
rotation_type A string property which controls the origin and direction
of rotation. If the rotation_type is defined as
arithmetic, the symbol is rotated from East in a
counter-clockwise direction where East is the 0 degree
axis. If the rotationType is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
arcade_expression An Arcade expression evaluating to a number.
---------------------- ---------------------------------------------------------
arcade_title The title identifying and describing the associated
Arcade expression as defined in the arcade_expression
property.
---------------------- ---------------------------------------------------------
visual_variables An object used to set rendering options.
====================== =========================================================
**Symbol Syntax**
======================= =========================================================
**Optional Argument** **Description**
----------------------- ---------------------------------------------------------
symbol_style optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
----------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
----------------------- ---------------------------------------------------------
cmap optional string or list. This is the color scheme a user
can provide if the exact color is not needed, or a user
can provide a list with the color defined as:
[red, green, blue, alpha]. The values red, green, blue are
from 0-255 and alpha is a float value from 0 - 1.
The default value is the 'jet' color scheme.
----------------------- ---------------------------------------------------------
cstep optional integer. If provided, it's the color location on
the color scheme.
======================= =========================================================
**Simple Symbols**
This is a list of optional parameters that can be given for point, line or
polygon geometries.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
marker_size optional float. Numeric size of the symbol given in
points.
-------------------- ---------------------------------------------------------
marker_angle optional float. Numeric value used to rotate the symbol.
The symbol is rotated counter-clockwise. For example,
angle=-30 creates a symbol rotated -30 degrees
counter-clockwise; that is, 30 degrees clockwise.
-------------------- ---------------------------------------------------------
marker_xoffset Numeric value indicating the offset on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset Numeric value indicating the offset on the y-axis in points.
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
-------------------- ---------------------------------------------------------
outline_style Optional string. For polygon, point, and line geometries, a
customized outline type can be provided.
Allowed Styles:
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
-------------------- ---------------------------------------------------------
outline_color optional string or list. This uses the same color notation
as the cmap property, but applies specifically to the outline.
==================== =========================================================
**Picture Symbol**
This type of symbol only applies to Points, MultiPoints and Polygons.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
marker_angle Numeric value that defines the number of degrees ranging
from 0-360, that a marker symbol is rotated. The rotation
is from East in a counter-clockwise direction where East
is the 0 axis.
-------------------- ---------------------------------------------------------
marker_xoffset Numeric value indicating the offset on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset Numeric value indicating the offset on the y-axis in points.
-------------------- ---------------------------------------------------------
height Numeric value used if needing to resize the symbol. Specify a value in points. If images are to be displayed in their original size, leave this blank.
-------------------- ---------------------------------------------------------
width Numeric value used if needing to resize the symbol. Specify a value in points. If images are to be displayed in their original size, leave this blank.
-------------------- ---------------------------------------------------------
url String value indicating the URL of the image. The URL should be relative if working with static layers. A full URL should be used for map service dynamic layers. A relative URL can be dereferenced by accessing the map layer image resource or the feature layer image resource.
-------------------- ---------------------------------------------------------
image_data String value indicating the base64 encoded data.
-------------------- ---------------------------------------------------------
xscale Numeric value indicating the scale factor in x direction.
-------------------- ---------------------------------------------------------
yscale Numeric value indicating the scale factor in y direction.
-------------------- ---------------------------------------------------------
outline_color optional string or list. This uses the same color notation
as the cmap property, but applies specifically to the outline.
-------------------- ---------------------------------------------------------
outline_style Optional string. For polygon, point, and line geometries, a
customized outline type can be provided.
Allowed Styles:
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
==================== =========================================================
**Text Symbol**
This type of symbol only applies to Points, MultiPoints and Polygons.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
font_decoration The text decoration. Must be one of the following values:
- line-through
- underline
- none
-------------------- ---------------------------------------------------------
font_family Optional string. The font family.
-------------------- ---------------------------------------------------------
font_size Optional float. The font size in points.
-------------------- ---------------------------------------------------------
font_style Optional string. The text style.
- italic
- normal
- oblique
-------------------- ---------------------------------------------------------
font_weight Optional string. The text weight.
Must be one of the following values:
- bold
- bolder
- lighter
- normal
-------------------- ---------------------------------------------------------
background_color optional string/list. Background color is represented as
a four-element array or string of a color map.
-------------------- ---------------------------------------------------------
halo_color Optional string/list. Color of the halo around the text.
The default is None.
-------------------- ---------------------------------------------------------
halo_size Optional integer/float. The point size of a halo around
the text symbol.
-------------------- ---------------------------------------------------------
horizontal_alignment optional string. One of the following string values
representing the horizontal alignment of the text.
Must be one of the following values:
- left
- right
- center
- justify
-------------------- ---------------------------------------------------------
kerning optional boolean. Boolean value indicating whether to
adjust the spacing between characters in the text string.
-------------------- ---------------------------------------------------------
line_color optional string/list. Outline color is represented as
a four-element array or string of a color map.
-------------------- ---------------------------------------------------------
line_width optional integer/float. Outline size.
-------------------- ---------------------------------------------------------
marker_angle optional int. A numeric value that defines the number of
degrees (0 to 360) that a text symbol is rotated. The
rotation is from East in a counter-clockwise direction
where East is the 0 axis.
-------------------- ---------------------------------------------------------
marker_xoffset optional int/float. Numeric value indicating the offset
on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset optional int/float. Numeric value indicating the offset
on the y-axis in points.
-------------------- ---------------------------------------------------------
right_to_left optional boolean. Set to true if using Hebrew or Arabic
fonts.
-------------------- ---------------------------------------------------------
rotated optional boolean. Boolean value indicating whether every
character in the text string is rotated.
-------------------- ---------------------------------------------------------
text Required string. Text Value to display next to geometry.
-------------------- ---------------------------------------------------------
vertical_alignment Optional string. One of the following string values
representing the vertical alignment of the text.
Must be one of the following values:
- top
- bottom
- middle
- baseline
==================== =========================================================
**Cartographic Symbol**
This type of symbol only applies to line geometries.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
-------------------- ---------------------------------------------------------
cap Optional string. The cap style.
-------------------- ---------------------------------------------------------
join Optional string. The join style.
-------------------- ---------------------------------------------------------
miter_limit Optional string. Size threshold for showing mitered line joins.
==================== =========================================================
The kwargs parameter accepts all parameters of the create_symbol method and the
create_renderer method.
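Example: Drawing a SpatialDataFrame on a web map (illustrative sketch; assumes an
authenticated GIS object named gis, an existing SpatialDataFrame named sdf, and a
hypothetical attribute column named 'STATE_NAME'):
m = gis.map()
sdf.plot(kind='map', map_widget=m, renderer_type='u', col='STATE_NAME', cmap='jet', alpha=0.7)
m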
"""
if ('kind' in kwargs and \
kwargs['kind'] == 'map') or \
(len(args) > 3 and args[3] == 'map'):
from arcgis.features._data.geodataset.viz import plot
has_wm = True
wm = kwargs.pop('map_widget', None)
if wm is None:
has_wm = False
wm = GIS().map()
if has_wm:
plot(df=self,
map_widget=wm,
name=kwargs.pop('name', "Feature Collection Layer"),
renderer_type=kwargs.pop("renderer_type", None),
symbol_type=kwargs.pop('symbol_type', None),
symbol_style=kwargs.pop('symbol_style', None),
col=kwargs.pop('col', None),
colors=kwargs.pop('cmap', None) or kwargs.pop('colors', None) or kwargs.pop('pallette', 'jet'),
alpha=kwargs.pop('alpha', 1),
**kwargs)
return True
else:
return plot(df=self,
map_widget=wm,
name=kwargs.pop('name', "Feature Collection Layer"),
renderer_type=kwargs.pop("renderer_type", None),
symbol_type=kwargs.pop('symbol_type', None),
symbol_style=kwargs.pop('symbol_style', None),
col=kwargs.pop('col', None),
colors=kwargs.pop('cmap', None) or kwargs.pop('colors', None) or kwargs.pop('pallette', 'jet'),
alpha=kwargs.pop('alpha', 1),
**kwargs)
if ('kind' in kwargs and \
kwargs['kind'] == 'map') or \
(len(args) > 3 and args[3] == 'map') and \
('as_graphic' in kwargs and kwargs['as_graphic']):
from arcgis.features import FeatureCollection, FeatureSet
from arcgis import geometry
if self._gis is None:
gis = GIS(set_active=False)
else:
gis = self._gis
sr = self.sr
extent = None
if HASARCPY:
if sr:
wkid = None
if hasattr(sr, 'factoryCode'):
wkid = {'wkid' : sr.factoryCode}
elif isinstance(sr, geometry.SpatialReference):
wkid = self.sr
ext = self.geoextent
extent = {
"xmin" : ext[0],
"ymin" : ext[1],
"xmax" : ext[2],
"ymax" : ext[3],
"spatialReference" : wkid
}
else:
ext = self.geoextent
extent = {
"xmin" : ext[0],
"ymin" : ext[1],
"xmax" : ext[2],
"ymax" : ext[3],
"spatialReference" : {'wkid' : 4326}
}
else:
sr = self.sr
if self.sr is None:
sr = {'wkid' : 4326}
ext = self.geoextent
extent = {
"xmin" : ext[0],
"ymin" : ext[1],
"xmax" : ext[2],
"ymax" : ext[3],
"spatialReference" : sr
}
if 'map_widget' not in kwargs:
raise Exception("map_widget is required to plot the SpatialDataFrame")
else:
m = kwargs.pop('map_widget')
symbol = kwargs.pop('symbol', None)
popup = kwargs.pop('popup', None)
try:
fs = FeatureSet.from_dict(self.__feature_set__)
m.draw(fs, symbol=symbol, popup=popup)
if extent and \
isinstance(extent, dict):
m.extent = extent
except:
raise Exception('Could not plot the Spatial DataFrame.')
else:
return super(SpatialDataFrame, self).plot(*args, **kwargs)
# ----------------------------------------------------------------------
@staticmethod
def from_xy(df, x_column, y_column, sr=4326):
"""
Converts a Pandas DataFrame into a Spatial DataFrame by providing the X/Y columns.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
df Required Pandas DataFrame. Source dataset
-------------------- ---------------------------------------------------------
x_column Required string. The name of the X-coordinate series
-------------------- ---------------------------------------------------------
y_column Required string. The name of the Y-coordinate series
-------------------- ---------------------------------------------------------
sr Optional int. The wkid number of the spatial reference.
==================== =========================================================
:returns: SpatialDataFrame
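Example (illustrative sketch; assumes a pandas DataFrame named df with numeric 'X' and 'Y' columns):
sdf = SpatialDataFrame.from_xy(df, x_column='X', y_column='Y', sr=4326)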
"""
from .io.fileops import _from_xy
return _from_xy(df=df, x_column=x_column,
y_column=y_column, sr=sr)
# ----------------------------------------------------------------------
@staticmethod
def from_df(df, address_column="address", geocoder=None):
"""
Returns a SpatialDataFrame from a dataframe with an address column.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
df Required Pandas DataFrame. Source dataset
-------------------- ---------------------------------------------------------
address_column Optional String. The default is "address". This is the
name of a column in the specified dataframe that contains
addresses (as strings). The addresses are batch geocoded
using the GIS's first configured geocoder and their
locations used as the geometry of the spatial dataframe.
Ignored if the 'geometry' parameter is also specified.
-------------------- ---------------------------------------------------------
geocoder Optional Geocoder. The geocoder to be used. If not
specified, the active GIS's first geocoder is used.
==================== =========================================================
:returns: SpatialDataFrame
NOTE: Credits will be consumed for batch_geocoding, from
the GIS to which the geocoder belongs.
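Example (illustrative sketch; assumes an active, authenticated GIS with a configured
geocoder and a pandas DataFrame named df that has an 'address' column; note that
batch geocoding consumes credits):
sdf = SpatialDataFrame.from_df(df, address_column='address')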
"""
from arcgis.geocoding import get_geocoders, geocode, batch_geocode
if geocoder is None:
geocoder = arcgis.env.active_gis._tools.geocoders[0]
geoms = []
if address_column in df.columns:
# batch geocode addresses in the address column and use them as the geometry
batch_size = geocoder.properties.locatorProperties.MaxBatchSize
N = len(df)
geoms = []
for i in range(0, N, batch_size):
start = i
stop = i + batch_size if i + batch_size < N else N
# print('Geocoding from ' + str(start) + ' to ' + str(stop))
res = batch_geocode(list(df[start:stop][address_column]), geocoder=geocoder)
for index in range(len(res)):
address = df.ix[start + index, address_column]
try:
loc = res[index]['location']
x = loc['x']
y = loc['y']
# self.ix[start + index, 'x'] = x
# self.ix[start + index, 'y'] = y
geoms.append(arcgis.geometry.Geometry({'x': x, 'y': y}))
except:
x, y = None, None
try:
loc = geocode(address, geocoder=geocoder)[0]['location']
x = loc['x']
y = loc['y']
except:
print('Unable to geocode address: ' + address)
pass
# self.ix[start + index, 'x'] = x
# self.ix[start + index, 'y'] = y
geoms.append(None)
else:
raise ValueError("Address column not found in dataframe")
return SpatialDataFrame(df, geometry=geoms)
#----------------------------------------------------------------------
@staticmethod
def from_featureclass(filename, **kwargs):
"""
Returns a SpatialDataFrame from a feature class.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
filename Required string. The full path to the feature class
-------------------- ---------------------------------------------------------
sql_clause Optional string. The sql clause to parse data down
-------------------- ---------------------------------------------------------
where_clause Optional string. A where statement
-------------------- ---------------------------------------------------------
sr Optional SpatialReference. A spatial reference object
==================== =========================================================
:returns: SpatialDataFrame
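Example (illustrative sketch; the geodatabase path and the POP2010 field are hypothetical):
sdf = SpatialDataFrame.from_featureclass(r"c:\temp\data.gdb\cities", where_clause="POP2010 > 100000")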
"""
from .io import from_featureclass
gis = kwargs.pop('gis', arcgis.env.active_gis)
if HASARCPY:
return from_featureclass(filename=filename, **kwargs)
elif isinstance(gis, GIS) and \
gis._con._auth.lower() != "anon":
return from_featureclass(filename=filename, **kwargs)
else:
raise Exception("Cannot create the SpatialDataFrame, you must " +\
"have an authenticated GIS.")
#----------------------------------------------------------------------
@staticmethod
def from_layer(layer, **kwargs):
"""
Returns a SpatialDataFrame or pandas DataFrame from a FeatureLayer or Table object.
============== ==============================================================
**Arguments** **Description**
-------------- --------------------------------------------------------------
layer required FeatureLayer/Table. This is the service endpoint object.
============== ==============================================================
:returns: SpatialDataFrame for feature layers with geometry and a pandas DataFrame for tables
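Example (illustrative sketch; assumes an existing FeatureLayer object named feature_layer):
sdf = SpatialDataFrame.from_layer(feature_layer)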
"""
from .io import from_layer
return from_layer(layer=layer, **kwargs)
#----------------------------------------------------------------------
def to_featureclass(self,
out_location, out_name,
overwrite=True, skip_invalid=True):
"""converts a SpatialDataFrame to a feature class
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
out_location Required string. A save location workspace
-------------------- ---------------------------------------------------------
out_name Required string. The name of the feature class to save as
-------------------- ---------------------------------------------------------
overwrite Optional boolean. True means to erase and replace the
existing feature class; False means to append to it.
-------------------- ---------------------------------------------------------
skip_invalid Optional boolean. If True, any invalid rows will be ignored.
==================== =========================================================
:returns: string
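Example (illustrative sketch; assumes an existing SpatialDataFrame named sdf and a hypothetical geodatabase path):
fc = sdf.to_featureclass(out_location=r"c:\temp\data.gdb", out_name="cities_copy", overwrite=True)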
"""
from .io import to_featureclass
return to_featureclass(df=self,
out_location=out_location,
out_name=out_name,
overwrite=overwrite, skip_invalid=skip_invalid)
#----------------------------------------------------------------------
def to_hdf(self, path_or_buf, key, **kwargs):
"""Write the contained data to an HDF5 file using HDFStore.
Parameters
----------
path_or_buf : the path (string) or HDFStore object
key : string
identifier for the group in the store
mode : optional, {'a', 'w', 'r+'}, default 'a'
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
For Table formats, append the input data to the existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
Applicable only to format='table'.
complevel : int, 1-9, default 0
If a complib is specified compression will be applied
where possible
complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel is > 0 apply compression to objects written
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
dropna : boolean, default False.
If true, ALL nan rows will not be written to store.
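Example (illustrative sketch; assumes an existing SpatialDataFrame named sdf and a hypothetical output path):
sdf.to_hdf(r"c:\temp\cities.h5", key="cities", mode="w")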
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, pd.DataFrame(self), **kwargs)
import wandb
from wandb import data_types
import numpy as np
import pytest
import os
import sys
import datetime
from wandb.sdk.data_types._dtypes import *
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test.png")
def test_none_type():
assert TypeRegistry.type_of(None) == NoneType()
assert TypeRegistry.type_of(None).assign(None) == NoneType()
assert TypeRegistry.type_of(None).assign(1) == InvalidType()
def test_string_type():
assert TypeRegistry.type_of("Hello") == StringType()
assert TypeRegistry.type_of("Hello").assign("World") == StringType()
assert TypeRegistry.type_of("Hello").assign(None) == InvalidType()
assert TypeRegistry.type_of("Hello").assign(1) == InvalidType()
def test_number_type():
assert TypeRegistry.type_of(1.2) == NumberType()
assert TypeRegistry.type_of(1.2).assign(1) == NumberType()
assert TypeRegistry.type_of(1.2).assign(None) == InvalidType()
assert TypeRegistry.type_of(1.2).assign("hi") == InvalidType()
def make_datetime():
return datetime.datetime(2000, 12, 1)
def make_date():
return datetime.date(2000, 12, 1)
def make_datetime64():
return np.datetime64("2000-12-01")
def test_timestamp_type():
assert TypeRegistry.type_of(make_datetime()) == TimestampType()
assert (
TypeRegistry.type_of(make_datetime())
.assign(make_date())
.assign(make_datetime64())
== TimestampType()
)
assert TypeRegistry.type_of(make_datetime()).assign(None) == InvalidType()
assert TypeRegistry.type_of(make_datetime()).assign(1) == InvalidType()
def test_boolean_type():
assert TypeRegistry.type_of(True) == BooleanType()
assert TypeRegistry.type_of(True).assign(False) == BooleanType()
assert TypeRegistry.type_of(True).assign(None) == InvalidType()
assert TypeRegistry.type_of(True).assign(1) == InvalidType()
def test_any_type():
assert AnyType() == AnyType().assign(1)
assert AnyType().assign(None) == InvalidType()
def test_never_type():
assert InvalidType().assign(1) == InvalidType()
assert InvalidType().assign("a") == InvalidType()
assert InvalidType().assign(True) == InvalidType()
assert InvalidType().assign(None) == InvalidType()
def test_unknown_type():
assert UnknownType().assign(1) == NumberType()
assert UnknownType().assign(None) == InvalidType()
def test_union_type():
wb_type = UnionType([float, str])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == InvalidType()
wb_type = UnionType([float, AnyType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == wb_type
wb_type = UnionType([float, UnknownType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == UnionType([float, StringType()])
assert wb_type.assign(None) == InvalidType()
wb_type = UnionType([float, OptionalType(UnknownType())])
assert wb_type.assign(None).assign(True) == UnionType(
[float, OptionalType(BooleanType())]
)
wb_type = UnionType([float, UnionType([str, UnknownType()])])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == UnionType([float, str, bool])
assert wb_type.assign(None) == InvalidType()
def test_const_type():
wb_type = ConstType(1)
assert wb_type.assign(1) == wb_type
assert wb_type.assign("a") == InvalidType()
assert wb_type.assign(2) == InvalidType()
def test_set_const_type():
wb_type = ConstType(set())
assert wb_type.assign(set()) == wb_type
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1}) == InvalidType()
assert wb_type.assign([]) == InvalidType()
wb_type = ConstType({1, 2, 3})
assert wb_type.assign(set()) == InvalidType()
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1, 2, 3}) == wb_type
assert wb_type.assign([1, 2, 3]) == InvalidType()
def test_object_type():
wb_type = TypeRegistry.type_of(np.random.rand(30))
assert wb_type.assign(np.random.rand(30)) == wb_type
assert wb_type.assign(4) == InvalidType()
def test_list_type():
assert ListType(int).assign([]) == ListType(int, 0)
assert ListType(int).assign([1, 2, 3]) == ListType(int, 3)
assert ListType(int).assign([1, "a", 3]) == InvalidType()
def test_dict_type():
spec = {
"number": float,
"nested": {
"list_str": [str],
},
}
exact = {
"number": 1,
"nested": {
"list_str": ["hello", "world"],
},
}
subset = {"nested": {"list_str": ["hi"]}}
narrow = {"number": 1, "string": "hi"}
wb_type = TypeRegistry.type_of(exact)
assert wb_type.assign(exact) == wb_type
assert wb_type.assign(subset) == InvalidType()
assert wb_type.assign(narrow) == InvalidType()
spec = {
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
wb_type = TypedDictType(spec)
assert wb_type.assign({}) == wb_type
assert wb_type.assign({"optional_number": 1}) == wb_type
assert wb_type.assign({"optional_number": "1"}) == InvalidType()
assert wb_type.assign({"optional_unknown": "hi"}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(str),
}
)
assert wb_type.assign({"optional_unknown": None}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
)
wb_type = TypedDictType({"unknown": UnknownType()})
assert wb_type.assign({}) == InvalidType()
assert wb_type.assign({"unknown": None}) == InvalidType()
assert wb_type.assign({"unknown": 1}) == TypedDictType(
{"unknown": float},
)
def test_nested_dict():
notation_type = TypedDictType(
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [
[
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [[]],
}
]
],
}
)
expanded_type = TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(
ListType(
TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(ListType()),
}
)
)
),
}
)
example = {
"a": 1,
"b": True,
"c": "StringType()",
"d": "hi",
"e": {},
"f": [1],
"g": [
[
{
"a": 2,
"b": False,
"c": "StringType()",
"d": 3,
"e": {},
"f": [],
"g": [[5]],
}
]
],
}
real_type = TypedDictType.from_obj(example)
assert notation_type == expanded_type
assert notation_type.assign(example) == real_type
def test_image_type():
wb_type = data_types._ImageFileType()
image_simple = data_types.Image(np.random.rand(10, 10))
wb_type_simple = data_types._ImageFileType.from_obj(image_simple)
image_annotated = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
wb_type_annotated = data_types._ImageFileType.from_obj(image_annotated)
image_annotated_differently = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth_2": {"path": im_path, "class_labels": class_labels},
},
)
assert wb_type.assign(image_simple) == wb_type_simple
assert wb_type.assign(image_annotated) == wb_type_annotated
# OK to assign Images with disjoint class set
assert wb_type_annotated.assign(image_simple) == wb_type_annotated
# Merge when disjoint
assert wb_type_annotated.assign(
image_annotated_differently
) == data_types._ImageFileType(
box_layers={"box_predictions": {1, 2, 3}, "box_ground_truth": {1, 2, 3}},
box_score_keys={"loss", "acc"},
mask_layers={
"mask_ground_truth_2": set(),
"mask_ground_truth": set(),
"mask_predictions": {1, 2, 3},
},
class_map={"1": "tree", "2": "car", "3": "road"},
)
def test_classes_type():
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
wb_class_type = (
wandb.wandb_sdk.data_types.helper_types.classes._ClassesIdType.from_obj(
wb_classes
)
)
assert wb_class_type.assign(1) == wb_class_type
assert wb_class_type.assign(0) == InvalidType()
def test_table_type():
table_1 = wandb.Table(columns=["col"], data=[[1]])
t1 = data_types._TableType.from_obj(table_1)
table_2 = wandb.Table(columns=["col"], data=[[1.3]])
table_3 = wandb.Table(columns=["col"], data=[["a"]])
assert t1.assign(table_2) == t1
assert t1.assign(table_3) == InvalidType()
def test_table_implicit_types():
table = wandb.Table(columns=["col"])
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
table = wandb.Table(columns=["col"], optional=False)
with pytest.raises(TypeError):
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
def test_table_allow_mixed_types():
table = wandb.Table(columns=["col"], allow_mixed_types=True)
table.add_data(None)
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
table = wandb.Table(columns=["col"], optional=False, allow_mixed_types=True)
with pytest.raises(TypeError):
table.add_data(None) # Still errors since optional is false
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
def test_tables_with_dicts():
good_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
]
bad_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
}
]
],
}
]
}
],
]
table = wandb.Table(columns=["A"], data=good_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=bad_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=good_data)
with pytest.raises(TypeError):
table = wandb.Table(columns=["A"], data=bad_data)
def test_table_explicit_types():
table = wandb.Table(columns=["a", "b"], dtype=int)
table.add_data(None, None)
table.add_data(1, 2)
with pytest.raises(TypeError):
table.add_data(1, "a")
table = wandb.Table(columns=["a", "b"], optional=False, dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
table = wandb.Table(columns=["a", "b"], optional=[False, True], dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
with pytest.raises(TypeError):
table.add_data(None, "a")
table.add_data(1, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
def test_table_type_cast():
table = wandb.Table(columns=["type_col"])
table.add_data(1)
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
table.cast("type_col", wb_classes.get_type())
table.add_data(2)
with pytest.raises(TypeError):
table.add_data(4)
box_annotation = {
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
}
mask_annotation = {
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
}
def test_table_specials():
table = wandb.Table(
columns=["image", "table"],
optional=False,
dtype=[data_types.Image, data_types.Table],
)
with pytest.raises(TypeError):
table.add_data(None, None)
# Infers specific types from first valid row
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
"hello",
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, "True", None]]),
)
# allows further refinement
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
# allows addition
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_nan_non_float():
import pandas as pd
wandb.Table(dataframe=pd.DataFrame(data=[["A"], [np.nan]], columns=["a"]))
def test_table_typing_numpy():
# Pulled from https://numpy.org/devdocs/user/basics.types.html
# Numerics
table = wandb.Table(columns=["A"], dtype=[NumberType])
table.add_data(None)
table.add_data(42)
table.add_data(np.byte(1))
table.add_data(np.short(42))
table.add_data(np.ushort(42))
table.add_data(np.intc(42))
table.add_data(np.uintc(42))
table.add_data(np.int_(42))
table.add_data(np.uint(42))
table.add_data(np.longlong(42))
table.add_data(np.ulonglong(42))
table.add_data(np.half(42))
table.add_data(np.float16(42))
table.add_data(np.single(42))
table.add_data(np.double(42))
table.add_data(np.longdouble(42))
table.add_data(np.csingle(42))
table.add_data(np.cdouble(42))
table.add_data(np.clongdouble(42))
table.add_data(np.int8(42))
table.add_data(np.int16(42))
table.add_data(np.int32(42))
table.add_data(np.int64(42))
table.add_data(np.uint8(42))
table.add_data(np.uint16(42))
table.add_data(np.uint32(42))
table.add_data(np.uint64(42))
table.add_data(np.intp(42))
table.add_data(np.uintp(42))
table.add_data(np.float32(42))
table.add_data(np.float64(42))
table.add_data(np.float_(42))
table.add_data(np.complex64(42))
table.add_data(np.complex128(42))
table.add_data(np.complex_(42))
# Booleans
table = wandb.Table(columns=["A"], dtype=[BooleanType])
table.add_data(None)
table.add_data(True)
table.add_data(False)
table.add_data(np.bool_(True))
# Array of Numerics
table = wandb.Table(columns=["A"], dtype=[[NumberType]])
table.add_data(None)
table.add_data([42])
table.add_data(np.array([1, 0], dtype=np.byte))
table.add_data(np.array([42, 42], dtype=np.short))
table.add_data(np.array([42, 42], dtype=np.ushort))
table.add_data(np.array([42, 42], dtype=np.intc))
table.add_data(np.array([42, 42], dtype=np.uintc))
table.add_data(np.array([42, 42], dtype=np.int_))
table.add_data(np.array([42, 42], dtype=np.uint))
table.add_data(np.array([42, 42], dtype=np.longlong))
table.add_data(np.array([42, 42], dtype=np.ulonglong))
table.add_data(np.array([42, 42], dtype=np.half))
table.add_data(np.array([42, 42], dtype=np.float16))
table.add_data(np.array([42, 42], dtype=np.single))
table.add_data(np.array([42, 42], dtype=np.double))
table.add_data(np.array([42, 42], dtype=np.longdouble))
table.add_data(np.array([42, 42], dtype=np.csingle))
table.add_data(np.array([42, 42], dtype=np.cdouble))
table.add_data(np.array([42, 42], dtype=np.clongdouble))
table.add_data(np.array([42, 42], dtype=np.int8))
table.add_data(np.array([42, 42], dtype=np.int16))
table.add_data(np.array([42, 42], dtype=np.int32))
table.add_data(np.array([42, 42], dtype=np.int64))
table.add_data(np.array([42, 42], dtype=np.uint8))
table.add_data(np.array([42, 42], dtype=np.uint16))
table.add_data(np.array([42, 42], dtype=np.uint32))
table.add_data(np.array([42, 42], dtype=np.uint64))
table.add_data(np.array([42, 42], dtype=np.intp))
table.add_data(np.array([42, 42], dtype=np.uintp))
table.add_data(np.array([42, 42], dtype=np.float32))
table.add_data(np.array([42, 42], dtype=np.float64))
table.add_data(np.array([42, 42], dtype=np.float_))
table.add_data(np.array([42, 42], dtype=np.complex64))
table.add_data(np.array([42, 42], dtype=np.complex128))
table.add_data(np.array([42, 42], dtype=np.complex_))
# Array of Booleans
table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
table.add_data(None)
table.add_data([True])
table.add_data([False])
table.add_data(np.array([True, False], dtype=np.bool_))
# Nested arrays
table = wandb.Table(columns=["A"])
table.add_data([[[[1, 2, 3]]]])
table.add_data(np.array([[[[1, 2, 3]]]]))
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_typing_pandas():
import pandas as pd
# TODO: Pandas https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes
# Numerics
table = wandb.Table(dataframe=pd.DataFrame([[1], [0]]).astype(np.byte))
table.add_data(1)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.short))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ushort))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int_))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longlong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ulonglong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.half))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.single))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.double))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.csingle))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.cdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.clongdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intp))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintp))
table.add_data(42)
    table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]))
#!/usr/bin/env python
from sklearn.externals import joblib
import numpy as np
import pandas as pd
def get_sepsis_score(data, model):
num_rows = len(data)
M1 = joblib.load('model-saved.pkl')
s_m = np.load('septic_mean.npy', allow_pickle=True)
ns_m = np.load('Nonseptic_mean.npy', allow_pickle=True)
All = np.vstack((s_m, ns_m))
maenAll = np.mean(All, axis=0)
    # Pre-processing for linear interpolation: if a column has only one observed value, seed one missing entry with the population mean so interpolation later has at least two points.
for column in range(data.shape[1]):
col = data[:, column]
value = col[~np.isnan(col)]
indexVal = np.argwhere(~np.isnan(col))
indexNaN = np.argwhere(np.isnan(col))
# if len(value) == 1:
if ((len(value) == 1) & (col.shape[0] > 1)):
            col[int(indexNaN[0])] = maenAll[column]
data[:, column] = col
    df = pd.DataFrame.from_records(data)
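    # Hedged sketch: one way to fill the remaining NaNs by linear interpolation with
    # pandas (the variable name below is illustrative, not from the original script).
    df_interp = df.interpolate(method='linear', limit_direction='both')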
#!/usr/bin/env python
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import sys
import numpy as np
np.random.seed(5)
np.set_printoptions(threshold=sys.maxsize)
import pandas as pd
import glob
import os
import time
import json
from sklearn import preprocessing
from sklearn import ensemble
from sklearn import linear_model
from sklearn import svm
from sklearn.preprocessing import PolynomialFeatures
from sklearn import neural_network
from sklearn.metrics import r2_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
#from keras.models import Sequential
#from keras.layers.core import Dense, Dropout, Activation
#from keras.layers.advanced_activations import PReLU, SReLU, LeakyReLU
import xgboost
from sklearn.preprocessing import FunctionTransformer
transformer = FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
from sklearn.pipeline import Pipeline
from sklearn import neighbors
from sklearn import tree
from sklearn import gaussian_process
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
#from keras.layers import Input, Dense
#from keras.models import Model
#import keras.backend as K
from sklearn.metrics import r2_score, mean_squared_error
import sklearn.dummy
import math
from sklearn.multioutput import MultiOutputRegressor
import tensorflow as tf
tf.set_random_seed(10)
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
early_stopping_monitor = EarlyStopping(patience=50)
# Methods
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def coeff_determination(y_pred, y_true): #Order of function inputs is important here
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
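# For reference, the same R^2 ("coefficient of determination") as the Keras-backend
# metric above, written in plain NumPy; inputs are assumed to be 1-D arrays.
def coeff_determination_np(y_pred, y_true, eps=1e-07):
    ss_res = np.sum(np.square(y_true - y_pred))
    ss_tot = np.sum(np.square(y_true - np.mean(y_true)))
    return 1.0 - ss_res / (ss_tot + eps)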
os.environ['KMP_DUPLICATE_LIB_OK']='True'
outputs = ['time']
fold = ['nfold', 'fold', 'id','percentage','nfold']
PP_OUT_FLAG = True
LOG_FLAG = False
class Regression():
def __init__(self, trainFilename, testFilename, resultsDir, run_case=True):
# assert len(trainFilenames) == len(testFilenames)
self.resultsDir = resultsDir
#ntrees = 1000
self.trainFilename = trainFilename
self.testFilename = testFilename
self.regressors = {
'lm': MultiOutputRegressor(linear_model.LinearRegression()),
'rg': MultiOutputRegressor(linear_model.Ridge()),
'svm': MultiOutputRegressor(svm.SVR(kernel='rbf')),
'gp': MultiOutputRegressor(gaussian_process.GaussianProcessRegressor()),
'knn': MultiOutputRegressor(neighbors.KNeighborsRegressor(n_neighbors=5)),
'dt': MultiOutputRegressor(tree.DecisionTreeRegressor()),
'br': MultiOutputRegressor(ensemble.BaggingRegressor(n_jobs=-1)),
'etr': MultiOutputRegressor(ensemble.ExtraTreesRegressor(n_jobs=-1)),
'rfr': MultiOutputRegressor(ensemble.RandomForestRegressor(n_jobs=-1)),
'abr': MultiOutputRegressor(ensemble.AdaBoostRegressor()),
'gbr': MultiOutputRegressor(ensemble.GradientBoostingRegressor()),
'xgb': MultiOutputRegressor(xgboost.XGBRegressor()),
'dl': None
}
if trainFilename is not None and testFilename is not None:
self.load_data()
self.preprocess_data()
for key in self.regressors.keys():
self.fit_model(key)
else:
print('Loading dummy regression class')
def load_data(self):
filename = self.trainFilename
print(self.trainFilename)
if os.path.exists(filename):
train_data = pd.read_csv(filename,header=None,encoding = "ISO-8859-1")
filename = self.testFilename
if os.path.exists(filename):
test_data1 = pd.read_csv(filename,header=None,encoding = "ISO-8859-1")
out_df = train_data.iloc[:,-1].values.reshape(-1,1)
inp_df = train_data.iloc[:,:-1]
test_out_df1 = test_data1.iloc[:,-1].values.reshape(-1,1)
test_inp_df1 = test_data1.iloc[:,:-1]
self.train_X = inp_df
self.train_y = out_df
self.test_X = test_inp_df1
self.test_y = test_out_df1
def preprocess_data(self):
self.preproc_X = Pipeline([('stdscaler', StandardScaler()),('minmax', MinMaxScaler(feature_range=(-1, 1)))])
self.preproc_y = Pipeline([('stdscaler', StandardScaler()),('minmax', MinMaxScaler(feature_range=(-1, 1)))])
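        # Standardize first, then rescale to [-1, 1]: keeps features on a comparable
        # scale for the distance/kernel-based regressors above (SVR, KNN, GP) and
        # matches the tanh activations used in the Keras network below.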
self.train_X_p = self.preproc_X.fit_transform(self.train_X)#.as_matrix()
self.train_y_p = self.preproc_y.fit_transform(self.train_y)#.as_matrix()
self.test_X_p = self.preproc_X.transform(self.test_X)#.as_matrix()
self.test_y_p = self.preproc_y.transform(self.test_y)#.as_matrix()
def build_model(self, model_type):
start = time.time()
if model_type != 'dl':
model = self.regressors[model_type]
else:
tf.keras.backend.clear_session()
tf.reset_default_graph()
nunits = 200
# design network
model = Sequential()
model.add(Dense(nunits, activation='tanh', input_shape=(self.train_X.shape[1],)))
model.add(Dense(nunits, activation='tanh'))
model.add(Dense(nunits, activation='tanh'))
model.add(Dense(self.train_y.shape[1], activation='linear'))
model.compile(loss='mse', optimizer='adam',metrics=[coeff_determination])
model.summary()
end = time.time()
build_time = (end-start)
return model, build_time
def train_model(self, model, model_type):
start = time.time()
if model_type != 'dl':
model.fit(self.train_X_p, self.train_y_p)
else:
model.fit(self.train_X_p, self.train_y_p, epochs=1000, batch_size=16, validation_split=0.1, verbose=1, callbacks=[early_stopping_monitor], shuffle=True)
end = time.time()
training_time = (end - start)
return model, training_time
def test_model(self, model):
start = time.time()
test_yhat_p = model.predict(self.test_X_p)
end = time.time()
inference_time = (end - start)
return test_yhat_p , inference_time
def compute_metric(self, test_y, test_yhat):
results = []
test_y = test_y.reshape(-1,1)
test_yhat = test_yhat
for out_index in range(test_y.shape[1]):
y_true = test_y[:,out_index]
y_pred = test_yhat[:,out_index]
r2 = r2_score(y_true, y_pred)
evs = explained_variance_score(y_true, y_pred)
mae = mean_absolute_error(y_true, y_pred)
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
rho = np.corrcoef(y_true, y_pred)[0][1]
# mape = mean_absolute_percentage_error(y_true, y_pred)
result = [r2, rho, evs, mae, rmse]
results.append(result)
        res_df = pd.DataFrame(results)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.adapt import MLkNN
from keras.layers import Dense
from keras.models import Sequential
from keras.metrics import *
##########################################################
# Section 1 - Data Loading
##########################################################
# Getting feature data
finalData = np.array(pd.read_csv('D:/UIP/finaldata.csv', index_col='Name'))
biodata = finalData[:, 21:]
# Getting type data as dataframe for visualisations
pType = pd.read_csv('D:/UIP/primType.csv', index_col=0)
sType = pd.read_csv('D:/UIP/secondType.csv', index_col=0)
bTypes = pd.read_csv('D:/UIP/sparseTypes.csv', index_col=0)
# Getting features as numpy arrays for model inputs
primType = np.array(pType)
secType = np.array(sType)
bothTypes = np.array(bTypes)
# Get splitted data
Xtrain, Xtest, Ytrain, Ytest = train_test_split(finalData, bothTypes, test_size=0.2, random_state=12345)
XtrainPrim, XtestPrim, YtrainPrim, YtestPrim = train_test_split(finalData, primType, test_size=0.2, random_state=12345)
XtrainSec, XtestSec, YtrainSec, YtestSec = train_test_split(finalData, secType, test_size=0.2, random_state=12345)
# Get splitted biodata
XtrainBio, XtestBio, YtrainBio, YtestBio = train_test_split(biodata, bothTypes, test_size=0.2, random_state=12345)
XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio = train_test_split(biodata, primType, test_size=0.2, random_state=12345)
XtrainSecBio, XtestSecBio, YtrainSecBio, YtestSecBio = train_test_split(biodata, secType, test_size=0.2, random_state=12345)
##########################################################
# Section 2 - Data Visualisation
##########################################################
# Visualising class distribution for Pokemon type
def visualiseTypeDist(typeData, nat):
# Type Categories
categories = list(typeData.columns.values)
plt.figure(figsize=(15, 8))
ax = sns.barplot(categories, typeData.sum().values)
# Axis labels
if nat == 1:
plt.title("Distribution of Primary Pokemon Types", fontsize=14)
elif nat == 2:
plt.title("Distribution of Secondary Pokemon Types", fontsize=14)
else:
plt.title("Distribution of Pokemon Types (single and dual)", fontsize=14)
plt.ylabel('Pokemon of that Type', fontsize=14)
plt.xlabel('Pokemon Type', fontsize=14)
rects = ax.patches
labels = typeData.sum().values
# Print hist labels
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 1,
label, ha='center', va='bottom', fontsize=12)
plt.show()
visualiseTypeDist(pType, 1)
visualiseTypeDist(sType, 2)
visualiseTypeDist(bTypes, 0)
# Function to re-encode output of Neural Network into one-hot encoding
def reEncode(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
# Setting epsilon for re-encoding multiple type predictions
epsilon = 0.03
# Function to re-encode output of Neural Network into multiple-hot encoding
def reEncodeMulti(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
rowAlt = [e for e in row if e != m]
tx = max(rowAlt)
rowAltB = [e for e in rowAlt if e != tx]
tb = max(rowAltB)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
elif row[j] == tx:
if (tx - tb) >= epsilon:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
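# Quick sanity illustration of the two encoders above (scores are hypothetical):
# reEncode keeps only the arg-max type, reEncodeMulti also keeps a close runner-up.
_demo_scores = np.array([[0.40, 0.38, 0.22]])
assert reEncode(_demo_scores).tolist() == [[1.0, 0.0, 0.0]]
assert reEncodeMulti(_demo_scores).tolist() == [[1.0, 1.0, 0.0]]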
# ###############################################################
# # Section 3 - Multi-class classification for Type 1 of Pokemon
# ###############################################################
# Neural Network with Softmax + Categorical Crossentropy
def test_network(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# # Decision Tree - (Deprecated)
# def test_tree(Xtrain, Xtest, Ytrain, Ytest):
# # Setting tree parameters
# classifier = DecisionTreeClassifier(criterion='entropy', max_depth=10, random_state=12345)
# classifier.fit(Xtrain, Ytrain)
# # Accuracy Metrics and Predictions
# print('Accuracy Score for Decision Tree on training set: {:.2f}'.format(classifier.score(Xtrain, Ytrain)))
# print('Accuracy Score for Decision Tree on test set: {:.2f}'.format(classifier.score(Xtest, Ytest)))
# predictions = classifier.predict(Xtest)
# return predictions
# K-Nearest Neighbours for Multi-Class classification
def test_knn(Xtrain, Xtest, Ytrain, Ytest):
# Setting k = 3
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(Xtrain, Ytrain)
# Accuracy Metrics and Predictions
predictions = classifier.predict(Xtest)
score = classifier.score(Xtest, Ytest)
return predictions, score
# ######################################################################
# # Section 4 - Multi-class, Multi-label approach to Type classification
# ######################################################################
# Neural Network with Softmax + Binary Crossentropy
def test_network2(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# Multilabel k Nearest Neighbours (MLkNN)
def test_mlknn(Xtrain, Xtest, Ytrain, Ytest):
# Training the classfier and making predictions
classifier = MLkNN(k=1)
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
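# Note: the hamming loss reported above is the fraction of individual type labels
# predicted incorrectly, averaged over the test Pokemon; lower is better.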
# Binary Relevance with Logistic Regression
def test_logistic(Xtrain, Xtest, Ytrain, Ytest):
# Setting parameters for Logistic Regression
reg = LogisticRegression(C = 1.0, solver='lbfgs', random_state=12345)
# Initialising the Binary Relevance Pipeline
classifier = BinaryRelevance(classifier=reg)
# Training the classfiers and making predictions
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
###############################################################
# Section 5 - Getting results from models
###############################################################
typeList = ['Normal', 'Fighting', 'Flying', 'Poison', 'Ground', 'Rock', 'Bug', 'Ghost',
'Steel', 'Fire', 'Water', 'Grass', 'Electric', 'Psychic', 'Ice', 'Dragon', 'Dark', 'Fairy']
pokemon = pd.read_csv('D:/UIP/testList.csv', header=0)['Name']
#### Section 5.1 - Predicting a Pokemon's primary type. First with bio + move data, then only biodata. ####
# Neural Network
primaryNet_predic, primaryNet_acc = test_network(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
pd.DataFrame(reEncode(primaryNet_predic), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrim.csv')
primaryNet_predicBio, primaryNet_accBio = test_network(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
pd.DataFrame(reEncode(primaryNet_predicBio), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrimWithoutMoves.csv')
# # Decision Tree
# primaryForest_predic = test_tree(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
# primaryForest_predicBio = test_tree(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
# K Nearest Neighbours
primaryKNN_predic, primaryKNN_acc = test_knn(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
pd.DataFrame(primaryKNN_predic, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/KNNPredictionsPrim.csv')
primaryKNN_predicBio, primaryKNN_accBio = test_knn(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
pd.DataFrame(primaryKNN_predicBio, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/KNNPredictionsPrimWithoutMoves.csv')
#### Section 5.2 - Predicting both types for Pokemon. First with bio + move data, then only biodata. ####
# Neural Network
primaryNet_predic2, primaryNet_acc2 = test_network2(Xtrain[:, :21], Xtest[:, :21], Ytrain, Ytest)
pd.DataFrame(reEncodeMulti(primaryNet_predic2), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictions.csv')
primaryNet_predicBio2, primaryNet_accBio2 = test_network2(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(reEncodeMulti(primaryNet_predicBio2), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsWithoutMoves.csv')
# # MLkNN
mlknn_pred, mlknn_acc, mlknn_hamloss = test_mlknn(Xtrain, Xtest, Ytrain, Ytest)
pd.DataFrame(mlknn_pred.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/MLKNNtPredictions.csv')
mlknn_predBio, mlknn_accBio, mlknn_hamlossBio = test_mlknn(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(mlknn_predBio.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/MLKNNtPredictionsWithoutMoves.csv')
# Binary Relevance - Logistic Regression
log_pred, log_acc, log_loss = test_logistic(Xtrain, Xtest, Ytrain, Ytest)
pd.DataFrame(log_pred.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/LogPredictions.csv')
log_predBio, log_accBio, log_lossBio = test_logistic(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(log_predBio.A, index=pokemon, columns=typeList)
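# Illustrative helper (not part of the original pipeline): turn a one-hot or
# multi-hot prediction row back into readable type names for manual inspection.
def decode_types(pred_row, names=typeList):
    return [name for name, flag in zip(names, pred_row) if flag == 1]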
"""
Import as:
import core.artificial_signal_generators as carsigen
"""
import datetime
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
# import statsmodels as sm
import statsmodels.api as sm
import helpers.hdbg as hdbg
# TODO(*): statsmodels needs this import to work properly.
# import statsmodels.tsa.arima_process as smarima # isort: skip # noqa: F401 # pylint: disable=unused-import
_LOG = logging.getLogger(__name__)
# TODO(gp): Remove after PTask2335.
if True:
import gluonts
import gluonts.dataset.artificial as gda
import gluonts.dataset.artificial.recipe as rcp
import gluonts.dataset.repository.datasets as gdrd # isort: skip # noqa: F401 # pylint: disable=unused-import
import gluonts.dataset.util as gdu # isort: skip # noqa: F401 # pylint: disable=unused-import
def get_gluon_dataset_names() -> List[str]:
"""
Get names of available Gluon datasets. Each of those names can be used
in `get_gluon_dataset` function.
:return: list of names
"""
return list(gluonts.dataset.repository.datasets.dataset_recipes.keys())
def get_gluon_dataset(
dataset_name: str = "m4_hourly",
train_length: Optional[int] = None,
test_length: Optional[int] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Load Gluon dataset, transform it into train and test dataframes.
The default `m4_hourly` time series look like this:
https://gluon-ts.mxnet.io/_images/examples_forecasting_tutorial_9_0.png
:param dataset_name: name of the dataset. Supported names can be
obtained using `get_gluon_dataset_names`.
:param train_length: length of the train dataset
:param test_length: length of the test dataset
:return: train and test dataframes
"""
dataset = gluonts.dataset.repository.datasets.get_dataset(
dataset_name, regenerate=False
)
train_entry = next(iter(dataset.train))
test_entry = next(iter(dataset.test))
train_df = gluonts.dataset.util.to_pandas(train_entry)
test_df = gluonts.dataset.util.to_pandas(test_entry)
train_length = train_length or train_df.shape[0]
test_length = test_length or test_df.shape[0]
hdbg.dassert_lte(train_length, train_df.shape[0])
hdbg.dassert_lte(test_length, test_df.shape[0])
train_df = pd.DataFrame(train_df.head(train_length), columns=["y"])
test_df = pd.DataFrame(test_df.head(test_length), columns=["y"])
return train_df, test_df
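    # Hedged usage example (the dataset is downloaded on first call, so network
    # access is assumed):
    #   train_df, test_df = get_gluon_dataset("m4_hourly", train_length=500, test_length=100)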
def evaluate_recipe(
recipe: List[Tuple[str, Callable]], length: int, **kwargs: Any
) -> Dict[str, np.array]:
"""
Generate data based on recipe.
For documentation on recipes, see
https://gluon-ts.mxnet.io/_modules/gluonts/dataset/artificial/_base.html#RecipeDataset.
:param recipe: [(field, function)]
:param length: length of data to generate
:param kwargs: kwargs passed into gluonts.dataset.artificial.recipe.evaluate
:return: field names mapped to generated data
"""
return rcp.evaluate(recipe, length, **kwargs)
def add_recipe_components(
recipe: List[Tuple[str, Callable]], name: str = "signal"
) -> List[Tuple[str, rcp.Lifted]]:
"""
Append the sum of the components to the recipe.
:param recipe: [(field, function)]
:param name: name of the sum
:return: recipe with the sum component
"""
recipe = recipe.copy()
names = [name for name, _ in recipe]
addition = rcp.Add(names)
recipe.append((name, addition))
return recipe
def generate_recipe_dataset(
recipe: Union[Callable, List[Tuple[str, Callable]]],
freq: str,
start_date: pd.Timestamp,
max_train_length: int,
prediction_length: int,
num_timeseries: int,
trim_length_func: Callable = lambda x, **kwargs: 0,
) -> gluonts.dataset.common.TrainDatasets:
"""
Generate GluonTS TrainDatasets from recipe.
For more information on recipes, see
https://gluon-ts.mxnet.io/_modules/gluonts/dataset/artificial/_base.html#RecipeDataset
and
https://gluon-ts.mxnet.io/examples/synthetic_data_generation_tutorial/tutorial.html.
For `feat_dynamic_cat` and `feat_dynamic_real` generation pass in
`shape=(n_features, 0)`. GluonTS replaces `0` in shape with
`max_train_length + prediction_length`.
:param recipe: GluonTS recipe. Datasets with keys `feat_dynamic_cat`,
`feat_dynamic_real` and `target` are passed into `ListDataset`.
:param freq: frequency
:param start_date: start date of the dataset
:param max_train_length: maximum length of a training time series
:param prediction_length: length of prediction range
:param num_timeseries: number of time series to generate
:param trim_length_func: Callable f(x: int) -> int returning the
(shortened) training length
:return: GluonTS TrainDatasets (with `train` and `test` attributes).
"""
names = [name for name, _ in recipe]
hdbg.dassert_in("target", names)
metadata = gluonts.dataset.common.MetaData(freq=freq)
recipe_dataset = gda.RecipeDataset(
recipe,
metadata,
max_train_length,
prediction_length,
num_timeseries,
trim_length_fun=trim_length_func,
data_start=start_date,
)
return recipe_dataset.generate()
class ArmaProcess:
"""
A thin wrapper around statsmodels `ArmaProcess`, with Pandas support.
"""
def __init__(self, ar_coeffs: List[float], ma_coeffs: List[float]) -> None:
"""
Initialize `arma_process` using given coefficients.
Useful properties include
- arroots
- isinvertible
- isstationary
- maroots
Further details are available at
- https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_process.ArmaProcess.html # pylint: disable=line-too-long
"""
self.ar_coeffs = ar_coeffs
self.ma_coeffs = ma_coeffs
self.arma_process = sm.tsa.ArmaProcess.from_coeffs(
self.ar_coeffs, self.ma_coeffs
)
def generate_sample(
self,
date_range_kwargs: Dict[str, Any],
scale: float = 1,
burnin: float = 0,
seed: Optional[int] = None,
) -> pd.Series:
"""
Generate an ARMA realization.
This wraps statsmodels' `generate_sample`, placing the values in a
`pd.Series` with index specified through the date range parameters.
:param date_range_kwargs: kwargs to forward to `pd.date_range`, e.g.,
- "start", "end", "periods", "freq"
:param scale: standard deviation of noise
:param burnin: number of leading samples to drop
:seed: np.random.seed seed
"""
if seed is None:
seed = 0
np.random.seed(seed)
# Create index and infer number of samples.
        index = pd.date_range(**date_range_kwargs)
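        # Hedged usage sketch for this class (coefficients, dates and frequency are
        # illustrative only):
        #   arma = ArmaProcess(ar_coeffs=[0.5], ma_coeffs=[-0.2])
        #   srs = arma.generate_sample(
        #       {"start": "2010-01-01", "periods": 40, "freq": "B"}, scale=0.1, seed=42)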
import os
import numpy as np
import pandas as pd
import tarfile
import urllib.request
from experimentgenerator.experiment_generator import ExperimentGenerator
from experimentgenerator.parameters_distribution import ParametersDistribution
from autoscalingsim.utils.error_check import ErrorChecker
from autoscalingsim.utils.download_bar import DownloadProgressBar
@ExperimentGenerator.register('azure-vms')
class AzureVMsExperimentGenerator(ExperimentGenerator):
"""
Enriches experiment configuration files based on the Azure VMs usage dataset from 2019.
The related paper was published at SOSP'17:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 2017.
Resource Central: Understanding and Predicting Workloads for Improved Resource Management in Large Cloud Platforms.
In Proceedings of the 26th Symposium on Operating Systems Principles (SOSP '17). Association for Computing Machinery,
New York, NY, USA, 153–167. DOI:https://doi.org/10.1145/3132747.3132772
Dataset links full:
https://github.com/Azure/AzurePublicDataset/blob/master/AzurePublicDatasetLinksV2.txt
Assumption in processing the data: 1 service per VM
"""
dataset_base_link = 'https://azurecloudpublicdataset2.blob.core.windows.net/azurepublicdatasetv2/trace_data/'
dataset_vmtable_link_extension = 'vmtable/'
dataset_vmtable_name = 'vmtable.csv'
dataset_vm_cpu_readings_link_extension = 'vm_cpu_readings/'
dataset_vm_cpu_readings_name = 'vm_cpu_readings-file-{}-of-195.csv'
archive_postfix = '.gz'
@classmethod
def enrich_experiment_generation_recipe(cls, specialized_generator_config : dict, experiment_generation_recipe : dict):
data_path = ErrorChecker.key_check_and_load('data_path', specialized_generator_config)
cpu_readings_file_ids = ErrorChecker.key_check_and_load('file_ids', specialized_generator_config)
if isinstance(cpu_readings_file_ids, int):
cpu_readings_file_ids = [cpu_readings_file_ids]
cpu_readings_filenames = [ cls.dataset_vm_cpu_readings_name.format(cpu_readings_file_id) for cpu_readings_file_id in cpu_readings_file_ids ]
download_cpu_readings = True
cpu_readings_presence_status = dict()
if os.path.exists(data_path):
cpu_readings_presence_status = { filename : os.path.exists(os.path.join(data_path, filename)) for filename in cpu_readings_filenames }
download_cpu_readings = not np.all(list(cpu_readings_presence_status.values()))
else:
cpu_readings_presence_status = { filename : False for filename in cpu_readings_filenames }
os.makedirs(data_path)
download_vmtable = not os.path.exists(os.path.join(data_path, cls.dataset_vmtable_name))
if download_cpu_readings or download_vmtable:
print('Downloading Azure VMs usage dataset...')
if download_cpu_readings:
cpu_readings_archives_to_download = [ filename + cls.archive_postfix for filename, status in cpu_readings_presence_status.items() if status == False ]
for order_num, constructed_archive_name in enumerate(cpu_readings_archives_to_download, 1):
                    print(f'Downloading Azure CPU readings archive ({order_num} of {len(cpu_readings_archives_to_download)})...')
downloaded_cpu_readings_archive = os.path.join(data_path, constructed_archive_name)
full_cpu_readings_file_link = cls.dataset_base_link + cls.dataset_vm_cpu_readings_link_extension + constructed_archive_name
urllib.request.urlretrieve(full_cpu_readings_file_link, downloaded_cpu_readings_archive, DownloadProgressBar())
cls._unpack_and_cleanup_archive(downloaded_cpu_readings_archive)
if download_vmtable:
print('Downloading VMs main table...')
constructed_archive_name = cls.dataset_vmtable_name + cls.archive_postfix
downloaded_vm_table_archive = os.path.join(data_path, constructed_archive_name)
full_vm_table_file_link = cls.dataset_base_link + cls.dataset_vmtable_link_extension + constructed_archive_name
urllib.request.urlretrieve(full_vm_table_file_link, downloaded_vm_table_archive, DownloadProgressBar())
cls._unpack_and_cleanup_archive(downloaded_vm_table_archive)
# Processing VMs table
csv_reading_batch_size = ErrorChecker.key_check_and_load('csv_reading_batch_size', specialized_generator_config, default = 1000)
vm_categories_of_interest = ErrorChecker.key_check_and_load('vm_category', specialized_generator_config)
colnames = ['vm_id', 'subscription_id', 'deployment_id', 'timestamp_vm_created', 'timestamp_vm_deleted', 'max_cpu', 'avg_cpu', 'p95_max_cpu', 'vm_category', 'vm_virtual_core_count_bucket', 'vm_memory_bucket']
vms_table_iter = pd.read_csv(os.path.join(data_path, cls.dataset_vmtable_name), names = colnames, chunksize = csv_reading_batch_size, iterator = True, header = None)
selected_workloads_data = pd.DataFrame(columns = colnames)
for iter_num, chunk in enumerate(vms_table_iter, 1):
print(f'Processing {cls.dataset_vmtable_name}: iteration {iter_num}')
selected_part = chunk
if not vm_categories_of_interest in ['all', '*'] and (not isinstance(vm_categories_of_interest, list) or vm_categories_of_interest != ['Delay-insensitive', 'Interactive', 'Unknown']):
selected_part = chunk[chunk['vm_category'].isin(vm_categories_of_interest)]
            selected_workloads_data = pd.concat([selected_workloads_data, selected_part])
#!/usr/bin/env python3
import numpy as np
import pandas as pd
from datetime import datetime
def loadprices_df(csvfile, startdate=None, enddate=None):
    df = pd.read_csv(csvfile, header=0, usecols=['Date', 'Close'])
import pandas as pd
from autogluon.utils.tabular.utils.savers import save_pd
from .constants import *
from . import evaluate_utils
from.preprocess import preprocess_utils
def evaluate(results_raw, frameworks=None, banned_datasets=None, folds_to_keep=None, columns_to_agg_extra=None, frameworks_compare_vs_all=None, output_dir=None):
if frameworks is None:
frameworks = sorted(list(results_raw[FRAMEWORK].unique()))
if frameworks_compare_vs_all is None:
frameworks_compare_vs_all = []
if folds_to_keep is None:
folds_to_keep = sorted(list(results_raw[FOLD].unique()))
if banned_datasets is not None:
results_raw = results_raw[~results_raw[DATASET].isin(banned_datasets)]
total_datasets = sorted(results_raw[DATASET].unique())
results_raw = preprocess_utils.clean_result(result_df=results_raw, folds_to_keep=folds_to_keep, remove_invalid=True)
results_raw = results_raw[results_raw[FRAMEWORK].isin(frameworks)]
# Calculate each frameworks errored datasets
total_frameworks = results_raw[FRAMEWORK].unique()
total_folds = results_raw[FOLD].unique()
num_frameworks = len(total_frameworks)
num_datasets = len(total_datasets)
num_folds = len(total_folds)
ideal_rows = num_folds * num_datasets * num_frameworks
actual_rows = len(results_raw)
errors = ideal_rows - actual_rows
print('num_datasets:', num_datasets)
print('num_folds:', num_folds)
print('errors:', errors)
for framework in total_frameworks:
results_framework = results_raw[results_raw[FRAMEWORK] == framework]
num_rows_framework = len(results_framework)
datasets_framework = results_framework[DATASET].unique()
datasets_framework_errors = [dataset for dataset in total_datasets if dataset not in datasets_framework]
datasets_framework_errors_count = len(datasets_framework_errors)
framework_fold_errors = num_datasets * num_folds - num_rows_framework
print('################################################')
print('framework:', framework)
print('datasets_framework_errors:', datasets_framework_errors)
print('datasets_framework_errors_count:', datasets_framework_errors_count)
print('framework_fold_errors:', framework_fold_errors)
print('################################################')
all_results_pairs = {}
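    # For each reference framework in `frameworks_compare_vs_all`, compare every other
    # framework against it dataset-by-dataset: rank 1 counts as a win for the challenger,
    # rank 2 as a win for the reference framework, and a tied rank of 1.5 as a tie.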
for framework_2 in frameworks_compare_vs_all:
results_list = []
for framework_1 in total_frameworks:
if framework_1 == framework_2:
results_ranked, results_ranked_by_dataset = evaluate_utils.compare_frameworks(results_raw=results_raw, frameworks=[framework_2], banned_datasets=banned_datasets, folds_to_keep=folds_to_keep, columns_to_agg_extra=columns_to_agg_extra, datasets=total_datasets, verbose=False)
ties = len(results_ranked_by_dataset)
results_list.append([framework_1, 0, 0, ties])
continue
results_ranked, results_ranked_by_dataset = evaluate_utils.compare_frameworks(results_raw=results_raw, frameworks=[framework_1, framework_2], banned_datasets=banned_datasets, folds_to_keep=folds_to_keep, columns_to_agg_extra=columns_to_agg_extra, datasets=total_datasets, verbose=False)
datasets_pair = results_ranked_by_dataset[DATASET].unique()
framework_1_wins = 0
framework_2_wins = 0
ties = 0
for dataset in datasets_pair:
results_isolated = results_ranked_by_dataset[results_ranked_by_dataset[DATASET] == dataset]
results_isolated = results_isolated[results_isolated[FRAMEWORK] == framework_1]
results_isolated_rank = results_isolated[RANK].iloc[0]
if results_isolated_rank == 1:
framework_1_wins += 1
elif results_isolated_rank == 2:
framework_2_wins += 1
elif results_isolated_rank == 1.5:
ties += 1
else:
raise AssertionError('Rank not valid: %s' % results_isolated_rank)
results_list.append([framework_1, framework_1_wins, framework_2_wins, ties])
        results_pairs = pd.DataFrame(data=results_list, columns=[FRAMEWORK, '> ' + framework_2, '< ' + framework_2, '= ' + framework_2])
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == 'period[{0}]'.format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
            assert dtype == np.object_
assert val == p
# misc
for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
def testinfer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('<M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
obj = Period('2011-01-01', freq='D')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
class TestMaybe(object):
def test_maybe_convert_string_to_array(self):
result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
assert result.dtype == object
result = maybe_convert_string_to_object(1)
assert result == 1
arr = np.array(['x', 'y'], dtype=str)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# unicode
arr = np.array(['x', 'y']).astype('U')
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# object
arr = np.array(['x', 2], dtype=object)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
assert result.dtype == object
def test_maybe_convert_scalar(self):
# pass thru
result = maybe_convert_scalar('x')
assert result == 'x'
result = maybe_convert_scalar(np.array([1]))
assert result == np.array([1])
# leave scalar dtype
result = maybe_convert_scalar(np.int64(1))
assert result == np.int64(1)
result = maybe_convert_scalar(np.int32(1))
assert result == np.int32(1)
result = maybe_convert_scalar(np.float32(1))
assert result == np.float32(1)
        result = maybe_convert_scalar(np.float64(1))
assert result == np.float64(1)
# coerce
        result = maybe_convert_scalar(1)
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Arc
from matplotlib.path import Path
import networkx as nx
import numpy as np
import pandas as pd
from cell2cell.plotting.aesthetics import get_colors_from_labels, generate_legend
def circos_plot(interaction_space, sender_cells, receiver_cells, ligands, receptors, excluded_score=0, metadata=None,
sample_col='#SampleID', group_col='Groups', meta_cmap='Set2', cells_cmap='Pastel1', colors=None, ax=None,
figsize=(10,10), fontsize=14, legend=True, ligand_label_color='dimgray', receptor_label_color='dimgray',
filename=None):
'''Generates the circos plot in the exact order that sender and
receiver cells are provided. Similarly, ligands and receptors are
sorted by the order they are input.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
Interaction space that contains all a distance matrix after running the
the method compute_pairwise_communication_scores. Alternatively, this
object can a SingleCellInteractions or a BulkInteractions object after
running the method compute_pairwise_communication_scores.
sender_cells : list
List of cells to be included as senders.
receiver_cells : list
List of cells to be included as receivers.
ligands : list
List of genes/proteins to be included as ligands produced by the
sender cells.
receptors : list
List of genes/proteins to be included as receptors produced by the
receiver cells.
excluded_score : float, default=0
Rows that have a communication score equal or lower to this will
be dropped from the network.
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCC scores. If None, cells will be color only by
individual cells.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCC scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCC scores.
meta_cmap : str, default='Set2'
Name of the matplotlib color palette for coloring the major groups
of cells.
cells_cmap : str, default='Pastel1'
Name of the color palette for coloring individual cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
ax : matplotlib.axes.Axes, default=None
Axes instance for a plot.
figsize : tuple, default=(10, 10)
Size of the figure (width*height), each in inches.
fontsize : int, default=14
Font size for ligand and receptor labels.
legend : boolean, default=True
Whether including legends for cell and cell group colors as well
as ligand/receptor colors.
ligand_label_color : str, default='dimgray'
Name of the matplotlib color palette for coloring the labels of
ligands.
receptor_label_color : str, default='dimgray'
Name of the matplotlib color palette for coloring the labels of
receptors.
filename : str, default=None
Path to save the figure of the elbow analysis. If None, the figure is not
saved.
Returns
-------
ax : matplotlib.axes.Axes
Axes instance containing a circos plot.
'''
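    # Hedged usage sketch (cell and gene names below are placeholders, not defaults):
    #   ax = circos_plot(interaction_space,
    #                    sender_cells=['CellA'], receiver_cells=['CellB'],
    #                    ligands=['LigandX'], receptors=['ReceptorY'],
    #                    metadata=meta_df, sample_col='#SampleID', group_col='Groups')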
if hasattr(interaction_space, 'interaction_elements'):
if 'communication_matrix' not in interaction_space.interaction_elements.keys():
raise ValueError('Run the method compute_pairwise_communication_scores() before generating circos plots.')
else:
readable_ccc = get_readable_ccc_matrix(interaction_space.interaction_elements['communication_matrix'])
elif hasattr(interaction_space, 'interaction_space'):
if 'communication_matrix' not in interaction_space.interaction_space.interaction_elements.keys():
raise ValueError('Run the method compute_pairwise_communication_scores() before generating circos plots.')
else:
readable_ccc = get_readable_ccc_matrix(interaction_space.interaction_space.interaction_elements['communication_matrix'])
else:
raise ValueError('Not a valid interaction_space')
# Figure setups
if ax is None:
R = 1.0
center = (0, 0)
fig = plt.figure(figsize=figsize, frameon=False)
ax = fig.add_axes([0., 0., 1., 1.], aspect='equal')
ax.set_axis_off()
ax.set_xlim((-R * 1.05 + center[0]), (R * 1.05 + center[0]))
ax.set_ylim((-R * 1.05 + center[1]), (R * 1.05 + center[1]))
else:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
x_range = abs(xlim[1] - xlim[0])
y_range = abs(ylim[1] - ylim[0])
R = np.nanmin([x_range/2.0, y_range/2.0]) / 1.05
center = (np.nanmean(xlim), np.nanmean(ylim))
# Elements to build network
# TODO: Add option to select sort_by: None (as input), cells, proteins or metadata
sorted_nodes = sort_nodes(sender_cells=sender_cells,
receiver_cells=receiver_cells,
ligands=ligands,
receptors=receptors
)
# Build network
G = _build_network(sender_cells=sender_cells,
receiver_cells=receiver_cells,
ligands=ligands,
receptors=receptors,
sorted_nodes=sorted_nodes,
readable_ccc=readable_ccc,
excluded_score=excluded_score
)
# Get coordinates
nodes_dict = get_arc_angles(G=G,
sorting_feature='sorting')
edges_dict = dict()
for k, v in nodes_dict.items():
edges_dict[k] = get_cartesian(theta=np.nanmean(v),
radius=0.95*R/2.,
center=center,
angle='degrees')
small_R = determine_small_radius(edges_dict)
# Colors
cells = list(set(sender_cells+receiver_cells))
if metadata is not None:
meta = metadata.set_index(sample_col).reindex(cells)
meta = meta[[group_col]].fillna('NA')
labels = meta[group_col].unique().tolist()
if colors is None:
colors = get_colors_from_labels(labels, cmap=meta_cmap)
meta['Color'] = [colors[idx] for idx in meta[group_col]]
else:
meta = | pd.DataFrame(index=cells) | pandas.DataFrame |
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
data_base = pd.read_csv("train_data.csv")  # import training data
test = pd.read_csv("test_data.csv")  # import test data
features = ['Retweet count', 'Likes count', 'Tweet value']  # features used for training
y = data_base.User
x = data_base[features]
model = RandomForestRegressor()  # use a random forest regressor
model.fit(x, y)
arr = model.predict(test[features])  # predict on the test set
arr = pd.DataFrame(arr)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = | Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype) | pandas.Series |
'''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from tqdm import trange
import pandas as pd
from PIL import Image
def make_train_anno(data_root_dir, anno_path):
# Set data directories.
train_dir = data_root_dir + 'bounding_box_train/'
# Get image names.
train_img_names = sorted([d for d in os.listdir(train_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
# Organize anntation data.
train_list = __org_data(train_dir, train_img_names, 'train')
df = pd.DataFrame(train_list)
df = __add_person_index(df)
# Save DataFrame.
__save_dataframe(df, anno_path, 'train')
def make_qng_anno(data_root_dir, anno_path):
# Set data directories.
gallery_dir = data_root_dir + 'bounding_box_test/'
query_dir = data_root_dir + 'query/'
# Get image names.
gallery_img_names = sorted([d for d in os.listdir(gallery_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
query_img_names = sorted([d for d in os.listdir(query_dir) if d.split('.')[-1].lower() in ('jpg', 'jpeg', 'png')])
# Organize anntation data.
gallery_list = __org_data(gallery_dir, gallery_img_names, 'gallery')
query_list = __org_data(query_dir, query_img_names, 'query')
data_list = gallery_list + query_list
df = | pd.DataFrame(data_list) | pandas.DataFrame |
# pylint:disable=unsupported-assignment-operation
# pylint:disable=unsubscriptable-object
"""Module containing different I/O functions to load data recorded by Withings Sleep Analyzer."""
import datetime
import re
from ast import literal_eval
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import numpy as np
import pandas as pd
from biopsykit.sleep.utils import split_nights
from biopsykit.utils._datatype_validation_helper import _assert_file_extension, _assert_has_columns, _assert_is_dir
from biopsykit.utils._types import path_t
from biopsykit.utils.datatype_helper import SleepEndpointDataFrame, is_sleep_endpoint_dataframe
from biopsykit.utils.time import tz
__all__ = [
"WITHINGS_RAW_DATA_SOURCES",
"load_withings_sleep_analyzer_raw_file",
"load_withings_sleep_analyzer_raw_folder",
"load_withings_sleep_analyzer_summary",
]
WITHINGS_RAW_DATA_SOURCES = {
"hr": "heart_rate",
"respiratory-rate": "respiration_rate",
"sleep-state": "sleep_state",
"snoring": "snoring",
}
""" Mapping of data source names to names of the biosignal (and the exported dataframe column)"""
def load_withings_sleep_analyzer_raw_folder(
folder_path: path_t,
timezone: Optional[Union[datetime.tzinfo, str]] = None,
split_into_nights: Optional[bool] = True,
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Load folder with raw data from a Withings Sleep Analyzer recording session and convert into time-series data.
    The function will return a dictionary of dataframes (one dataframe per night, if ``split_into_nights`` is ``True``)
    with continuous time-series data (sampling distance: 1min) of all data sources
    (heart rate, respiration rate, sleep state, snoring) combined. The dataframe columns will be:
* ``heart_rate``: heart rate in beats-per-minute (bpm)
* ``respiration_rate``: respiration rate in breaths-per-minute (bpm)
* ``sleep_state``: current sleep state: 0 = awake, 1 = light sleep, 2 = deep sleep, 3 = rem sleep
* ``snoring``: flag whether snoring was detected: 0 = no snoring, 100 = snoring
The files are all expected to have the following name pattern: ``raw-sleep-monitor_<datasource>.csv``.
.. warning::
If data is not split into single nights (``split_into_nights`` is ``False``),
data in the dataframe will **not** be resampled.
Parameters
----------
folder_path: :class:`~pathlib.Path` or str
path to folder with Sleep Analyzer raw data
timezone : str or :class:`datetime.tzinfo`, optional
timezone of the acquired data, either as string of as tzinfo object.
Default: 'Europe/Berlin'
split_into_nights : bool, optional
whether to split the dataframe into the different recording nights (and return a list of dataframes) or not.
Default: ``True``
Returns
-------
    :class:`~pandas.DataFrame` or dict of such
        dataframe (or dict of dataframes, one per night, if ``split_into_nights`` is ``True``) with Sleep Analyzer data
Raises
------
ValueError
if ``folder_path`` is not a directory
if no Sleep Analyzer Raw files are in directory specified by ``folder_path``
See Also
--------
load_withings_sleep_analyzer_raw_file
load a single Sleep Analyzer file with only one data source
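    Examples
    --------
    A minimal sketch (the folder path is hypothetical)::
        data_per_night = load_withings_sleep_analyzer_raw_folder(
            "sleep_analyzer_raw/", split_into_nights=True
        )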
"""
# ensure pathlib
folder_path = Path(folder_path)
_assert_is_dir(folder_path)
raw_files = list(sorted(folder_path.glob("raw_sleep-monitor_*.csv")))
if len(raw_files) == 0:
raise ValueError("No sleep analyzer raw files found in {}!".format(folder_path))
data_sources = [re.findall(r"raw_sleep-monitor_(\S*).csv", s.name)[0] for s in raw_files]
list_data = [
load_withings_sleep_analyzer_raw_file(
file_path,
data_source=WITHINGS_RAW_DATA_SOURCES[data_source],
timezone=timezone,
split_into_nights=split_into_nights,
)
for file_path, data_source in zip(raw_files, data_sources)
if data_source in WITHINGS_RAW_DATA_SOURCES
]
if split_into_nights:
# "transpose" list of dictionaries.
# before: outer list = data sources, inner dict = nights.
# after: outer dict = nights, inner list = data sources
keys = np.unique(np.array([sorted(data.keys()) for data in list_data]).flatten())
dict_nights = {}
for key in keys:
dict_nights.setdefault(key, [])
for data in list_data:
dict_nights[key].append(data[key])
data = {key: pd.concat(data, axis=1) for key, data in dict_nights.items()}
else:
data = pd.concat(list_data, axis=1)
return data
def load_withings_sleep_analyzer_raw_file(
file_path: path_t,
data_source: str,
timezone: Optional[Union[datetime.tzinfo, str]] = None,
split_into_nights: Optional[bool] = True,
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Load single Withings Sleep Analyzer raw data file and convert into time-series data.
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to file
data_source : str
data source of file specified by ``file_path``. Must be one of
['heart_rate', 'respiration_rate', 'sleep_state', 'snoring'].
timezone : str or :class:`datetime.tzinfo`, optional
timezone of recorded data, either as string or as tzinfo object.
Default: 'Europe/Berlin'
split_into_nights : bool, optional
whether to split the dataframe into the different recording nights (and return a dictionary of dataframes)
or not.
Default: ``True``
Returns
-------
:class:`~pandas.DataFrame` or dict of such
dataframe (or dict of dataframes, if ``split_into_nights`` is ``True``) with Sleep Analyzer data
Raises
------
ValueError
if unsupported data source was passed
`~biopsykit.utils.exceptions.FileExtensionError`
if ``file_path`` is not a csv file
`~biopsykit.utils.exceptions.ValidationError`
if file does not have the required columns ``start``, ``duration``, ``value``
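    Examples
    --------
    A minimal sketch (the file name is hypothetical)::
        hr_per_night = load_withings_sleep_analyzer_raw_file(
            "raw_sleep-monitor_hr.csv", data_source="heart_rate"
        )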
"""
if data_source not in WITHINGS_RAW_DATA_SOURCES.values():
raise ValueError(
"Unsupported data source {}! Must be one of {}.".format(
data_source, list(WITHINGS_RAW_DATA_SOURCES.values())
)
)
file_path = Path(file_path)
_assert_file_extension(file_path, ".csv")
data = pd.read_csv(file_path)
_assert_has_columns(data, [["start", "duration", "value"]])
if timezone is None:
timezone = tz
# convert string timestamps to datetime
data["start"] = pd.to_datetime(data["start"])
# sort index
data = data.set_index("start").sort_index()
# drop duplicate index values
data = data.loc[~data.index.duplicated()]
# convert it into the right time zone
data = data.groupby("start", group_keys=False).apply(_localize_time, timezone=timezone)
# convert strings of arrays to arrays
data["duration"] = data["duration"].apply(literal_eval)
data["value"] = data["value"].apply(literal_eval)
# rename index
data.index.name = "time"
# explode data and apply timestamp explosion to groups
data_explode = data.apply(pd.Series.explode)
data_explode = data_explode.groupby("time", group_keys=False).apply(_explode_timestamp)
data_explode.index = data_explode.index.tz_localize("UTC").tz_convert(timezone)
# rename the value column
data_explode.columns = [data_source]
# convert dtypes from object into numerical values
data_explode = data_explode.astype(int)
# drop duplicate index values
data_explode = data_explode.loc[~data_explode.index.duplicated()]
if split_into_nights:
data_explode = split_nights(data_explode)
data_explode = {key: _reindex_datetime_index(d) for key, d in data_explode.items()}
else:
data_explode = _reindex_datetime_index(data_explode)
return data_explode
def load_withings_sleep_analyzer_summary(file_path: path_t, timezone: Optional[str] = None) -> SleepEndpointDataFrame:
"""Load Sleep Analyzer summary file.
    This function additionally computes several other sleep endpoints from the Sleep Analyzer summary data to be
    comparable with the output format of other sleep analysis algorithms.
    All time information is reported in minutes.
The resulting dataframe has the following columns:
* ``total_duration``: Total recording time
* ``total_time_light_sleep``: Total time of light sleep
* ``total_time_deep_sleep``: Total time of deep sleep
* ``total_time_rem_sleep``: Total time of REM sleep
* ``total_time_awake``: Total time of being awake
* ``total_sleep_duration``: Total sleep duration, i.e., time between Sleep Onset and Wake Onset
* ``number_wake_bouts``: Total number of wake bouts
* ``sleep_onset_latency``: Sleep Onset Latency, i.e., time in bed needed to fall asleep
* ``getup_onset_latency``: Get Up Latency, i.e., time in bed after awakening until getting up
* ``sleep_onset``: Sleep Onset, i.e., time of falling asleep, in absolute time
* ``wake_onset``: Wake Onset, i.e., time of awakening, in absolute time
* ``wake_after_sleep_onset``: Wake After Sleep Onset (WASO), i.e., total time awake after falling asleep
* ``count_snoring_episodes``: Total number of snoring episodes
* ``total_time_snoring``: Total time of snoring
* ``heart_rate_avg``: Average heart rate during recording in bpm
* ``heart_rate_min``: Minimum heart rate during recording in bpm
* ``heart_rate_max``: Maximum heart rate during recording in bpm
Parameters
----------
file_path : :class:`~pathlib.Path` or str
path to file
timezone : str or :class:`datetime.tzinfo`, optional
timezone of recorded data, either as string or as tzinfo object.
Default: 'Europe/Berlin'
Returns
-------
:obj:`~biopsykit.datatype_helper.SleepEndpointDataFrame`
dataframe with Sleep Analyzer summary data, i.e., sleep endpoints
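    Examples
    --------
    A minimal sketch (the file name is hypothetical)::
        sleep_endpoints = load_withings_sleep_analyzer_summary("sleep_summary.csv")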
"""
# ensure pathlib
file_path = Path(file_path)
_assert_file_extension(file_path, ".csv")
data = pd.read_csv(file_path)
_assert_has_columns(data, [["von", "bis"]])
if timezone is None:
timezone = tz
for col in ["von", "bis"]:
# convert into date time
data[col] = pd.to_datetime(data[col]).dt.tz_convert(timezone)
# total duration in seconds
data["total_duration"] = [int(td.total_seconds()) for td in data["bis"] - data["von"]]
data["date"] = data["von"]
data["date"] = data["date"].apply(
lambda date: ((date - pd.Timedelta("1d")) if date.hour < 12 else date).normalize()
)
data.rename(
{
"von": "recording_start",
"bis": "recording_end",
"leicht (s)": "total_time_light_sleep",
"tief (s)": "total_time_deep_sleep",
"rem (s)": "total_time_rem_sleep",
"wach (s)": "total_time_awake",
"Aufwachen": "number_wake_bouts",
"Duration to sleep (s)": "sleep_onset_latency",
"Duration to wake up (s)": "getup_latency",
"Snoring episodes": "count_snoring_episodes",
"Snoring (s)": "total_time_snoring",
"Average heart rate": "heart_rate_avg",
"Heart rate (min)": "heart_rate_min",
"Heart rate (max)": "heart_rate_max",
},
axis="columns",
inplace=True,
)
data["sleep_onset"] = data["recording_start"] + | pd.to_timedelta(data["sleep_onset_latency"], unit="seconds") | pandas.to_timedelta |
"""Python library for GCCR002"""
from contextlib import contextmanager
from datetime import datetime
import hashlib
from io import StringIO
from IPython.display import display as _display
from itertools import chain, product, combinations_with_replacement
import joblib
import json
import logging
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import networkx as nx
import numpy as np
import pandas as pd
import pathlib
import pickle
import pingouin
import re
from scipy.special import logit
from scipy.stats import ks_2samp, mannwhitneyu, wilcoxon, gaussian_kde, chi2_contingency, entropy, norm
import seaborn as sns
from sklearn.decomposition import PCA, NMF
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE, RFECV
from sklearn.linear_model import LinearRegression, RidgeClassifier, RidgeClassifierCV, LogisticRegression, LogisticRegressionCV
from sklearn.metrics import auc, roc_curve, roc_auc_score, plot_roc_curve, confusion_matrix
from sklearn.metrics import precision_score, recall_score, get_scorer, make_scorer, SCORERS
from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit, LeaveOneOut, cross_validate, cross_val_score, cross_val_predict
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.utils.class_weight import compute_sample_weight
from statsmodels.api import add_constant
from statsmodels.discrete.discrete_model import Logit
import sys
sys.path.append('/home/rgerkin/dev/pyvenn') #TODO: Turn pyvenn into a pip-installable package
from tqdm.auto import tqdm, trange
import urllib
from venn import venn3, venn4, venn5, get_labels
import warnings
import zipfile
sns.set(font_scale=1.1)
sns.set_style('whitegrid')
logger = logging.Logger('GCCR002')
known_md5s = {'GCCR002_complete_database.csv': 'd476f67b081dd9d8d8cf1ee0481ad4e8',
'GCCR002_DATA_COVID_TimeStamp.xlsx': 'aa016d9208fbb44ffd8ce4a2dfe908a4',
'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv': '56922f025047e379bf5cfc8ff2ceed91'}
DATA = pathlib.Path('data')
YOUGOV_CUTOFF_DATE = '2020-07-03'
# In order to guarantee a match to the published figures, we must remove YouGov data reported after the manuscript
# submission date. This corresponds to week 11. To update the figures with data collected by YouGov after manuscript
# submission, change this cutoff date to a later value (e.g. the present day).
# For each type (e.g. categorical), a list of regular expressions for features considered to be that type
dtype_ontology = {'categorical': ['Gender', 'GCCR', 'Referred', 'Test_Name'],
'discrete': ['Age', 'Days_since_onset', 'Onset_day', 'Completion_day', 'Recovery'],
'binary': ['Changes', 'Symptoms', 'Prior_conditions', 'cigarette(!=_f)', 'cigarette_use', 'Resp'],
'continuous': ['(?<!did_)(before_)', 'during_', 'after_', 'change_', 'recovery_', 'frequency', 'cigarette(?!_use)'],
}
feature_ontology = {'incidental': ['GCCR', 'Test_Name', 'Completion_', 'Referred'],
'chemosensory': ['Changes_in', 'Taste', 'Smell', 'Cheme', '_food', '_smell'],
'demographic': ['Gender', 'Age', 'Country'],
'history': ['Prior_conditions', 'cigarette'],
'typical': ['Symptoms', 'Resp', 'Recovery', 'Blocked', 'Onset_', 'Days_']
}
timing_ontology = {'incidental': ['GCCR', 'Test_Name', 'Day', '_day', 'Referred'],
'demographic': ['Gender', 'Age', 'Country'],
'before': ['Prior_conditions', 'before_illness', 'cigarette'],
'during': ['Changes_in', 'change_illness', 'during_illness', 'Resp', 'Symptoms'],
'after': ['Recovery', 'after_illness', 'recovery_illness']}
# Color scheme
colors = pd.Series(
index=pd.MultiIndex.from_tuples([], names=["diagnosis", "sense"]), dtype="object"
)
colors.loc["C19+", "Smell"] = "#6699CD"
colors.loc["C19-", "Smell"] = "#a5bcd4"
colors.loc["C19+", "Taste"] = "#ff9900"
colors.loc["C19-", "Taste"] = "#ffce85"
colors.loc["C19+", "Chemesthesis"] = "#009999"
colors.loc["C19-", "Chemesthesis"] = "#5fc7c7"
colors.loc["C19+", "Blocked_nose"] = "#996600"
colors.loc["C19-", "Blocked_nose"] = "#d1a752"
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""
A context manager that will prevent any logging messages
triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL
is defined.
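    Example (minimal sketch)::
        with all_logging_disabled():
            logging.warning("this message is suppressed")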
"""
# two kind-of hacks here:
# * can't get the highest logging level in effect => delegate to the user
# * can't get the current module-level override => use an undocumented
# (but non-private!) interface
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
def get_hash(x):
return joblib.hash(x)
def load_all():
# All of the content loaded here was produced in pre-analysis.ipynb
with open(DATA / 'processed' / 'data-types.json') as f:
dtypes = json.load(f)
df = pd.read_csv(DATA / 'processed' / 'data-clean.csv', dtype=dtypes, index_col=0)
Xu = pd.read_csv(DATA / 'processed' / 'X-raw.csv', index_col=0).astype('float')
Xn = pd.read_csv(DATA / 'processed' / 'X-normalized.csv', index_col=0).astype('float')
#Xu.index = Xu.index.astype(int)
#Xn.index = Xu.index.astype(int)
with open(DATA / 'processed' / 'targets.json') as f:
targets = json.load(f)
sets = {name: set(ids) for name, ids in targets.items()}
with open(DATA / 'processed' / 'classes.json') as f:
classes = json.load(f)
return df, Xu, Xn, dtypes, sets, classes
def load_raw():
#file_name = 'GCCR002_DATA_COVID_TimeStamp.xlsx'
#file_name = 'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv'
#assert_md5(file_name) # Check that the MD5 hash of the file is as expected
#if file_name.endswith('.xlsx'):
# df = pd.read_excel(file_name) # Pandas takes forever to load Excel files
#elif file_name.endswith('.csv'):
# df = pd.read_csv(file_name)
df_ORIGINAL = pd.read_csv(DATA / 'raw' / 'GCCR002_DATA_COVID_TimeStamp.csv')
df_JULY = pd.read_csv(DATA / 'raw' / 'GCCR002_julydatabase_timestamp_Countryclean_labelscorrect.csv')
to_drop = ['UniqueID.1', 'UniqueID_1', 'Unnamed: 0', 'Unnamed: 2', 'Country_clean']
for df_ in [df_ORIGINAL, df_JULY]:
df_.drop(to_drop, axis=1, errors='ignore', inplace=True)
df_['Date_of_onset'] = pd.to_datetime(df_['Date_of_onset'])
df_['Year_of_birth_Time_Stamp'] = pd.to_datetime(df_['Year_of_birth_Time_Stamp'])
assert not set(df_ORIGINAL['UniqueID']).intersection(set(df_JULY['UniqueID']))
df = pd.concat([df_ORIGINAL, df_JULY[df_ORIGINAL.columns]])
df = df.rename(columns={'Chemethesis_before_illness': 'Chemesthesis_before_illness'})
assert len(set(df['UniqueID'])) == df.shape[0]
df = df.set_index('UniqueID')
    df = df.drop('UniqueID.1', axis=1, errors='ignore')
report_size(df, 'loading')
return df
def get_md5(file_name):
"""Get MD5 hash of file"""
with open(file_name, 'rb') as f:
# read contents of the file
data = f.read()
# pipe contents of the file through
md5 = hashlib.md5(data).hexdigest()
return md5
def assert_md5(file_name):
md5 = get_md5(file_name)
assert md5 == known_md5s[file_name], "MD5 hashes do not match; file may have been changed."
def date_to_integer_day(series):
series = series.dt.dayofyear
series = series.fillna(-1).astype(int)
return series
def display(x):
if isinstance(x, str):
print(x)
else:
_display(x)
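# interp_index linearly interpolates array2 at the point where array1 crosses
# `threshold` (array1 is assumed to be sorted ascending, as required by np.searchsorted).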
def interp_index(array1, array2, threshold):
i = np.searchsorted(array1, threshold)
a1 = array1[i-1]
b1 = array1[i]
a2 = array2[i-1]
b2 = array2[i]
return a2 + (b2-a2)*(threshold-a1)/(b1-a1)
def plot_roc(clf, X, y, cv, cv_kwargs=None, weights=None, concat=True, ax=None, name=None, title=None):
# Plot ROC curve
roc_aucs = []
n = cv.get_n_splits()
cv_kwargs = {} if cv_kwargs is None else cv_kwargs
if ax is None:
plt.figure(figsize=(4,4))
ax = plt.gca()
y_score = []
y_true = []
all_weights = []
sample_weight_ = get_weights(X, y, weights)
for i, (train, test) in enumerate(cv.split(X, **cv_kwargs)):
#sample_weight = get_weights(X.iloc[train], y.iloc[train], weights)
sample_weight = sample_weight_.iloc[train]
clf.fit(X.iloc[train, :], y.iloc[train], sample_weight=sample_weight)
#sample_weight = get_weights(X.iloc[test], y.iloc[test], weights)
sample_weight = sample_weight_.iloc[test]
if hasattr(clf, 'predict_proba'):
y_score_ = clf.predict_proba(X.iloc[test, :])[:, 1]
else:
y_score_ = clf.decision_function(X.iloc[test, :])
if not concat:
curve = plot_roc_curve(clf, X.iloc[test, :], y.iloc[test],
alpha=(1/np.sqrt(n)), ax=ax,
sample_weight=sample_weight, name='Split %d' % i)
roc_aucs.append(curve.roc_auc)
else:
auc = roc_auc_score(y.iloc[test], y_score_)
roc_aucs.append(auc)
y_score += list(y_score_)
y_true += list(y.iloc[test])
all_weights += list(sample_weight)
score = np.mean(roc_aucs)
if concat:
fpr, tpr, thresholds = roc_curve(y_true, y_score, sample_weight=all_weights)
#score = roc_auc_score(y_true, y_score, sample_weight=all_weights)
if not name:
name = clf.__class__.__name__.replace('Classifier','').replace('Ridge', 'Linear')
sens_half = interp_index(fpr, tpr, 0.5)
spec_half = 1-interp_index(tpr, fpr, 0.5)
print("%s: Sens50 = %.3g, Spec50 = %.3g" % (name, sens_half, spec_half))
label = '%s: %.3g' % (name, score) if name else '%.3g' % score
ax.plot(fpr, tpr, label=label)
else:
ax.set_title('AUC = %.3f +/- %.3f' % (score, np.std(roc_aucs)/np.sqrt(n)))
ax.plot([0, 1], [0, 1], 'k--')
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
if title:
ax.set_title(title)
if n <= 10 or concat:
ax.legend(fontsize=12, loc=4)
return score
def rank_features(clf, X):
# Rank the features identified by the classifier from most to least important
key_features = pd.Series(clf.feature_importances_, index=X.columns).sort_values(ascending=False)
# Show the 20 most important
key_features.index = nicify(list(key_features.index))
return key_features.to_frame(name='Importance')
def rank_coefs(clf, X, nicify_=True):
key_features = pd.Series(clf.coef_.ravel(), index=X.columns)
if hasattr(clf, 'intercept_') and clf.intercept_:
key_features['Intercept'] = clf.intercept_[0]
kf = key_features.to_frame(name='Value')
kf['Magnitude'] = kf['Value'].abs().round(3)
kf['Sign'] = ['+' if x>=0 else '-' for x in kf['Value']]
kf = kf.sort_values('Magnitude', ascending=False)
kf = kf.drop('Value', axis=1)
kf = kf[kf['Magnitude']>0]
if nicify_:
kf.index = nicify(list(kf.index))
return kf
def compute_score(clf, X, y, cv):
# Apply cross-validation using this splitter, and check the following fitness metrics
results = cross_validate(clf, X, y, scoring=['roc_auc'], cv=cv)
for key in results:
print(key, results[key].mean())
def cardinality_filter(X, n, dtype=None):
cols = []
for col in X:
if dtype is None or X[col].dtype == dtype:
u = X[col].unique()
if len(u)>=n:
cols.append(col)
return cols
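# ontology_to_classes assigns each feature column of `df` to an ontology class by regex
# matching; with invert=True it returns {class: [features]} instead of {feature: class},
# and asserts that every column is assigned exactly once.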
def ontology_to_classes(df, ontology, invert=False, add=None):
if add is None:
add = []
    unassigned_cols = list(df.drop('id', axis=1, errors='ignore'))
if invert:
classes = {x:[] for x in ontology}
else:
classes = {}
for key, patterns in ontology.items():
for pattern in patterns:
r = re.compile(pattern)
cols = list(filter(r.search, list(df)))
for col in cols:
if col in unassigned_cols:
if invert:
classes[key].append(col)
else:
classes[col] = key
unassigned_cols.remove(col)
assert len(unassigned_cols)==0, "%s were unassigned." % unassigned_cols
for kind in add:
# The above ontology maps each feature to a single class.
# Additiomal feature_classes below can reuse these features.
if kind == 'CDC9':
classes[kind] = ['Symptoms_%s' % x for x in
['changes_in_smell', 'changes_in_food_flavor', 'fever', 'muscle_aches',
'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']]
if kind == 'CDC7':
classes[kind] = ['Symptoms_%s' % x for x in
['fever', 'muscle_aches',
'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']]
if kind == 'CDC3':
classes[kind] = ['Symptoms_%s' % x for x in
['fever', 'dry_cough', 'difficulty_breathing_/_shortness_of_breath']]
elif kind == 'chemosensory-binary':
classes[kind] = [x for x in classes['chemosensory'] if 'illness' not in x]
return classes
def get_rccv_score(clf, X, y, feature_classes, classes, weights='balanced'):
sample_weight = get_weights(X, y, weights)
features = list(chain(*[feature_classes[x] for x in classes]))
clf.fit(X[features], y, sample_weight=sample_weight)
return clf.best_score_.round(3)
def roc(clf, X, y, feature_classes, classes, cv, weights=None, concat=True, ax=None, with_name=True, title=False):
features = list(chain(*[feature_classes[x] for x in classes]))
if with_name:
name = '%s' % '+'.join(classes)
score = plot_roc(clf, X[features], y, cv, weights=weights, concat=concat, ax=ax, name=name)
if ax and title:
if title is True:
title = '%s' % '+'.join(classes)
ax.set_title(title)
return score
def country_weights(X, y):
test_names = [col for col in X if 'Test_' in col]
sample_weight = y.copy()
sample_weight[:] = 1
for test_name in test_names:
m = X[test_name].mean() # Allows this to work even on standardized data
index = X[X[test_name]>m].index
if len(index):
weight = compute_sample_weight('balanced', y.loc[index])
sample_weight.loc[index] = weight
return sample_weight
def feature_weights(X, y, feature):
sample_weight = y.copy()
sample_weight[:] = 1
m = X[feature].mean() # Allows this to work even on standardized data
index = X[X[feature]>m].index
if len(index):
weight = compute_sample_weight('balanced', y.loc[index])
sample_weight.loc[index] = weight
return sample_weight
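# get_weights dispatches on `kind`: a precomputed Series is passed through unchanged,
# 'balanced-by-country' balances the classes within each group defined by the Test_* columns,
# 'balanced' balances classes globally, any other truthy value balances by that column of X,
# and a falsy value yields uniform weights.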
def get_weights(X, y, kind):
if isinstance(kind, pd.Series):
sample_weight = kind
elif kind == 'balanced-by-country':
sample_weight = country_weights(X, y)
elif kind == 'balanced':
sample_weight = compute_sample_weight('balanced', y)
elif kind:
sample_weight = compute_sample_weight('balanced', X[kind])
else:
sample_weight = compute_sample_weight(None, y)
sample_weight = pd.Series(sample_weight, index=X.index)
return sample_weight
def table_summarize(X, y, feature):
y.name = 'COVID status'
summary = X.join(y).groupby([feature, 'COVID status']).count().sum(axis=1).to_frame().unstack('COVID status')[0]
return summary.div(summary.sum()).round(2)
def hist_summarize(X, y, feature):
plt.hist(X.loc[y==1, feature], color='r', bins=30, alpha=0.3, density=True, label='COVID+');
plt.hist(X.loc[y==0, feature], color='g', bins=30, alpha=0.3, density=True, label='COVID-');
plt.legend()
def report_size(df, action):
print("Data after %s has %d subjects and %d features" % (action, *df.shape))
def qq_plot(X, y, feature):
x_minus = X[y==0][feature].quantile(np.linspace(0, 1, 101))
x_plus = X[y==1][feature].quantile(np.linspace(0, 1, 101))
ax = sns.lineplot(x_minus, x_plus)
ax.set_xlabel('%s (COVID -)' % feature.replace('_',' '))
ax.set_ylabel('%s (COVID +)' % feature.replace('_',' '))
ax.plot([0, max(x_minus)], [0, max(x_plus)], '--')
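# pp_plot draws a probability-probability plot of `feature` for the COVID- (x axis)
# vs COVID+ (y axis) groups and annotates the Kolmogorov-Smirnov D statistic,
# signed by the mean direction of the difference between the two curves.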
def pp_plot(X, y, feature, label=True, stabilized=False, ax=None):
x_minus = X[y==0][feature]
x_plus = X[y==1][feature]
minn = min(x_minus.min(), x_plus.min())
maxx = max(x_minus.max(), x_plus.max())
s_minus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float)
s_plus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float)
s_minus[:] = s_minus.index.map(lambda x: (x_minus<=x).mean())
s_plus[:] = s_plus.index.map(lambda x: (x_plus<=x).mean())
if stabilized:
s_minus = (2/np.pi)*np.arcsin(np.sqrt(s_minus))
s_plus = (2/np.pi)*np.arcsin(np.sqrt(s_plus))
D, p = ks_2samp(x_minus, x_plus)
#S, p = mannwhitneyu(x_minus, x_plus)
sign = (s_plus - s_minus).mean() > 0
#print(sign)
ax = sns.lineplot(s_minus, s_plus, ax=ax,
label='%s (D=%.2f)' % (feature.replace('_', ' ').title(), D if sign>0 else -D))
ax.set_xlabel('COVID -')
ax.set_ylabel('COVID +')
ax.plot([0, 1], [0, 1], 'k--')
ax.legend(fontsize=11)
def nicify(name, line_break=False):
if isinstance(name, list):
return list(map(nicify, name))
s = name.replace('_', ' ').title().replace('Symptoms ', '').split('/')[0].strip().replace('Gccr', 'GCCR v')\
.replace('Illness Y', 'Illness').replace('Before Illness', 'Before').replace('After Illness', 'After')\
.replace('Prior Conditions None', 'No Prior Conditions')\
.replace('Basic Tastes ', '').replace('Recovery Y', 'Recovered').replace('Prior Conditions ', '').split('(')[0]\
.replace('Changes In Smell I Cannot Smell At All', 'Anosmia/Hyposmia')\
.replace('Changes In Smell Smells Smell Different Than They Did Before', 'Parosmia')\
.replace("Changes In Smell I Can Smell Things That Aren'T There", 'Phantosmia')\
.replace('Changes In Smell Sense Of Smell Fluctuates', 'Smell Fluctuation')\
.replace('During Illness', 'During').replace(' Illness', '')\
.replace(' That Required Chemotherapy Or Radiation', '+Chemo/Radiation')\
.replace('Combustible Cigarette', 'Cigarette')\
.replace('E-Cigarette 30 Day', 'E-Cigarette')\
.replace(' That Did Not Require Chemotherapy Or Radiation', '-Chemo/Radiation')\
.replace('Results','').replace('Final','').replace('!','').replace('Version','').split('[')[0]\
.replace('Const', 'Intercept')\
.replace(' ', ' ').strip()
if line_break:
x = s.rfind(' ')
s = s[:x] + '\n' + s[x+1:]
return s
def nicify_labels(ax, x=True, y=True, line_break=True):
for xy in ['x', 'y']:
if locals()[xy]:
# Fix axis labels
z = getattr(ax, 'get_%slabel' % xy)()
new = nicify(z, line_break=line_break)
getattr(ax, 'set_%slabel' % xy)(new)
# Fix tick labels
z = getattr(ax, 'get_%sticklabels' % xy)()
new = [nicify(zi.get_text(), line_break=line_break)
if not zi.get_text().isnumeric() else zi.get_text()
for zi in z]
getattr(ax, 'set_%sticklabels' % xy)(new)
def fill_impute(df, feature_dtypes, copy=True):
if copy:
df = df.copy()
# Apply the following missing data handling and recasting rules.
for col, dtype in feature_dtypes.items():
if dtype == 'categorical':
df[col] = df[col].fillna('Missing').astype('object')
elif dtype == 'discrete':
df[col] = df[col].fillna(df[col].median()).astype(int)
elif dtype == 'binary':
df[col] = df[col].fillna(0.5).astype('float')
elif dtype == 'continuous':
df[col] = df[col].fillna(df[col].median()).astype('float')
return df
def plot_violin(X, y, feature, ax):
y.name = "COVID status"
Xy = X.join(y)
sns.violinplot(x="COVID status", y=feature, data=Xy, ax=ax, alpha=0.2)
ax.set_xlabel('')
ax.set_xticklabels(['COVID -', 'COVID +'], fontweight='bold')
ax.set_ylabel(nicify(feature), fontweight='bold')
def rescale(X):
# Create a version of X for which every column has mean 0, variance 1.
X_st = X.copy()
std_sclr = StandardScaler()
X_st[:] = std_sclr.fit_transform(X)
assert np.allclose(X_st.mean(), 0)
assert all(np.isclose(X_st.var(ddof=0), 1) + np.isclose(X_st.var(ddof=0), 0))
# Create a version of X for which every column has min 0, max 1.
mm_sclr = MinMaxScaler()
X_nm = X.copy()
X_nm[:] = mm_sclr.fit_transform(X)
return X_st, std_sclr, X_nm, mm_sclr
def lrcv_check(lrcv, X, y, features):
sample_weight = get_weights(X, y, 'balanced-by-country')
lrcv.fit(X[features], y, sample_weight=sample_weight)
return pd.DataFrame(lrcv.scores_[True].mean(axis=0).round(3),
index=pd.Series(lrcv.Cs_, name='C'),
columns=pd.Series(lrcv.l1_ratios_, name='L1 Ratio'))
def rccv_check(rccv, X, y, features):
sample_weight = get_weights(X, y, 'balanced-by-country')
rccv.fit(X[features], y, sample_weight=sample_weight)
return rccv.best_score_.round(3), rccv.alpha_
def raw_hist(X, y, feature, cumul=False):
minn = X[feature].min()
maxx = X[feature].max()
diff = maxx - minn
bins = np.linspace(minn-diff*0.01, maxx+diff*0.01, 30)
X.loc[y==1, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='+')
X.loc[y==0, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='-')
plt.legend()
plt.title(nicify(feature))
def contingency(X, features, verbose=True):
z = pd.crosstab(*[X[f] for f in features])
z.index.name = nicify(z.index.name)
z.columns.name = nicify(z.columns.name)
n = z.sum().sum()
chi2, p, _, _ = chi2_contingency(z)
k = min(*z.shape)
if n and k>1:
v = np.sqrt(chi2/(n*(k-1)))
else:
v = None
if min(z.shape) >= 2 and z.iloc[0, 1] and z.iloc[1, 1]:
num = (z.iloc[0, 0] / z.iloc[0, 1])
denom = (z.iloc[1, 0] / z.iloc[1, 1])
oddsr = num / denom
else:
oddsr = None
if verbose:
print('p = %.2g' % p)
return z, p, chi2, v, oddsr
def plot_coefs(clf, X, title=''):
x = rank_coefs(clf, X)
#x = x.drop('Intercept', errors='ignore')
threshold = x.drop('Intercept', errors='ignore')['Magnitude'].max()/10
x = x[x['Magnitude'] > threshold]
x = x.sort_values('Magnitude', ascending=True)
x['Pos'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='+' else None, axis=1)
x['Neg'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='-' else None, axis=1)*-1
try:
x['Pos'].plot(kind='barh', color='r', label='+')
except:
pass
try:
x['Neg'].plot(kind='barh', color='b', label='-')
except:
pass
plt.xlabel('Coefficient Magnitude')
plt.title(title)
plt.tight_layout()
def plot_pb_given_a_(X, b, a, restrict=None, ax=None, title=None, color='k', scale=1000, ticks=None):
if restrict is not None:
data = X.loc[restrict, [a, b]]
else:
data = X[[a, b]]
data = data.dropna()
kde = gaussian_kde(data.T)
if ticks is None:
ticks = np.linspace(-100, 100, 9)
a_ticks = [t for t in ticks if (t>=X[a].min() and t<=X[a].max())]
b_ticks = [t for t in ticks if (t>=X[b].min() and t<=X[b].max())]
a_support = a_ticks
b_support = np.linspace(b_ticks[0], b_ticks[-1], 100)
aa, bb = np.meshgrid(a_support, b_support)
pab = kde([aa.ravel(), bb.ravel()]).reshape(len(b_support), len(a_support))
pab = pd.DataFrame(pab, index=b_support, columns=a_support)
kde = gaussian_kde(data[a])
pa = pd.Series(kde(a_support), index=a_support)
pbga = pab.div(pa)
if ax is None:
ax = plt.gca()
for a_tick in a_ticks:
l2d = ax.plot(b_support, a_tick + scale*(pbga[a_tick]), label=a_tick, color=color)
color = l2d[0].get_color()
ax.plot(b_support, np.ones_like(b_support)*a_tick, '--', color=color)
ax.set_yticks(a_ticks)
ax.set_xticks(b_ticks)
ax.tick_params(reset=True, axis='y', length=5, width=1)
ax.set_xlim(b_ticks[0], b_ticks[-1])
ax.set_xlabel(nicify(b))
ax.set_ylabel(nicify(a))
if title:
ax.set_title(title)
#ax.legend()
return pab, pa
def plot_difference(pab_0, pa_0, pab_1, pa_1, ax=None, crange=(-1, 1), scale=10):
pbga_0 = pab_0.div(pa_0)
pbga_1 = pab_1.div(pa_1)
assert np.allclose(pbga_0.index, pbga_1.index)
assert np.allclose(pbga_0.columns, pbga_1.columns)
log2_odds = np.log2(pbga_1 / pbga_0)
from matplotlib.cm import get_cmap
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
norm = Normalize(crange[0], crange[1], True)
cmap = get_cmap('RdBu_r')
for a_tick in log2_odds.columns:
color = cmap(norm(log2_odds[a_tick].values))
l2d = ax.scatter(log2_odds.index, a_tick + (scale*pa_0[a_tick]*2**log2_odds[a_tick]), label=a_tick, c=color, s=1)
#color = l2d[0].get_color()
ax.plot(log2_odds.index, np.ones_like(log2_odds.index)*a_tick, '--', color='k')
cb = plt.colorbar(l2d)
cb.outline.set_visible(False)
cb1 = ColorbarBase(cb.ax, cmap=cmap, norm=norm)
cticks = np.linspace(*crange, 5)
cb1.set_ticks(cticks)
cb1.set_ticklabels(['%.2g' % (2**x) for x in cticks])
cb1.set_label('Odds Ratio')
#cb.remove()
ax.set_title('Ratio')
def plot_conditionals(X, y, b, a, restrict=None, crange=(-2, 2), scale=10):
covid = {0: y[y==0].index,
1: y[y==1].index}
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
if restrict is None:
restrict = y.index
restrict_0 = covid[0] & restrict
restrict_1 = covid[1] & restrict
pba_0, pa_0 = plot_pb_given_a_(X, b, a, restrict=restrict_0, ax=ax[0], title='COVID-', color='b')
pba_1, pa_1 = plot_pb_given_a_(X, b, a, restrict=restrict_1, ax=ax[1], title='COVID+', color='r')
ax[1].set_ylabel('')
ax[2].set_xlabel(ax[1].get_xlabel())
plot_difference(pba_0, pa_0, pba_1, pa_1, ax=ax[2], crange=crange, scale=scale)
plt.tight_layout()
return pba_0, pba_1
def get_matches(X, match_list):
return [x for x in X if any([m in x for m in match_list])]
def check_lr(X, y, cv, sample_weight=None):
from sklearn.linear_model import LogisticRegressionCV
lrcv = LogisticRegressionCV(penalty='elasticnet',
l1_ratios = np.linspace(0, 1, 5),
Cs = np.logspace(-3, 3, 7),
solver = 'saga',
scoring = 'roc_auc',
cv = cv,
max_iter=10000)
lrcv.fit(X, y, sample_weight=sample_weight)
return pd.DataFrame(lrcv.scores_[True].mean(axis=0),
index=lrcv.Cs_,
columns=lrcv.l1_ratios_)
def venn_covid(X, restrict, features, label, figsize=(5, 5)):
indices = {}
for feature in features:
z = X.loc[restrict, feature]
indices[feature] = set(z[z==1].index)
labels = get_labels([indices[feature] for feature in features], fill='percent')
labels = {k: v.replace('(','').replace(')','') for k, v in labels.items()}
venn3(labels, names=nicify(features), figsize=figsize, fontsize=9)
plt.gca().get_legend().remove()
z = X.loc[restrict, features]
z = z[z.sum(axis=1)==0].shape[0] / z.shape[0]
plt.title('%s; None of the three = %.1f%%' % (label, z*100))
def kde_plot(df, x, restrict, label, color, ax=None, title=None, **kwargs):
sns.set_style('whitegrid')
data = df.loc[restrict, x].dropna()
x_range = (np.min(df[x]), np.max(df[x]))
ax = sns.kdeplot(data, clip=x_range, color=color,
alpha=0.5, label=label, ax=ax, **kwargs)
ax.set_xlim(*x_range)
ax.set_xlabel(nicify(x), fontweight='bold')
ax.set_ylabel('Probabilty density', fontweight='bold')
if ax:
ax.set_title(nicify(x) if title is None else title)
return ax
def joint_plot(df, x, y, restrict, label, maxx=1e-3, cmap='Reds', cbar=False, ax=None):
sns.set_style('whitegrid')
data = df.loc[restrict, [x, y]].dropna()
x_range = (np.min(df[x]), np.max(df[x]))
y_range = (np.min(df[y]), np.max(df[y]))
ax = sns.kdeplot(data[x], data[y], shade=True, clip=[x_range, y_range],
vmin=0, vmax=maxx, cmap=cmap, shade_lowest=True, alpha=0.5,
ax=ax, n_levels=100, cbar=True,
cbar_kws={'format': '%.2g',
'label': 'Probability density (x1000)',
'shrink': 0.8})
cax = plt.gcf().axes[-1]
if cbar:
cbar_ticks = cax.get_yticks()
cax.set_yticklabels((cbar_ticks*1000).round(2))
else:
cax.remove()
ax.set_xlim(*x_range)
ax.set_ylim(*y_range)
ax.set_xlabel(nicify(x), fontweight='bold')
ax.set_ylabel(nicify(y), fontweight='bold')
ax.set_title(label, fontweight='bold')
return ax
def feature_hist(df, categories, feature, drop=None, bw=5, cut=0, ax=None, title=None, colors='rbgmck'):
for i, (label, indices) in enumerate(categories.items()):
ax = kde_plot(df, feature, indices, label, colors[i], lw=3, bw=bw, cut=cut, ax=ax, title=title)
ax.legend(fontsize=9);
def feature_contingency(df, categories, feature, drop=None, normalize=None, verbose=True):
z = df[[feature]].copy()
for label, indices in categories.items():
z.loc[indices, 'Group'] = label
if drop:
z = z[~z[feature].isin(drop)]
c = contingency(z, [feature, 'Group'], verbose=verbose)[0]
if normalize is not None:
c = c.div(c.sum(axis=normalize), axis=1-normalize).round(2)
try:
c.index = [x.replace('\n', ' ') for x in c.index]
except:
pass
try:
c.columns = [x.replace('\n', ' ') for x in c.columns]
except:
pass
c = c.rename(index={'M': 'Men', 'F': 'Women'})
return c
def feature_compare(df, categories, feature):
z = pd.DataFrame(index=list(categories),
columns=pd.MultiIndex.from_product([categories, ['Δ', 'σ', 'seΔ', 'D', 'p']]))
z.index.name = nicify(feature)
for d1 in categories:
for d2 in categories:
x1 = df.loc[categories[d1], feature]
x2 = df.loc[categories[d2], feature]
delta = x1.mean() - x2.mean()
d = cohen_d(x1, x2)
p = mannwhitneyu(x1, x2).pvalue
z.loc[d1, (d2, 'Δ')] = "%.2g" % delta
z.loc[d1, (d2, 'σ')] = "%.2g" % (0 if not d>0 else delta/d)
z.loc[d1, (d2, 'seΔ')] = "%.2g" % (delta/np.sqrt(len(x1)+len(x2)))
z.loc[d1, (d2, 'D')] = "%.2g" % d
z.loc[d1, (d2, 'p')] = "%.2g" % p
if len(categories)==2:
d1 = list(categories)[0]
d2 = list(categories)[1]
z = z.loc[d1, d2]
z.name = nicify(feature)
return z
def features_compare(df, categories, features):
assert len(categories) == 2
zs = [feature_compare(df, categories, feature) for feature in features]
return pd.concat(zs, axis=1)
def hist_or_contingency(df, categories, feature, drop=None, normalize=None):
if (df.dtypes[feature] != 'object') and (df[feature].max() > 5 or df[feature].min() < -5):
f = feature_hist
else:
f = feature_contingency
return f(df, categories, feature, drop=None, normalize=None)
def describe_three_clusters(df, feature, s, drop=None, normalize=None):
smell_loss = (df['Smell_change_illness']<-80)
smell_recovery = (df['Smell_recovery_illness']>30)
r = (df['Recovery_y/n']==2) & df.index.to_series().isin(s['covid'])
categories = {'Recovered Smell': r & smell_loss & smell_recovery,
'Nonrecovered Smell': r & smell_loss & ~smell_recovery,
'Intact Smell': r & ~smell_loss}
return hist_or_contingency(df, categories, feature, drop=drop, normalize=normalize)
def get_set(df, query):
return set(df.query(query, engine='python').index)
def diagnosis_joint_plots(df, feature1, feature2, r, s, maxx=3e-4):
if r is None:
r = set(df.index)
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(6, 11.5))
for i, (diagnosis, label, cmap) in enumerate([('lab-covid', 'C19+', 'Reds'),
('non-covid', 'C19-', 'Reds')]):
joint_plot(df, feature1, feature2,
r & s[diagnosis], label, cmap=cmap, maxx=maxx, ax=ax[i], cbar=(i==0))
return ax
def statsmodels_to_df(results, plot=False, title='', figsize=(10, 5), scale=None):
summ = results.summary()
df = pd.read_csv(StringIO(summ.tables[1].as_csv()), index_col=0)
df.columns = df.columns.str.strip()
df['abs_coef'] = df['coef'].abs()
df.index = df.index.str.strip()
df = df.sort_values('abs_coef', ascending=False)
df = df.round(2)
df['P>|z|'] = results.pvalues#.apply(lambda x: '%.1g'%x)
df = df[df['abs_coef']>0]
if scale is not None and scale is not False:
df['coef'] /= scale
try:
df['std err'] /= scale
df['[0.025'] /= scale
df['0.975]'] /= scale
except:
pass
df.index = nicify(list(df.index))
if plot:
plt.figure(figsize=figsize)
dfp = df.drop('Intercept')
ax = dfp.sort_values('abs_coef', ascending=True).plot.barh(y='coef', xerr='std err', legend=None, capsize=4, ax=plt.gca())
labels = ax.get_yticklabels()
ax.set_yticklabels('%s\n(p=%.1g)' % (label.get_text(), df['P>|z|'].iloc[-i-2])
for i, label in enumerate(labels))
ax.set_xlabel('Regression Coefficient', fontweight='bold')
ax.set_title(title, fontweight='bold')
df = df.drop('abs_coef', axis=1)
for col in df:
def fill(x):
try:
return '%.2g' % float(x)
except:
return None
df[col] = df[col].apply(fill)
return df
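# Effect-size helpers: pooled_sd computes the pooled standard deviation
# sqrt(((n1-1)*s1^2 + (n2-1)*s2^2) / (n1 + n2 - 2)) (with s1, s2 from np.std, i.e. ddof=0),
# and cohen_d the standardized mean difference (mean(x1) - mean(x2)) / pooled_sd(x1, x2).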
def pooled_sd(x1, x2):
n1 = len(x1)
n2 = len(x2)
s1 = np.std(x1)
s2 = np.std(x2)
num = (n1-1)*(s1**2) + (n2-1)*(s2**2)
denom = n1 + n2 - 2
return np.sqrt(num/denom)
def cohen_d(x1, x2):
return (np.mean(x1) - np.mean(x2)) / pooled_sd(x1, x2)
def sequential_features(clf, X, y, features, cv, Cs=np.logspace(-1, 3, 5)):
"""Return feature that maximizes cross-validated ROC AUC,
then feature that maximizes it given inclusion of the first feature, and so ob"""
roc_aucs = pd.DataFrame(columns=pd.MultiIndex.from_product([Cs, ['Rank', 'AUC']]), dtype='float')
bar0 = tqdm(Cs)
bar1 = trange(len(features))
bar2 = trange(len(features))
for C in bar0:
clf.C = C
features_used = []
features_remaining = features.copy()
bar1.reset()
for i in range(len(features)):
bar1.update(1)
best_auc = 0
best_feature = None
bar2.reset()
z = pd.Series(index=features_remaining)
for j in range(len(features)):
bar2.update(1)
feature = features[j]
if feature in features_remaining:
auc = cross_val_score(clf, X[features_used + [feature]], y, cv=cv,
scoring='roc_auc', n_jobs=cv.n_splits).mean()
#auc += 0.003*('savory' in feature.lower())
z[feature] = auc
if auc > best_auc:
best_feature = feature
best_auc = auc
features_used.append(best_feature)
features_remaining.remove(best_feature)
#print(z.sort_values(ascending=False))
roc_aucs.loc[best_feature, (C, 'Rank')] = i+1
roc_aucs.loc[best_feature, (C, 'AUC')] = best_auc
return roc_aucs
def status_map(df, mapping, name):
status = {}
for k, v in mapping.items():
status.update({key: k for key in v})
df[name] = df.index.map(status.get)
return df
def get_tuple_feature_aucs(clf, X, y, n, sample_weight=None, only_binary=False, nicify_=True, add_to=None):
if only_binary:
symptoms = [x for x in X if 'Symptoms_' in x]
else:
symptoms = list(X)
if n > 1:
tuples = combinations_with_replacement(symptoms, n)
else:
tuples = symptoms
s = pd.Series(index=tuples, dtype='float')
for tup in tqdm(s.index):
if n > 1:
tup_ = list(set(tup)) # Get rid of duplicates
else:
tup_ = [tup]
if add_to:
tup_+= list(set(add_to))
clf.fit(X[tup_], y, sample_weight=sample_weight)
s.loc[tup] = clf.scores_[True].mean()
if n>1:
s.index = pd.MultiIndex.from_tuples(s.index)
if nicify_:
s.index = s.index.map(lambda x: ' + '.join([nicify(xi) for xi in x]))
else:
if nicify_:
s.index = nicify(list(s.index))
s = s.sort_values(ascending=False).round(3)
s.index.name = 'Symptom set'
df = s.to_frame('ROC AUC')
return df
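# yg_week maps a day-of-year column of the GCCR data onto YouGov survey weeks,
# counted in 7-day bins from April 1, 2020 (plus an optional offset in days),
# with the result clipped to be at least 1.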
def yg_week(df_gccr, offset=0, how='Onset_day'):
days = (datetime.strptime('2020/04/01', '%Y/%m/%d') - datetime.strptime('2020/01/01', '%Y/%m/%d')).days
days += offset
return 1 + ((df_gccr[how].astype(int) - days)/7).astype(int).clip(0, 9999)
def download_unzip_df(url):
filehandle, _ = urllib.request.urlretrieve(url)
zip_file_object = zipfile.ZipFile(filehandle, 'r')
first_file = zip_file_object.namelist()[0]
file = zip_file_object.open(first_file)
return pd.read_csv(file, encoding='latin1', dtype='object')
def download_yougov():
url = 'https://raw.githubusercontent.com/YouGov-Data/covid-19-tracker/master'
yg_countries = pd.read_csv('%s/countries.csv' % url, header=None)[0]
path = pathlib.Path('data/yougov')
path.mkdir(parents=True, exist_ok=True)
for country in tqdm(yg_countries):
file_url = '%s/data/%s.csv' % (url, country.replace(' ', '-').replace('emerites', 'emirates'))
#print(file_name)
try:
yg = pd.read_csv(file_url,
encoding='latin1', dtype='object')
except:
try:
zip_file_url = file_url[:-4]+'.zip'
print(zip_file_url)
yg = download_unzip_df(zip_file_url)
except:
                raise Exception("Could not download or read %s" % file_url)
yg.to_csv(path / ('yougov_%s.csv' % country))
return yg_countries
def fix_yougov(yg):
yg = yg[yg['qweek'].str.contains('week')].copy()#.dropna(subset=['qweek'])
yg['week'] = yg['qweek'].apply(lambda x: x.split(' ')[1]).astype(int)
try:
yg['endtime'] = pd.to_datetime(yg['endtime'], format='%d/%m/%Y %H:%M')
except:
yg['endtime'] = pd.to_datetime(yg['endtime'], format='%Y-%m-%d %H:%M:%S')
yg = yg[yg['endtime']<=YOUGOV_CUTOFF_DATE]
return yg
def get_yougov(df_gccr, countries):
df = pd.DataFrame(index=countries)
for country in countries:
yg = pd.read_csv('data/yougov/yougov_%s.csv' % country, dtype='object')
yg = fix_yougov(yg)
country = 'usa' if country == 'united-states' else country
country = 'uk' if country == 'united-kingdom' else country
weights = df_gccr[df_gccr['Country_of_Residence']==country]['yg_week'].value_counts()
for week in (set(weights.index) | set(yg['week'])):
if week not in (set(weights.index) & set(yg['week'])):
weights.loc[week] = 0
weight = weights[yg['week']]
z = pd.get_dummies(yg[['i3_health', 'i4_health']])
p = z[[x for x in z.columns if 'positive' in x]]
n = z[[x for x in z.columns if 'negative' in x]]
total = p.sum().sum() + n.sum().sum()
p = p.mul(weight.values, axis=0).sum().sum()
n = n.mul(weight.values, axis=0).sum().sum()
if p+n:
df.loc[country, 'YG_fp'] = p/(p+n)
else:
df.loc[country, 'YG_fp'] = None
df.loc[country, 'YG_N'] = total
df = df.drop(['united-states', 'united-kingdom'])
df.index.name = 'Country'
df = df.sort_values('YG_fp')
return df
def compare_yougov(df_gccr, df_yg, s):
df_gccr['status'] = -1
#df_gccr.loc[s['lab-covid'] | s['clinical-covid'], 'status'] = 1
df_gccr.loc[s['lab-covid'], 'status'] = 1
df_gccr.loc[s['non-covid'], 'status'] = 0
df_gccr = df_gccr[df_gccr['status'] >= 0]
#df_gccr['status'] = df_gccr['status'].astype(int)
df_gccr = df_gccr.groupby('Country_of_Residence').agg({'status': ['mean', 'count']})['status']
df_gccr.columns = ['GCCR_fp', 'GCCR_N']
df_gccr = df_gccr[df_gccr['GCCR_N']>=10]
df = df_gccr.join(df_yg, how='outer')
return df
def plot_fp(fp, drop=None, plot=True, verbose=False, ax=None, break_axis=False):
if ax is None and plot:
ax = plt.gca()
if drop:
fp = fp.copy()
fp = fp.drop(drop)
for kind in ['GCCR', 'YG']:
p = fp['%s_fp' % kind]
n = fp['%s_N' % kind]
fp['%s_se' % kind] = np.sqrt(p*(1-p)/n)
fp['%s_logodds_fp' % kind] = np.log(p/(1-p))
if plot:
ax.errorbar(fp['GCCR_fp'], fp['YG_fp'], xerr=fp['GCCR_se'], yerr=fp['YG_se'], marker='o', ls='none', alpha=0.5);
ax.set_xlabel('GCCR Fraction of COVID Tests Positive')
ax.set_ylabel('YouGov Fraction of\nCOVID Tests Positive');
fp_ = fp.dropna()
#lr = LinearRegression()
#lr.fit(fp_[['GCCR_logodds_fp']], fp_[['YG_logodds_fp']], sample_weight=1/fp_['GCCR_se']**2)
#x = np.linspace(-10, 10, 1000)
#y = lr.predict(x.reshape(-1, 1))
#from scipy.special import expit
#plt.plot(expit(x), expit(y), '--')
from scipy.stats import spearmanr, pearsonr
def pearson_spearman(x, y):
return pearsonr(x, y)[0], spearmanr(x, y)[0]
if verbose:
print("Log-Odds R = %.3g; Rho=%.3g" % pearson_spearman(fp_['GCCR_logodds_fp'], fp_['YG_logodds_fp']))
print("Raw R = %.3g; Rho=%.3g" % pearson_spearman(fp_['GCCR_fp'], fp_['YG_fp']))
return pearson_spearman(fp_['GCCR_fp'], fp_['YG_fp'])[0]
def cluster_summary(df, clusters, s, feature):
z = pd.DataFrame(index=list(clusters), columns=['female', 'male'])
for cluster in z.index:
for gender in z.columns:
restrict = clusters[cluster] & s[gender]
mean = df.loc[restrict, feature].mean()
std = df.loc[restrict, feature].std()
z.loc[cluster, gender] = '%.3g +/- %.2g' % (mean, std)
z.index = [x.replace('\n', ' ') for x in z.index]
z.index.name = nicify(feature)
z.columns = ['Women', 'Men']
return z
def exclusive_best_tuples(tuple_feature_aucs):
z = tuple_feature_aucs.copy()
z.index = pd.MultiIndex.from_tuples(
list(tuple_feature_aucs.index.map(lambda x: x.split(' + '))))
i = 0
while True:
index = z.index[i]
z = z.drop([x for x in z.index[i+1:] if x[0] in index or x[1] in index])
i += 1
if i >= z.shape[0]:
break
return z
def compare_lr_model_roc_aucs(X, y, cv, feature_sets, Cs):
z = pd.DataFrame(index=list(Cs), columns=feature_sets.keys())
z.index.name = 'C'
for C in Cs:
# Use the same model again
lr = LogisticRegression(penalty='elasticnet', solver='saga', C=C,
l1_ratio=1, max_iter=10000, random_state=0)
for label, features in feature_sets.items():
z.loc[C, label] = cross_val_score(lr, X[features], y, scoring='roc_auc', cv=cv,
n_jobs=cv.n_splits).mean().round(3)
return z
def single_and_cumulative_plot(single_aucs, single_xrange, cumul_aucs, cumul_xrange, n_features, classes, C=10, figsize=(14, 6)):
# Figure layout
#sns.set_style('whitegrid')
fig = plt.figure(figsize=figsize)
width_ratios = [single_xrange[1]-single_xrange[0], cumul_xrange[1]-cumul_xrange[0]]
spec = gridspec.GridSpec(ncols=2, nrows=1, width_ratios=width_ratios)
# First panel
ax0 = fig.add_subplot(spec[0])
feature_names = list(single_aucs.index)
ax0.plot(single_aucs, feature_names, 'ko', markersize=10)
ax0.hlines(y=range(n_features), xmin=single_xrange[0], xmax=single_aucs,
color='gray', alpha=0.2, linewidth=5)
ax0.set_ylim(n_features-0.5, -0.5)
ax0.set_xlim(*single_xrange)
ax0.set_xlabel('ROC Area Under Curve')
#ax0.set_title('Top single-feature models')
ax0.set_yticklabels(feature_names)
def fix_ticklabels(axes):
for ticklabel in axes.get_yticklabels():
text = ticklabel.get_text()
ticklabel.set_weight("normal")
if text in classes['features']['chemosensory']:
ticklabel.set_weight("bold")
if classes['dtypes'].get(text, '') not in ['binary', 'categorical']:
ticklabel.set_style("oblique")
fix_ticklabels(ax0)
ax0.set_yticklabels(nicify(feature_names))
# Second panel
if cumul_xrange[0] == cumul_xrange[1]:
return 0
ax1 = fig.add_subplot(spec[1])
z = cumul_aucs[C].sort_values('Rank')
z['Rank'] = np.arange(1, z.shape[0]+1)
z.plot(x='AUC', y='Rank', color='k', marker='o', ax=ax1)
ax1.set_ylim(n_features+0.5, 0.5)
ax1.set_ylabel('Cumulative # Features')
ax1.set_xlabel('ROC AUC')
ax1.set_xlim(*cumul_xrange)
ax1.set_yticks(range(1, n_features+1))
ax1.legend().remove()
#ax1.set_title('Top cumulative features model')
twinax = ax1.twinx()
twinax.set_ylim(n_features+0.5, 0.5)
twinax.set_yticks(range(1, n_features+1))
feature_names = list(cumul_aucs[C].sort_values('Rank').index)
twinax.set_yticklabels(feature_names)
fix_ticklabels(twinax)
feature_names = nicify(feature_names)
twinax.set_yticklabels(['+%s' % f if i else f for i, f in enumerate(feature_names)]);
plt.tight_layout()
def single_plot(single_aucs, single_xrange, n_features, classes, figsize=(10, 6), ax=None, delta=False):
if ax is None:
fig = plt.figure(figsize=figsize)
ax = plt.gca()
feature_names = list(single_aucs.index)
ax.plot(single_aucs, feature_names, 'ko', markersize=10)
ax.hlines(y=range(n_features), xmin=single_xrange[0], xmax=single_xrange[1],
color='gray', alpha=0.2, linewidth=5)
ax.set_ylim(n_features-0.5, -0.5)
ax.set_xlim(*single_xrange)
if delta:
ax.set_xlabel('Δ ROC AUC')
else:
ax.set_xlabel('ROC AUC')
ax.set_yticklabels(feature_names)
def fix_ticklabels(axes):
for ticklabel in axes.get_yticklabels():
text = ticklabel.get_text()
ticklabel.set_weight("normal")
if text in classes['features']['chemosensory']:
ticklabel.set_weight("bold")
if classes['dtypes'].get(text, '') not in ['binary', 'categorical']:
ticklabel.set_style("oblique")
fix_ticklabels(ax)
ax.set_yticklabels(nicify(feature_names))
return ax
def plot_cumul_roc_aucs(roc_aucs):
for C in roc_aucs.columns.levels[0]:
roc_aucs[C].sort_values('Rank')['AUC'].plot(label='C=%.3g' % C)
plt.legend()
ns = range(0, roc_aucs.shape[0], 5)
plt.xticks(ns, ['%d' % (1+x) for x in ns])
plt.xlabel('# Features')
def fix_quartile_lines(ax):
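    # Restyle the quartile lines drawn inside each group (three lines per group, e.g. as
    # produced by seaborn violin plots with inner='quartile'): the middle (median) line is
    # made solid with linewidth 2, the outer quartiles dashed, and the colour alternates
    # white/black between successive groups.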
for i, l in enumerate(ax.lines):
if i % 3 != 1:
l.set_linestyle('--')
else:
l.set_linestyle('-')
l.set_linewidth(2)
if int(i/3) % 2 == 0:
l.set_color('white')
else:
l.set_color('black')
def mutual_info(contingency):
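    # Mutual information (in bits) estimated from a contingency table:
    # I(X;Y) = sum_{x,y} p(x,y) * log2( p(x,y) / (p(x) * p(y)) );
    # the 1e-25 constant keeps empty cells from producing log(0).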
pxy = (contingency+1e-25) / contingency.sum().sum()
px = pxy.sum(axis=1)
py = pxy.sum(axis=0)
I = 0
for i in range(len(px)):
for j in range(len(py)):
I += pxy.iloc[i, j] * np.log2(pxy.iloc[i, j] / (px.iloc[i]*py.iloc[j]))
return I
def check_splits(clf, X, y, features, s, set_keys):
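    # Fit the classifier on unions of the named subsets and report the ROC AUC on every
    # disjoint union of subsets, as a quick check of how well the model transfers across
    # data tranches.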
for train_keys in combinations_with_replacement(set_keys, len(set_keys)-1):
train_keys = set(train_keys)
train = set.union(*[s[x] for x in train_keys])
for test_keys in combinations_with_replacement(set_keys, len(set_keys)-1):
test_keys = set(test_keys)
if not train_keys.intersection(test_keys):
test = set.union(*[s[x] for x in test_keys])
clf.fit(X.loc[train, features], y[train])
auc = roc_auc_score(y[test], clf.predict_proba(X.loc[test, features])[:, 1])
print(
"Trained on: %s; Tested on: %s; AUC=%.3g"
% (train_keys, test_keys, auc)
)
def odds_ratio(df, sets, feature):
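    # Cross-product (odds) ratio from the 2x2 feature-by-COVID-status contingency table:
    # (n[feature=1, C19+] / n[feature=1, C19-]) / (n[feature=0, C19+] / n[feature=0, C19-]).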
df_c = feature_contingency(df, sets, feature, verbose=False)
return (df_c.loc[1, "C19+"] / df_c.loc[1, "C19-"]) / (
df_c.loc[0, "C19+"] / df_c.loc[0, "C19-"]
)
def compute_gccr_yougov_corr(df, s, yg_countries):
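    # For each weekly offset between -42 and +42 days, realign the GCCR responses to the
    # YouGov survey weeks, recompute the GCCR vs YouGov comparison, and store the resulting
    # correlation; sample sizes are printed at offset 0.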
country_of_residence = pd.read_csv("data/processed/country-of-residence.csv", index_col=0)
df_ = df.join(country_of_residence, how="inner")
rs = []
result = pd.Series(index=np.arange(7 * -6, 7 * 7, 7), dtype='float')
for offset in tqdm(result.index):
df_["yg_week"] = yg_week(df_, offset=offset, how="Completion_day")
# df_["yg_week"] = yg_week(df_, offset=offset, how="Onset_day")
yougov = get_yougov(df_, yg_countries)
gccr_yougov = compare_yougov(df_, yougov, s)
if offset == 0:
# Print sample size at offset=0
print(gccr_yougov[["GCCR_N", "YG_N"]].dropna().sum().astype(int))
result[offset] = plot_fp(gccr_yougov, plot=False)
return result
def get_auc_p(auc, n1, n2):
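    # One-sided p-value for an observed ROC AUC under the null of no discrimination,
    # using the normal approximation to the Mann-Whitney U statistic:
    # U = AUC * n1 * n2, with mean n1*n2/2 and SD sqrt(n1*n2*(n1+n2+1)/12).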
sigma_u = np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12)
mu_u = n1 * n2 / 2
u = auc * n1 * n2
z = (u - mu_u) / sigma_u
return 1 - norm.cdf(z)
def tranche_compare(clf, X, y, s):
for gccr in [1, 2, 3]:
s["gccr%d" % gccr] = get_set(df, "GCCR==%d" % gccr) & (
s["lab-covid"] | s["non-covid"]
)
check_splits(
clf,
X,
y,
["Smell_during_illness", "Days_since_onset"],
s,
["gccr1", "gccr2", "gccr3"],
)
def anova(df, dv, between, ss_type=3):
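    # Standardize the relevant columns (after recoding COVID_Status to a 0/1 indicator and
    # replacing blanks in column names, which pingouin cannot handle) and run an ANOVA of
    # dv on the between factor(s) with the requested sum-of-squares type.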
if not isinstance(between, list):
between = [between]
df_anova = df.rename(columns={col: col.replace(' ', '_') for col in df}) # Pingouin can't handle the blank space
cols = list(set([dv] + between + ['COVID_Status']))
df_anova['COVID_Status'] = 1*(df_anova['COVID_Status']=='C19+')
df_anova = df_anova[cols]
df_anova[:] = StandardScaler().fit_transform(df_anova)
return pingouin.anova(data=df_anova, dv=dv, between=between, ss_type=ss_type)
def author_roles():
    authors = pd.read_csv('data/processed/author_roles.csv', encoding='latin1')
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# Setting up the environment.
import numpy as np
import pandas as pd
# %%
# Load the data from the John Hopkins github repo
df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/11-14-2020.csv', index_col=0)
# %%
# Dropping some columns and sorting
df1 = df[["Admin2", "Province_State", "Country_Region", "Confirmed", "Deaths", "Combined_Key", "Recovered", "Active", 'Last_Update']] #getting the columns I want
df1 = df1[df1['Confirmed'] !=0] #dropping States with 0 confirmed
df1 = df1[(df1["Country_Region"] == "US")] #dropping countries other than the US
df1 = df1.sort_values(by=['Province_State','Confirmed'], ascending=True) #sorting by State and then Confirmed
df1 = df1.dropna() #dropping NA values
df1 = df1[df1.Province_State != 'Wuhan Evacuee'] #dropping this row because it is not US
df1['Combined_Key'] = df1['Combined_Key'].str.replace(r', US', '') #removing US from Combined key so it looks better in the hover text
#df1 = df1.groupby(['Province_State'])
df1 = df1.reset_index() #resetting index so FIPS is not the index
df1 = df1.rename(columns={'Province_State': 'State'})
df1 = df1.rename(columns={'Admin2': 'County'})
df1 = df1.rename(columns={'Country_Region': 'Country'})
df1 = df1.rename(columns={'Combined_Key': 'County/State'})
df1.head(5)
#df1.shape
# %%
df2 = df1.groupby(['State']).sum()
df2 = df2.rename(columns={'Confirmed': 'Total Confirmed'})
df2 = df2.rename(columns={'Deaths': 'Total Deaths'})
df2 = df2.reset_index()
#df2 = df2[df['Confirmed'] > 20]
df2 = df2.sort_values(by=['Total Confirmed', 'State'], ascending=False)
df2.head(5)
# %%
frames = [df1, df2]
#df3 = pd.concat([df1, df2], axis=0, sort=False)
df3 = pd.concat(frames)
import pickle
import numpy as np
import pandas as pd
## plot conf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 7})
width = 8.5/2.54
height = width*(3/4)
###
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
plot_path = './'
male_rarities, female_rarities = pickle.load(open(script_dir+'/plot_pickles/raritys.p', 'rb'))
## Load DPVI fits
epsilons = [0.74]
epsilons = np.array(epsilons)
n_runs = 100
# Load DP results
syn_dpvi_coef_female_dict = pickle.load(open(script_dir+'/plot_pickles/female_coef_dict.p', 'rb'))
syn_dpvi_p_value_female_dict = pickle.load(open(script_dir+'/plot_pickles/female_p_value_dict.p', 'rb'))
syn_dpvi_coef_male_dict = pickle.load(open(script_dir+'/plot_pickles/male_coef_dict.p', 'rb'))
syn_dpvi_p_value_male_dict = pickle.load(open(script_dir+'/plot_pickles/male_p_value_dict.p', 'rb'))
## load bootstrap results
female_names = list(pd.read_csv('../R/original_bootstrapped/female_bootstrapped.csv', index_col=0).index)
female_nrun_coefs = pd.read_csv('../R/original_bootstrapped/female_bootstrap.csv', usecols=range(43), index_col=0)
female_nrun_pvalues = pd.read_csv('../R/original_bootstrapped/female_bootstrap.csv', usecols=range(43, 85))
female_nrun_coefs = pd.DataFrame(female_nrun_coefs.values, columns=female_names)
#General guide: https://github.com/googleads/google-ads-python
#When I use this script, it runs on a cron job every hour. The dataframe is uploaded to SQL (this code is not provided)
# and if the pct_of_budget exceeds a given value, it sends me an email with a list of campaigns to check on
import pandas as pd
import io
from googleads import adwords
from datetime import datetime, timedelta
acc_id = 'YOUR_ADS_ACCOUNT_NUMBER'
output = io.StringIO()
adwords_client = adwords.AdWordsClient.LoadFromStorage('googleads.yaml') #You will need to generate this file yourself
adwords_client.SetClientCustomerId(acc_id)
report_downloader = adwords_client.GetReportDownloader(version='v201809')
report_query = (adwords.ReportQueryBuilder()
.Select('CampaignName', 'Cost', 'Amount')
.From('CAMPAIGN_PERFORMANCE_REPORT')
.Where('CampaignStatus').In('ENABLED')
.During('TODAY')
.Build())
report_downloader.DownloadReportWithAwql(report_query, 'CSV', output, skip_report_header=True,
skip_column_header=False, skip_report_summary=True,
include_zero_impressions=True)
output.seek(0)
types= {'Cost': pd.np.float64, 'Budget': pd.np.float64}
df = pd.read_csv(output, low_memory=False, dtype=types, na_values=[' --'])
# https://blog.csdn.net/a19990412/article/details/85139058
# LSTM-based stock price prediction -- PyTorch version (120+ lines of code)
'''
Model assumption:
The daily high of the CSI 300 index is assumed to depend on the highs of the
previous n days. An LSTM (recurrent neural network) is used for the estimate so
that the temporal structure is captured: the model learns to predict the current
day's high from the previous n days' highs.
'''
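# Illustrative example (not from the original source): with n = 2 and daily highs
# [10, 11, 12, 13, 14], the training samples become ([10, 11] -> 12), ([11, 12] -> 13)
# and ([12, 13] -> 14): the previous n highs are the features, the next high is the target.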
# depends
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
## load data
def generate_df_affect_by_n_days(series, n, index=False):
if len(series) <= n:
raise Exception("The Length of series is %d, while affect by (n=%d)." % (len(series), n))
    df = pd.DataFrame()
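    # NOTE: the lines below are a reconstruction of the windowing described in the
    # docstring above (one feature column per lag plus a target column 'y'); treat them
    # as an assumption about the intended implementation rather than the original code.
    values = series.tolist()
    for i in range(n):
        df['c%d' % i] = values[i: i - n]
    df['y'] = values[n:]
    if index:
        df.index = series.index[n:]
    return df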
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def test_pickle_compat_construction(self):
# need an object to create with
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)")
with pytest.raises(TypeError, match=msg):
self._holder()
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_to_frame_datetime_tz(self):
# GH 25809
idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D')
idx = idx.tz_localize('UTC')
result = idx.to_frame()
expected = pd.DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = "Not supported for type {}".format(type(idx).__name__)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(' __', ' __r')
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match='cannot perform all'):
idx.all()
with pytest.raises(TypeError, match='cannot perform any'):
idx.any()
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
with pytest.raises(ValueError, match='The truth value of a'):
if idx:
pass
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match='Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(ind), (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, order=('a', 'b'))
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
for name, idx in compat.iteritems(self.indices):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
for name, idx in compat.iteritems(self.indices):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second, sort)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.difference(case, sort)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.sort_values().asi8,
answer.sort_values().asi8)
else:
result = first.difference(case, sort)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
def test_insert_base(self):
for name, idx in compat.iteritems(self.indices):
result = idx[1:4]
if not len(idx):
continue
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
for name, idx in compat.iteritems(self.indices):
if not len(idx):
continue
if isinstance(idx, RangeIndex):
# tested in class
continue
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
idx.delete(len(idx))
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
if isinstance(index_a, PeriodIndex):
pytest.skip('Skip check for PeriodIndex')
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_numpy_ufuncs(self):
# test ufuncs of numpy, see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
for name, idx in compat.iteritems(self.indices):
for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
np.rad2deg]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
# PeriodIndex behavior should be changed in future version
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# coerces to float (e.g. np.sin)
with np.errstate(all='ignore'):
result = func(idx)
exp = Index(func(idx.values), name=idx.name)
tm.assert_index_equal(result, exp)
assert isinstance(result, pd.Float64Index)
else:
# raise AttributeError or TypeError
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# Results in bool array
result = func(idx)
assert isinstance(result, np.ndarray)
assert not isinstance(result, Index)
else:
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
func(idx)
def test_hasnans_isnans(self):
# GH 11343, added tests for hasnans / isnans
for name, index in self.indices.items():
if isinstance(index, MultiIndex):
pass
else:
idx = index.copy()
# cases in indices doesn't include NaN
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy()
values = np.asarray(idx.values)
if len(index) == 0:
continue
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self):
# GH 11343
for name, index in self.indices.items():
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy()
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
for name, index in self.indices.items():
if len(index) == 0:
tm.assert_numpy_array_equal(
index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
else:
if not index.hasnans:
tm.assert_numpy_array_equal(
index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(
index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype('int64')
else:
expected = index
result = index.map(lambda x: x)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
index = self.create_index()
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip("skipping tests for {}".format(type(index)))
identity = mapper(index.values, index)
# we don't infer to UInt64 for a dict
if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
expected = index.astype('int64')
else:
expected = index
result = index.map(identity)
tm.assert_index_equal(result, expected)
# empty mappable
expected = pd.Index([np.nan] * len(index))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
with pytest.raises(ValueError):
index.putmask(np.ones(len(index) + 1, np.bool), 1)
with pytest.raises(ValueError):
index.putmask(np.ones(len(index) - 1, np.bool), 1)
with pytest.raises(ValueError):
index.putmask('foo', 1)
@pytest.mark.parametrize('copy', [True, False])
@pytest.mark.parametrize('name', [None, 'foo'])
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, copy, name, ordered):
# GH 18630
index = self.create_index()
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, ordered=ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = index.astype('category', copy=copy)
expected = CategoricalIndex(index.values, name=name)
            tm.assert_index_equal(result, expected)
import geopandas as gp
import pandas as pd
import numpy as np
import networkx as nx
import os
from shapely.geometry import Point, Polygon, LineString, mapping
from shapely import geometry
from simpledbf import Dbf5
import warnings
warnings.filterwarnings('ignore')
# GTFS directories, service ids, and years
GTFS = [[r'C:\Users\xzh263\Dropbox (KTC)\SFCTA CMP\2021 CMP\Coverage\gtfs_2021may13', '1_merged_10007724', 2021],
[r'C:\Users\xzh263\Dropbox (KTC)\SFCTA CMP\2021 CMP\Coverage\gtfs_2020april9', 1, 2020],
[r'C:\Users\xzh263\Dropbox (KTC)\SFCTA CMP\2021 CMP\Coverage\gtfs_2019may22', '1_merged_8846826', 2019]]
# Output directory
Coverage_Dir = r'C:\Users\xzh263\Dropbox (KTC)\SFCTA CMP\2021 CMP\Coverage'
# OSM Streets
Streets_Dir = r'C:\Users\xzh263\Dropbox (KTC)\SFCTA CMP\2021 CMP\Coverage\champ_hwy_shapefile'
street_file = 'champ_freeflow.shp'
#TAZ shapefile
TAZ_Dir = Coverage_Dir
taz_file = 'TAZ2454_clean\TAZ2454_clean.shp'
# define parameters needed by the calculation
min_trips = 22
buffer_radius = 0.25 * 5280 # a quarter mile walking distance
#Define NAD 1983 StatePlane California III
cal3 = {'proj': 'lcc +lat_1=37.06666666666667 +lat_2=38.43333333333333 +lat_0=36.5 +lon_0=-120.5 +x_0=2000000 +y_0=500000.0000000002', 'ellps': 'GRS80', 'datum': 'NAD83', 'no_defs': True}
# Define Functions
def generate_transit_stops_geo(stop_dir):
stops=pd.read_csv(os.path.join(stop_dir, 'stops.txt'))
stops['geometry'] = list(zip(stops.stop_lon, stops.stop_lat))
stops['geometry'] = stops['geometry'].apply(Point)
stops = gp.GeoDataFrame(stops, geometry='geometry', crs={'init': 'epsg:4326'})
return stops
def generate_transit_shapes_geo(stop_dir, service_id):
shapes=pd.read_csv(os.path.join(stop_dir, 'shapes.txt'))
shapes_gdf = pd.DataFrame()
shape_ids = shapes.shape_id.unique().tolist()
rid = 0
for shpid in shape_ids:
shp = shapes[shapes['shape_id']==shpid].sort_values(by='shape_pt_sequence')
linestr = LineString(zip(shp.shape_pt_lon, shp.shape_pt_lat))
linestr = gp.GeoDataFrame(index=[shpid], crs='epsg:4326', geometry=[linestr])
shapes_gdf = shapes_gdf.append(linestr)
rid = rid + 1
shapes_gdf = shapes_gdf.reset_index()
shapes_gdf.columns = ['shape_id', 'geometry']
trips = pd.read_csv(os.path.join(stop_dir, 'trips.txt'))
trips = trips[trips['service_id']==service_id]
trips_shapes = shapes_gdf[shapes_gdf['shape_id'].isin(trips['shape_id'])]
return trips, shapes, trips_shapes
def frequent_bus_routes(gtfs_dir, service_id, peak_period, outname):
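    # Identify routes and stops served at least `min_trips` times during the chosen peak
    # period (AM 7:00-9:00 or PM 16:30-18:30), write their shapes and stops to shapefiles,
    # and return the frequent stop ids plus the per-route and per-stop trip counts.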
# input gtfs files
routes_info =pd.read_csv(os.path.join(gtfs_dir, 'routes.txt'))
stops = generate_transit_stops_geo(gtfs_dir)
trips, shapes, trips_shapes = generate_transit_shapes_geo(gtfs_dir, service_id)
stop_times = pd.read_csv(os.path.join(gtfs_dir, 'stop_times.txt'))
stop_times['hour'] = stop_times['arrival_time'].apply(lambda x: int(x[0:2]))
stop_times['minute'] = stop_times['arrival_time'].apply(lambda x: int(x[3:5]))
period_cols = ['route_id', 'direction_id']
    # There may be multiple shapes for the same route, so the most frequent shape is used for each route_id
trips_shapes_mcv = trips.groupby(period_cols)['shape_id'].agg(lambda x:x.value_counts().index[0]).reset_index()
start_stops_idx = stop_times.groupby(['trip_id'])['stop_sequence'].transform(min) == stop_times['stop_sequence']
trips_hour = pd.merge(trips,
stop_times[start_stops_idx][['trip_id', 'arrival_time', 'departure_time', 'hour', 'minute']],
on='trip_id', how='left')
    # trips occurring during the time period of interest
# whole period
if peak_period == 'AM':
trips_period = trips_hour[(trips_hour['hour']>=7) & (trips_hour['hour']<9)]
elif peak_period == 'PM':
trips_period= trips_hour[((trips_hour['hour']==16) & (trips_hour['minute']>=30)) | (trips_hour['hour']==17) | ((trips_hour['hour']==18) & (trips_hour['minute']<30))]
else:
print('Input needs to be either AM or PM')
# check if routes meet the minimum period and hourly requirements
route_period_counts = trips_period.groupby(period_cols).trip_id.count().reset_index()
route_period_counts.columns = period_cols +['total_trips']
route_frequent = route_period_counts[route_period_counts['total_trips']>= min_trips]
if len(route_frequent)>0:
route_frequent_shapes = route_frequent.merge(trips_shapes_mcv, on= period_cols, how='left')
route_frequent_shapes = trips_shapes.merge(route_frequent_shapes, on='shape_id')
route_frequent_shapes = route_frequent_shapes.merge(routes_info, on='route_id', how='left')
route_frequent_shapes.to_file(os.path.join(Coverage_Dir, 'frequent_routes_5min_' + outname + '_' + peak_period + '.shp'))
else:
print('No frequent routes found for %s %s' % (outname, peak_period))
# frequent stops
stop_cols = ['stop_id', 'route_id', 'direction_id']
stop_times_by_route = stop_times.merge(trips[['route_id', 'direction_id', 'trip_id']], on='trip_id', how='left')
if peak_period == 'AM':
stop_route_period = stop_times_by_route[(stop_times_by_route['hour']>=7) & (stop_times_by_route['hour']<9)]
elif peak_period == 'PM':
stop_route_period = stop_times_by_route[((stop_times_by_route['hour']==16) & (stop_times_by_route['minute']>=30)) | (stop_times_by_route['hour']==17) | ((stop_times_by_route['hour']==18) & (stop_times_by_route['minute']<30))]
else:
print('Input needs to be either AM or PM')
stop_period_counts = stop_route_period.groupby(stop_cols).trip_id.count().reset_index()
stop_period_counts.columns = stop_cols + ['total_trips']
stop_frequent = stop_period_counts[stop_period_counts['total_trips']>= min_trips]
if len(stop_frequent)>0:
stop_frequent_list = stop_frequent.stop_id.unique().tolist()
stop_frequent_gdf = stops[stops['stop_id'].isin(stop_frequent_list)]
stop_frequent_gdf.to_file(os.path.join(Coverage_Dir, 'frequent_stops_5min_' + outname + '_' + peak_period + '.shp'))
else:
print('No frequent stops found for %s %s' % (outname, peak_period))
stop_frequent_list=[]
return stop_frequent_list, route_period_counts, stop_period_counts
# TAZ Zones
taz_shp = gp.read_file(os.path.join(TAZ_Dir, taz_file))
taz_sf_shp = taz_shp[taz_shp['COUNTY']==1]
taz_sf_shp = taz_sf_shp.to_crs(cal3)
# Streets network
streets = gp.read_file(os.path.join(Streets_Dir, street_file))
streets.insert(0, 'LinkID', range(1, len(streets)+1))
streets = streets.to_crs(cal3)
def latlong(x):
return round(x.coords.xy[1][0],6), round(x.coords.xy[0][0], 6), round(x.coords.xy[1][-1], 6), round(x.coords.xy[0][-1], 6)
streets['B_Lat'], streets['B_Long'], streets['E_Lat'], streets['E_Long'] = zip(*streets['geometry'].map(latlong))
b_nodes = streets[['B_Lat', 'B_Long']]
b_nodes.columns = ['Lat', 'Long']
e_nodes = streets[['E_Lat', 'E_Long']]
e_nodes.columns = ['Lat', 'Long']
streets_endnodes = b_nodes.append(e_nodes, ignore_index=True).reset_index()
# Assign unique node id
endnodes_cnt=streets_endnodes.groupby(['Lat', 'Long']).index.count().reset_index()
endnodes_cnt.rename(columns={'index':'NodeCnt'}, inplace=True)
endnodes_cnt['NodeID'] = endnodes_cnt.index+1
# Generate the the unique node shapefile
#endnodes_cnt['geometry'] = list(zip(endnodes_cnt.Long, endnodes_cnt.Lat))
#endnodes_cnt['geometry'] = endnodes_cnt['geometry'].apply(Point)
#endnodes_unique_gpd = gp.GeoDataFrame(endnodes_cnt, geometry='geometry')
#endnodes_unique_gpd.crs = cal3
#endnodes_unique_gpd.to_file(os.path.join(Streets_Dir, 'streets_endnodes.shp'))
endnodes_cnt = endnodes_cnt[['Lat', 'Long', 'NodeCnt', 'NodeID']]
endnodes_cnt.columns = ['B_Lat', 'B_Long', 'B_NodeCnt', 'B_NodeID']
streets = streets.merge(endnodes_cnt, on=['B_Lat', 'B_Long'], how='left')
endnodes_cnt.columns = ['E_Lat', 'E_Long', 'E_NodeCnt', 'E_NodeID']
streets = streets.merge(endnodes_cnt, on=['E_Lat', 'E_Long'], how='left')
endnodes_cnt.columns = ['Lat', 'Long', 'NodeCnt', 'NodeID']
streets['length'] = 3.2808 * streets.geometry.length
streets['b_e'] = list(zip(streets['B_NodeID'], streets['E_NodeID']))
streets['e_b'] = list(zip(streets['E_NodeID'], streets['B_NodeID']))
# Save the updated street shapefile with endnodes
#outcols = [c for c in streets.columns.tolist() if c not in ['b_e', 'e_b']]
#streets[outcols].to_file(os.path.join(Streets_Dir, 'streets_with_endnodes.shp'))
# Build Walking Network
def build_walking_network(gtfs_dir):
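    # Project the GTFS stops to the state-plane CRS, keep those inside San Francisco TAZs,
    # snap each stop to its nearest street link within a 300 ft search radius, and build an
    # undirected street-network graph weighted by link length in feet.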
stops = generate_transit_stops_geo(gtfs_dir)
stops = stops.to_crs(cal3)
stops_within_sf = gp.sjoin(stops, taz_sf_shp, op='within').reset_index()
stops_within_sf = stops_within_sf[stops.columns]
stops_within_sf.insert(0, 'NodeID', range(endnodes_cnt['NodeID'].max() + 1, endnodes_cnt['NodeID'].max() + 1 + len(stops_within_sf)))
search_radius = 300 # ft
stops_geo = stops_within_sf.copy()
stops_geo['point_geo'] = stops_geo['geometry']
stops_geo['geometry'] = stops_geo['geometry'].buffer(search_radius/3.2808)
stop_near_links = gp.sjoin(streets[['LinkID', 'B_NodeID', 'E_NodeID', 'length', 'geometry']], stops_geo, op='intersects')
def calc_dist(x):
stop_point = x['point_geo']
link_geo = x['geometry']
x['near_dist'] = stop_point.distance(link_geo)
x['stop_to_begin'] = link_geo.project(stop_point) * 3.2808 #meters to feet
x['stop_to_end'] = x['length'] - x['stop_to_begin']
return x
stop_near_links = stop_near_links.apply(calc_dist, axis=1)
stop_near_links = stop_near_links.sort_values(['stop_id','near_dist'])
stop_near_links = stop_near_links.drop_duplicates('stop_id')
stop_near_links['near_link'] = stop_near_links['LinkID']
stop_near_links['near_link_bid'] = stop_near_links['B_NodeID']
stop_near_links['near_link_eid'] = stop_near_links['E_NodeID']
stop_near_links = stop_near_links.reset_index()
# construct a network graph
tgraph = nx.Graph()
# road network nodes
tgraph.add_nodes_from(endnodes_cnt.NodeID.tolist())
# road network links
for i in range (0, len(streets)):
tgraph.add_edge(streets.loc[i,'B_NodeID'],
streets.loc[i,'E_NodeID'],
weight = streets.loc[i, 'length'])
return stops_within_sf, stop_near_links, tgraph
def stop_walking_area(walk_graph, walk_dis, start_node, link_near_stop):
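    # Run single-source Dijkstra from the stop node with the walking distance as cutoff,
    # collect every street link traversed by the resulting shortest paths (plus the link
    # the stop sits on), and return the convex hull of those links as the stop's
    # walk-access polygon.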
cur_path = dict(nx.single_source_dijkstra_path(walk_graph, start_node, cutoff=walk_dis, weight='weight'))
del cur_path[start_node]
reach_links = {}
for key in cur_path:
sub_path = list(zip(cur_path[key][:-1],cur_path[key][1:]))
for each_link in sub_path:
if each_link in reach_links:
next
else:
reach_links[each_link] = 1
reach_links_df = pd.DataFrame.from_dict(reach_links, orient='index',columns=['accessed']).reset_index()
reach_links_df.rename(columns={'index':'b_e'},inplace=True)
streets_access = streets[(streets['b_e'].isin(reach_links_df['b_e'])) | (streets['e_b'].isin(reach_links_df['b_e'])) | (streets['LinkID']==link_near_stop)]
geom = [x for x in streets_access.geometry]
multi_line = geometry.MultiLineString(geom)
multi_line_polygon = multi_line.convex_hull
return multi_line_polygon
# Accessible area from high frequent stops
def frequent_access_area(walk_graph, stop_list, stop_with_nearest_link, buffer_radius):
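    # For each frequent stop, temporarily connect the stop node to both endpoints of its
    # nearest street link, compute the walk-access polygon within buffer_radius, and
    # collect the per-stop polygons into a single GeoDataFrame.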
stop_access_gdf = gp.GeoDataFrame()
for cur_stop_id in stop_list:
lidx = stop_with_nearest_link.index[stop_with_nearest_link['stop_id']==cur_stop_id][0]
cur_node_id = stop_with_nearest_link.loc[lidx, 'NodeID']
cur_link = stop_with_nearest_link.loc[lidx, 'near_link']
cur_graph = walk_graph.copy()
cur_graph.add_node(cur_node_id)
cur_graph.add_edge(stop_with_nearest_link.loc[lidx,'near_link_bid'],
stop_with_nearest_link.loc[lidx,'NodeID'],
weight = stop_with_nearest_link.loc[lidx, 'stop_to_begin'])
cur_graph.add_edge(stop_with_nearest_link.loc[lidx,'NodeID'],
stop_with_nearest_link.loc[lidx,'near_link_eid'],
weight = stop_with_nearest_link.loc[lidx, 'stop_to_end'])
get_geo = stop_walking_area(cur_graph, buffer_radius, cur_node_id, cur_link)
cur_access_polygon = gp.GeoDataFrame(index=[0], crs=cal3, geometry=[get_geo])
cur_access_polygon['stop_id'] = cur_stop_id
stop_access_gdf = stop_access_gdf.append(cur_access_polygon, ignore_index=True)
return stop_access_gdf
# Attach TAZ attributes
taz_dbf = Dbf5(os.path.join(Coverage_Dir, 'tazdata.dbf'))
taz = taz_dbf.to_dataframe()
taz['SFTAZ'] = taz['SFTAZ'].astype(int)
taz_sf = taz_sf_shp.merge(taz, left_on = 'TAZ', right_on = 'SFTAZ', how = 'left')
taz_sf["area_acre"] = taz_sf['geometry'].area * 0.00024711 #Square meters to acres
def frequent_stops_access_taz(frequent_stops_access_union):
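    # Intersect the unioned walk-access area with each TAZ polygon and prorate TAZ
    # households, jobs and population by the share of the TAZ's area that is covered.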
frequent_stops_access_taz= taz_sf_shp['geometry'].intersection(frequent_stops_access_union)
taz_sf_access_gdf = gp.GeoDataFrame()
taz_sf_access_gdf['accessarea'] = frequent_stops_access_taz.area* 0.00024711 #Square meters to acres
taz_sf_access_gdf['index'] = frequent_stops_access_taz.index
taz_sf_access_gdf['geometry'] = frequent_stops_access_taz.geometry
taz_sf_access_tazid = taz_sf_access_gdf.merge(taz_sf_shp[['TAZ', 'AREALAND']], left_on='index', right_index=True, how='left')
taz_sf_access_attrs = taz_sf[['TAZ', 'AREALAND', 'HHLDS', 'TOTALEMP', 'POP', 'area_acre']].merge(taz_sf_access_tazid, on=['TAZ', 'AREALAND'], how='left')
taz_sf_access_attrs['areapcnt'] = 100 * taz_sf_access_attrs['accessarea'] / taz_sf_access_attrs['area_acre']
taz_sf_access_attrs['access_pop'] = taz_sf_access_attrs['POP'] * taz_sf_access_attrs['areapcnt'] / 100
taz_sf_access_attrs['access_jobs'] = taz_sf_access_attrs['TOTALEMP'] * taz_sf_access_attrs['areapcnt'] / 100
taz_sf_access_attrs['access_hhlds'] = taz_sf_access_attrs['HHLDS'] * taz_sf_access_attrs['areapcnt'] / 100
outcols = ['accessarea', 'index', 'TAZ', 'AREALAND', 'HHLDS', 'TOTALEMP',
'POP', 'area_acre', 'areapcnt', 'access_pop', 'access_jobs', 'access_hhlds']
return taz_sf_access_attrs[outcols]
df_coverage = pd.DataFrame()
import os
from pathlib import Path
import time
from datetime import datetime
import json
import traceback
import uuid
import pandas as pd
import dash
from dash.dependencies import Input, Output, State
from dash_extensions.enrich import ServersideOutput
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
# import plotly.graph_objs as go
import plotly.express as px
import requests
import asyncio
from sseclient import SSEClient
# from iexfinance.stocks import Stock
# Local imports
from __init__ import HERE, TIMEOUT_12HR, DEFAULT_TICKER, DEFAULT_SNAPSHOT_UUID, ticker_dict, exchange_list
from app import app, cache, db, logger
from dash_utils import make_table, replace_str_element_w_dash_component
from get_fin_report import get_financial_report, get_yahoo_fin_values, get_number_from_string, get_string_from_number, get_sector_data
from get_dcf_valuation import get_dcf_df
def handler_data_message(title, exception_obj):
return [{
'status-info': html.P(children=title,
style={'backgroundColor': 'red', 'fontSize': '200%'}),
'supp-data': html.P(children=str(exception_obj),
style={'color': 'red'})
}]
@app.callback([Output('ticker-input', 'value'),
Output('analysis-mode', 'value'),
Output('snapshot-uuid', 'value'),
Output('handler-parseURL', 'data')],
[Input('nav-dcf', 'active'),
Input('url', 'pathname')])
def parse_ticker(dcf_app_active, pathname):
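    # Expected URL forms: /apps/dcf, /apps/dcf/<ticker>, or /apps/dcf/<ticker>/<snapshot-uuid>.
    # When no snapshot id is supplied, a fresh UUID5 is generated for the ticker; a malformed
    # UUID is reported through the parse-URL handler store.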
if dcf_app_active:
parse_ticker = pathname.split('/apps/dcf')[-1].split('/')
if len(parse_ticker) == 1:
return DEFAULT_TICKER, [1], str(uuid.uuid5(uuid.uuid4(), DEFAULT_TICKER)), dash.no_update
elif len(parse_ticker) == 2:
ticker_value = parse_ticker[1].upper() or DEFAULT_TICKER
return ticker_value, [1], str(uuid.uuid5(uuid.uuid4(), ticker_value)), dash.no_update
else: # >=3
if parse_ticker[2]:
try:
uuid_val = uuid.UUID(parse_ticker[2], version=5)
if uuid_val.hex == parse_ticker[2].replace('-',''):
return parse_ticker[1].upper() or DEFAULT_TICKER, [], parse_ticker[2], dash.no_update
else:
raise ValueError("Bad Snapshot ID from URL: " + parse_ticker[2])
except:
return parse_ticker[1].upper() or DEFAULT_TICKER, [], '', handler_data_message('See Error Message(s) below:', traceback.format_exc())
else:
return parse_ticker[1].upper(), [], str(uuid.uuid5(uuid.uuid4(), parse_ticker[1].upper())), dash.no_update
else:
raise PreventUpdate
@app.callback([Output('snapshot-link', 'href'),
Output('save-snapshot', 'disabled'),
Output('snapshot-link', 'disabled')],
[Input('analysis-mode', 'value'),
Input('save-snapshot', 'n_clicks'),
Input('ticker-input', 'value'),
Input('snapshot-uuid', 'value')],
State('dcf-store', 'data'))
def save_snapshot(live_analysis_mode, save_button_clicked, ticker, snapshot_uuid, df_dict):
if 1 in live_analysis_mode: # generate a fresh UUID
snapshot_uuid = str(uuid.uuid5(uuid.UUID(snapshot_uuid), ticker))
if save_button_clicked:
# df_dict[ticker] = {**df_dict[ticker], **dcf_dict[ticker]}
if 'analysis_timestamp' in df_dict[ticker]['stats_dict']: # >= v0.6-alpha.3
df_dict[ticker]['stats_dict']['analysis_timestamp'] += f',\n{snapshot_uuid} : Analysis saved @ {datetime.now().strftime("%b %-m %Y %H:%M:%S")}'
db.set(ticker+'-'+snapshot_uuid, json.dumps(df_dict))
return '/apps/dcf/' + ticker + '/' + snapshot_uuid, False, not save_button_clicked
else:
return dash.no_update, True, True
@app.callback([Output('status-info', 'children'),
Output('supp-info', 'children')],
[Input('handler-parseURL', 'data'),
Input('handler-ticker-valid', 'data'),
Input('handler-past-data', 'data'),
Input('handler-dcf-data', 'data'),
Input('handler-lastpricestream', 'data'),
Input('status-info', 'loading_state')])
def refresh_for_update(handler_parseURL, handler_ticker, handler_past, handler_dcf, handler_lastpricestream, status_loading_dict):
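    # Merge the status and supplementary messages emitted by the individual handler stores
    # into the single status panel; while the ticker handler reports a loading state, show
    # its interim message instead of aggregating.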
ctx = dash.callback_context
if not ctx.triggered:
return tuple(["Enter Ticker to continue"] * 2)
status_msg = []
supp_msg = []
triggered_elements = [c['prop_id'] for c in ctx.triggered]
if 'handler-ticker-valid.data' in triggered_elements and ctx.inputs['status-info.loading_state']['is_loading']:
return ctx.inputs['handler-ticker-valid.data'][0]['status-info'], ctx.inputs['handler-ticker-valid.data'][0]['supp-data']
# return 'Updating...', 'Updating...'
else:
update_data = [d for c, d in ctx.inputs.items() if '.data' in c]
for d in update_data:
if d:
status = d[0]['status-info'] # always 1 element is sent by handler, so use 0
status_msg += status
supp = d[0]['supp-data']
if isinstance(supp, str):
supp_msg.extend(replace_str_element_w_dash_component(supp, repl_dash_component=[]))
elif supp: # it is a dcc or html component, get children
supp_msg.extend(replace_str_element_w_dash_component(supp['props']['children']))
return status_msg, supp_msg or dash.no_update
@app.callback([Output("ticker-input", "valid"),
Output("ticker-input", "invalid"),
Output("ticker-allcaps", "children"),
Output('handler-ticker-valid', 'data')],
[Input("ticker-input", "value")])
def check_ticker_validity(ticker):
try:
if not ticker:
raise ValueError("Ticker Value is Empty, please Type Ticker, press Enter or Tab to continue analysis.")
ticker_allcaps = ticker.upper()
if ticker_allcaps in ticker_dict(): # Validate with https://sandbox.iexapis.com/stable/ref-data/symbols?token=
is_valid_ticker = True
return is_valid_ticker, not is_valid_ticker, 'Getting financial data... for: ' + ticker_dict()[ticker_allcaps], [{'status-info': 'Market Price used in Calculation: ',
'supp-data': ''}]
else:
raise ValueError("Invalid Ticker entered: " + ticker + '\nValid Tickers from listed Exchanges:\n' + '\n'.join(exchange_list()))
except Exception as InvalidTicker:
# dbc.Alert(
# str(InvalidTicker),
# id="alert-invalid-ticker",
# dismissable=True,
# is_open=True,
# )
logger.exception(InvalidTicker)
return False, True, '', handler_data_message('See Error Message(s) below:',
traceback.format_exc())
@app.callback([ServersideOutput('fin-store', 'data'),
Output('select-column', 'options'),
Output('status-info', 'loading_state'),
Output('handler-past-data', 'data')],
[Input('ticker-input', 'valid')],
[State('ticker-input', 'value'),
State('analysis-mode', 'value'),
State('snapshot-uuid', 'value')])
def fin_report(ticker_valid, ticker, live_analysis_mode, snapshot_uuid):
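    # Fetch the ticker's financial report and summary statistics (or, when not in live
    # mode, load a previously saved snapshot from the Redis store), cache the result
    # server-side, and populate the column-selection options.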
if not ticker_valid:
return [], [], {'is_loading': True}, dash.no_update
try:
ticker_allcaps = ticker.upper()
db_key = ticker_allcaps+'-'+snapshot_uuid
if 1 in live_analysis_mode or not db.exists(db_key):
df, lastprice, lastprice_time, report_date_note = get_financial_report(ticker_allcaps)
next_earnings_date, beta = get_yahoo_fin_values(ticker_allcaps)
stats_record = {'ticker': ticker_allcaps,
'lastprice': float(lastprice.replace(',','')),
'lastprice_time': lastprice_time,
'beta': beta,
'next_earnings_date': next_earnings_date,
'report_date_note': report_date_note,
'analysis_timestamp': datetime.now().strftime("%b %-m %Y %H:%M:%S"),
}
df_dict = {ticker_allcaps: {'fin_report_dict': df.to_dict('records'), 'stats_dict': stats_record}}
else:
df_dict = json.loads(db.get(db_key)) # pull output callback from from server cache or database: redis
if not df_dict:
raise KeyError('Redis Key not found: ' + db_key + '\nPlease click the app tab link to refresh state!')
        df = pd.DataFrame.from_dict(df_dict[ticker_allcaps]['fin_report_dict'])
import unittest
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, FloatType
from haychecker.dhc.metrics import entropy
replace_empty_with_null = udf(lambda x: None if x == "" else x, StringType())
replace_0_with_null = udf(lambda x: None if x == 0 else x, IntegerType())
replace_0dot_with_null = udf(lambda x: None if x == 0. else x, FloatType())
replace_every_string_with_null = udf(lambda x: None, StringType())
replace_every_int_with_null = udf(lambda x: None, IntegerType())
replace_every_float_with_null = udf(lambda x: None, FloatType())
class TestEntropy(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestEntropy, self).__init__(*args, **kwargs)
self.spark = SparkSession.builder.master("local[2]").appName("entropy_test").getOrCreate()
self.spark.sparkContext.setLogLevel("ERROR")
def test_empty(self):
data = pd.DataFrame()
data["c1"] = []
data["c2"] = []
schema = [StructField("c1", IntegerType(), True), StructField("c2", StringType(), True)]
df = self.spark.createDataFrame(data, StructType(schema))
r1 = entropy(0, df)[0]
self.assertEqual(r1, 0.)
def test_allnull(self):
data = pd.DataFrame()
data["c1"] = [chr(i) for i in range(100)]
data["c2"] = [i for i in range(100)]
data["c3"] = [i / 0.7 for i in range(100)]
df = self.spark.createDataFrame(data)
df = df.withColumn("c1", replace_every_string_with_null(df["c1"]))
df = df.withColumn("c2", replace_every_int_with_null(df["c2"]))
df = df.withColumn("c3", replace_every_float_with_null(df["c3"]))
r = entropy(0, df)[0]
self.assertEqual(r, 0.)
r = entropy(1, df)[0]
self.assertEqual(r, 0.)
r = entropy(2, df)[0]
self.assertEqual(r, 0.)
def test_allequal(self):
data = pd.DataFrame()
data["c1"] = [chr(0) for _ in range(100)]
data["c2"] = [1 for _ in range(100)]
data["c3"] = [0.7 for _ in range(100)]
df = self.spark.createDataFrame(data)
r = entropy(0, df)[0]
self.assertEqual(r, 0.)
r = entropy(1, df)[0]
self.assertEqual(r, 0.)
r = entropy(2, df)[0]
self.assertEqual(r, 0.)
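    # Why zero is expected above (illustrative note, not part of the original tests): a column whose
    # values are all identical has a single outcome with probability 1, so its Shannon entropy is
    # -1 * log(1) = 0 regardless of the logarithm base; the all-null columns above are likewise
    # expected to yield 0, per the assertions in test_allnull.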
def test_halfnull_halfequal(self):
data = | pd.DataFrame() | pandas.DataFrame |
# all_in_one is a function created to split a dataset into 3 parts and to handle the repetitive tasks, namely drawing learning curves, ROC curves and model classification analysis (error analysis).
# Import basic libraries
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import time
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neural_network import MLPClassifier
#To get probabilty of SGD
from sklearn.calibration import CalibratedClassifierCV
base_model = SGDClassifier()
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import learning_curve
#################################################################################################################################
# Import libraries for performance analysis (Error analysis)
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
#################################################################################################################################
### We want to split our dataset into the following three subsets.
# 1st: Training dataset ~ 60% of the total dataset
# 2nd: Cross-validation dataset ~ 20% of the total dataset
# 3rd: Testing dataset ~ 20% of the total dataset
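# A quick worked example of the split arithmetic used below (illustrative note, not part of the original script):
# test_size=0.2 on the full dataset leaves 80% in training1; test_size=0.25 on training1 then
# takes 0.25 * 0.80 = 0.20 of the full dataset for validation, leaving 0.75 * 0.80 = 0.60 for
# training, which gives the 60/20/20 split described above.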
from sklearn.model_selection import train_test_split
def all_in_one_test(features,target):
#Split X and y in training and testing data by 80:20 ratio.
X_train1, X_test, y_train1, y_test = train_test_split(features,target, test_size=0.2, random_state=1)
    #Again, split the training1 dataset into training and cross-validation datasets by a 75:25 ratio (60%/20% of the full dataset).
X_train, X_val, y_train, y_val = train_test_split(X_train1, y_train1, test_size=0.25, random_state=1)
print ("Training Dataset :", X_train.shape, y_train.shape)
print ("Testing Dataset:", X_test.shape, y_test.shape)
print ("Validation Dataset:", X_val.shape, y_val.shape)
# Create a list of classifiers
classifiers = [LogisticRegression() , DecisionTreeClassifier() ,RandomForestClassifier(), SVC(probability=True), GaussianNB(), KNeighborsClassifier(), GradientBoostingClassifier(),CalibratedClassifierCV(base_model) , MLPClassifier()]
# Create a dictionary of the classifiers
classifiers_dict = {'Logistic Classifier': LogisticRegression(), 'Decision_Tree Classifier': DecisionTreeClassifier(), 'Random_Forest Classifier': RandomForestClassifier(),
'SVM Classifier': SVC(probability=True), "GaussianNB Classifier":GaussianNB(), "KNN Classifiers": KNeighborsClassifier(),"XGB Classifier": GradientBoostingClassifier(),
"SGD Classifier":CalibratedClassifierCV(base_model) , 'MLP Classifier':MLPClassifier()}
# All Learning Curves in one figure
from sklearn.model_selection import learning_curve
fig, axs = plt.subplots(3,3, figsize=(15, 10))
fig.subplots_adjust(hspace = 0.25, wspace=0.25)
axs = axs.ravel()
List = ['Logistic Regression', 'Decision Tree', 'Random Forest' ,'SVM' , 'Gaussian NB' , 'KNN', 'XGB', 'SGD', 'MLP']
k = 0
for i in range(len(classifiers)):
train_sizes, train_scores, test_scores = learning_curve(classifiers[i], features,target)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
axs[i].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axs[i].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
axs[i].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axs[i].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axs[i].legend(loc= 'lower right')
axs[i].set_ylim([0.0,1.1])
axs[i].set_title(str(List[k]))
k = k+1
plt.show()
# All Classification reports + Accuracy reports + Confusion matrices
results = pd.DataFrame([[0, 0,0,0, 0,0 ,0]],columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 ','ROC', 'Time'])
for name, classifier in classifiers_dict.items():
print(name)
start = time.time()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)
end = time.time()
print(classification_report(y_test, y_pred))
conf_mat = confusion_matrix(y_test,y_pred)
print('Confusion matrix:\n',conf_mat)
        labels = ['Class 0', 'Class 1']
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_mat,cmap = plt.cm.Blues)
fig.colorbar(cax)
ax.set_xticklabels(['']+ labels)
ax.set_yticklabels(['']+ labels)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
print ("\n",accuracy_score(y_test, y_pred))
print("\n Time taken by the algorithm to get trained and for prediction :",end-start)
print ('\n==========================================================================\n')
roc=roc_auc_score(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
t = end-start
#Model_results = pd.DataFrame(columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score','ROC'])
model_results = | pd.DataFrame([[name, acc,prec,rec, f1,roc, t]],columns = ['Model', 'Accuracy', 'Precision', 'Recall', 'F1 ','ROC','Time']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:28:30 2020
@author: rener
"""
import numpy as np
import pandas as pd
import os
from datetime import date
import time
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
#%%
frames=[]
for file in os.listdir('Stocks'):
frames.append(
pd.read_csv('Stocks/' +file,index_col=0))
# For the various companies we have data going back differently far.
# So there is a decision to make: we could look for the shortest
# available timeseries and trim all other datasets to the same length.
# But then whenever we compute a covariance for two longer datasets
# we would not use all available information.
# So instead we trim each pair inside the covariance-computing function.
df= | pd.concat(frames) | pandas.concat |
import copy
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import matplotlib.pyplot as plt
import pandas as pd
import subprocess
from template import Template
from themeclasses import *
import time
class DERVET(Template):
def __init__(self, parent, controller, bd):
Template.__init__(self, parent, controller, bd)
# bill with baseline, newload, and DER results
self.bill = pd.DataFrame()
self.billts = pd.DataFrame()
self.tsresults = pd.DataFrame()
# Button to run dervet
run_dervet = tk.Button(self, text='Run DER-VET', command=lambda: self.rundervet())
run_dervet.grid(row=0, column=0, sticky='nsew')
plot_dervetts = tk.Button(self, text='Plot DER-VET Time Series Results', command=lambda: self.plotdervetts())
plot_dervetts.grid(row=1, column=0, sticky='nsew')
plot_dervetebill = tk.Button(self, text='Plot DER-VET Energy Bill Results', command=lambda: self.plotdervetbill())
plot_dervetebill.grid(row=2, column=0, sticky='nsew')
plot_dervetdbill = tk.Button(self, text='Plot DER-VET Demand Bill Results', command=lambda: self.plotdervetdbill())
plot_dervetdbill.grid(row=3, column=0, sticky='nsew')
# Set up plotting canvas
self.plotwindow = tk.Frame(self)
self.plotwindow.grid(row=10, column=1)
self.figure = plt.figure(num=5, figsize=(10, 5), dpi=100)
self.axes = self.figure.add_subplot(111)
self.chart_type = FigureCanvasTkAgg(self.figure, self.plotwindow)
self.toolbar = NavigationToolbar2Tk(self.chart_type, self.plotwindow)
self.toolbar.update()
self.chart_type._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# Set up text results
self.textwindow = tk.Frame(self)
self.textwindow.grid(row=10, column=0)
self.sizekw = tk.DoubleVar()
self.sizekw.set(0)
kwrow = tk.Label(self.textwindow, text='Optimal Battery Size (kW): ', justify='left')
kwrow.grid(row=0, column=0)
sizekwlab = tk.Label(self.textwindow, textvariable=self.sizekw, justify='left')
sizekwlab.grid(row=0, column=1)
self.sizekwh = tk.DoubleVar()
self.sizekwh.set(0)
kwhrow = tk.Label(self.textwindow, text='Optimal Battery Size (kWh): ', justify='left')
kwhrow.grid(row=1, column=0)
sizekwhlab = tk.Label(self.textwindow, textvariable=self.sizekwh, justify='left')
sizekwhlab.grid(row=1, column=1)
self.sizedur = tk.DoubleVar()
self.sizedur.set(0)
durrow = tk.Label(self.textwindow, text='Optimal Battery Duration (hrs): ', justify='left')
durrow.grid(row=2, column=0)
sizedurlab = tk.Label(self.textwindow, textvariable=self.sizedur, justify='left')
sizedurlab.grid(row=2, column=1)
self.ccost = tk.DoubleVar()
self.ccost.set(0)
ccostrow = tk.Label(self.textwindow, text='Estimated Battery Capital Cost ($): ', justify='left')
ccostrow.grid(row=3, column=0)
ccostlab = tk.Label(self.textwindow, textvariable=self.ccost, justify='left')
ccostlab.grid(row=3, column=1)
self.savings = tk.DoubleVar()
self.savings.set(0)
savingsrow = tk.Label(self.textwindow, text='Yearly Savings from battery', justify='left')
savingsrow.grid(row=4, column=0)
savingslab = tk.Label(self.textwindow, textvariable=self.savings, justify='left')
savingslab.grid(row=4, column=1)
def rundervet(self):
# detect DER-VET install and stop if not available
# detect if newload is up to date
# Edit timeseries
tstemp = pd.read_csv(self.controller.parameters['DERVET_TIMESERIES_FILENAME'])
# check if timeseries has the right number of rows
if self.controller.frames['Load'].data.shape[0] != tstemp.shape[0]:
print('ERROR: DER-VET time step needs to match DEFT time step.')
print('DEFT shows ' + str(self.controller.frames['Load'].data.shape[0]) + ' time steps')
print('DER-VET shows ' + str(tstemp.shape[0]) + ' time steps.')
print('Please upload a DER-VET time series file with the correct number of rows')
return 1
tstemp['Site Load (kW)'] = copy.copy(self.controller.frames['Newload'].newload['Busy Day New Load'])
tstemp.to_csv(self.controller.parameters['DERVET_TIMESERIES_FILENAME'], index=False) # save timeseries csv
# Edit model parameters
mptemp = pd.read_csv(self.controller.parameters['DERVET_MODEL_PARAMETERS_REFERENCE'])
mptemp.loc[mptemp['Key'] == 'customer_tariff_filename', 'Optimization Value']\
= self.controller.frames['Tariff'].filename
if self.controller.frames['Load'].loadlimit.get() > 0:
mptemp.loc[mptemp['Key'] == 'max_import', 'Optimization Value'] = \
-self.controller.frames['Load'].loadlimit.get()
mptemp.to_csv(self.controller.parameters['DERVET_MODEL_PARAMETERS_FILENAME'])
print('Starting DER-VET at ' + str(time.time()))
starttime = time.time()
subprocess.run(["./dervet/python.exe", "./dervet/dervet/run_DERVET.py",
self.controller.parameters['DERVET_MODEL_PARAMETERS_FILENAME']])
print('DER-VET finished at ' + str(time.time()))
endtime = time.time()
print('Taking a Total of ' + str(endtime-starttime) + ' seconds')
self.plotdervetts()
def plotdervetts(self):
# read timeseries results
self.tsresults = pd.read_csv(self.controller.parameters['DERVET_TIMESERIES_RESULTS_FILENAME'])
plt.figure(5)
plt.cla() # Clear the plotting window to allow for re-plotting.
# Plot the peak day
plt.step(x=pd.to_datetime(self.tsresults['Start Datetime (hb)'], format='%Y-%m-%d %H:%M:%S'),
y=self.tsresults['LOAD: Site Load Original Load (kW)'],
where='post', color=theme['batterycolor'], label='New Load with ZE Equipment')
plt.step(x= | pd.to_datetime(self.tsresults['Start Datetime (hb)'], format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
import numpy as np
import pandas as pd
import remixt.bamreader
import os
empty_data = {
'fragments': remixt.bamreader.create_fragment_table(0),
'alleles': remixt.bamreader.create_allele_table(0),
}
def _get_key(record_type, chromosome):
return '/{}/chromosome_{}'.format(record_type, chromosome)
def _unique_index_append(store, key, data):
try:
nrows = store.get_storer(key).nrows
except (AttributeError, KeyError):
nrows = 0
data.index = pd.Series(data.index) + nrows
if nrows == 0:
store.put(key, data, format='table')
else:
store.append(key, data)
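# Illustrative note (not part of the original module): _unique_index_append keeps a globally
# unique integer index per HDF key by offsetting each appended chunk with the current row count,
# e.g. appending two 3-row frames (with default 0-based indices) under the same key yields
# indices 0..2 and then 3..5.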
def merge_overlapping_seqdata(outfile, infiles, chromosomes):
out_store = pd.HDFStore(outfile, 'w', complevel=9, complib='blosc')
index_offsets = pd.Series(0, index=chromosomes, dtype=np.int64)
for _id, infile in infiles.items():
store = pd.HDFStore(infile)
tables = store.keys()
for chromosome in chromosomes:
allele_table = '/alleles/chromosome_{}'.format(chromosome)
fragment_table = '/fragments/chromosome_{}'.format(chromosome)
if allele_table not in tables:
print("missing table {}".format(allele_table))
continue
if fragment_table not in tables:
print("missing table {}".format(fragment_table))
continue
alleles = store[allele_table]
fragments = store[fragment_table]
alleles['fragment_id'] = alleles['fragment_id'].astype(np.int64)
fragments['fragment_id'] = fragments['fragment_id'].astype(np.int64)
alleles['fragment_id'] += index_offsets[chromosome]
fragments['fragment_id'] += index_offsets[chromosome]
index_offsets[chromosome] = max(alleles['fragment_id'].max(), fragments['fragment_id'].max()) + 1
out_store.append('/alleles/chromosome_{}'.format(chromosome), alleles)
out_store.append('/fragments/chromosome_{}'.format(chromosome), fragments)
store.close()
out_store.close()
def create_chromosome_seqdata(seqdata_filename, bam_filename, snp_filename, chromosome, max_fragment_length, max_soft_clipped, check_proper_pair):
""" Create seqdata from bam for one chromosome.
Args:
seqdata_filename(str): seqdata hdf store to write to
bam_filename(str): bam from which to extract read information
snp_filename(str): TSV chromosome, position file listing SNPs
chromosome(str): chromosome to extract
max_fragment_length(int): maximum length of fragments generating paired reads
max_soft_clipped(int): maximum soft clipping for considering a read concordant
        check_proper_pair(bool): check proper pair flag
"""
reader = remixt.bamreader.AlleleReader(
bam_filename,
snp_filename,
chromosome,
max_fragment_length,
max_soft_clipped,
check_proper_pair,
)
with pd.HDFStore(seqdata_filename, 'w', complevel=9, complib='zlib') as store:
while reader.ReadAlignments(10000000):
_unique_index_append(store, _get_key('fragments', chromosome), reader.GetFragmentTable())
_unique_index_append(store, _get_key('alleles', chromosome), reader.GetAlleleTable())
def create_seqdata(seqdata_filename, bam_filename, snp_filename, max_fragment_length, max_soft_clipped, check_proper_pair, tempdir, chromosomes):
    try:
        os.makedirs(tempdir)
    except OSError:
        pass
all_seqdata = {}
for chrom in chromosomes:
chrom_seqdata = os.path.join(tempdir, "{}_seqdata.h5".format(chrom))
all_seqdata[chrom] = chrom_seqdata
create_chromosome_seqdata(
chrom_seqdata, bam_filename, snp_filename,
chrom, max_fragment_length, max_soft_clipped,
check_proper_pair
)
merge_seqdata(seqdata_filename, all_seqdata)
def merge_seqdata(out_filename, in_filenames):
""" Merge seqdata files for non-overlapping sets of chromosomes
Args:
        out_filename(str): seqdata hdf store to write to
        in_filenames(dict): seqdata hdf stores to read from
"""
with pd.HDFStore(out_filename, 'w', complevel=9, complib='zlib') as out_store:
for in_filename in in_filenames.values():
with pd.HDFStore(in_filename, 'r') as in_store:
for key in in_store.keys():
out_store.put(key, in_store[key], format='table')
class Writer(object):
def __init__(self, seqdata_filename):
""" Streaming writer of seq data hdf5 files
Args:
seqdata_filename (str): name of seqdata hdf5 file
"""
self.store = pd.HDFStore(seqdata_filename, 'w', complevel=9, complib='zlib')
def write(self, chromosome, fragment_data, allele_data):
""" Write a chunk of reads and alleles data
Args:
fragment_data (pandas.DataFrame): fragment data
allele_data (pandas.DataFrame): allele data
Input 'fragment_data' dataframe has columns 'fragment_id', 'start', 'end'.
        If columns 'is_duplicate' and 'mapping_quality' are not provided, they are
given nominal values.
Input 'allele_data' dataframe has columns 'position', 'fragment_id', 'is_alt'.
"""
# Add nominal mapping quality
if 'mapping_quality' not in fragment_data:
fragment_data['mapping_quality'] = 60
# Add nominal is_duplicate value
if 'is_duplicate' not in fragment_data:
fragment_data['is_duplicate'] = 0
fragment_data = fragment_data[['fragment_id', 'start', 'end', 'is_duplicate', 'mapping_quality']]
allele_data = allele_data[['position', 'fragment_id', 'is_alt']]
_unique_index_append(self.store, _get_key('fragments', chromosome), fragment_data)
_unique_index_append(self.store, _get_key('alleles', chromosome), allele_data)
def close(self):
""" Close seq data file
"""
self.store.close()
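# A minimal usage sketch for Writer (illustrative; the file name and dataframes below are placeholders):
#
#   writer = Writer('sample_seqdata.h5')
#   writer.write('1',
#                pd.DataFrame({'fragment_id': [0, 1], 'start': [100, 200], 'end': [250, 350]}),
#                pd.DataFrame({'position': [120], 'fragment_id': [0], 'is_alt': [1]}))
#   writer.close()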
_identity = lambda x: x
def _read_seq_data_full(seqdata_filename, record_type, chromosome, post=_identity):
key = _get_key(record_type, chromosome)
try:
return post(pd.read_hdf(seqdata_filename, key))
except KeyError:
return empty_data[record_type]
def _get_seq_data_nrows(seqdata_filename, key):
with pd.HDFStore(seqdata_filename, 'r') as store:
try:
return store.get_storer(key).nrows
except (AttributeError, KeyError):
return 0
def _read_seq_data_chunks(seqdata_filename, record_type, chromosome, chunksize, post=_identity):
key = _get_key(record_type, chromosome)
nrows = _get_seq_data_nrows(seqdata_filename, key)
if nrows == 0:
yield empty_data[record_type]
else:
for i in range(nrows//chunksize + 1):
yield post(pd.read_hdf(seqdata_filename, key, start=i*chunksize, stop=(i+1)*chunksize))
def read_seq_data(seqdata_filename, record_type, chromosome, chunksize=None, post=_identity):
""" Read sequence data from a HDF seqdata file.
Args:
seqdata_filename (str): name of seqdata file
record_type (str): record type, can be 'alleles' or 'reads'
chromosome (str): select specific chromosome
KwArgs:
chunksize (int): number of rows to stream at a time, None for the entire file
post (callable): post processing function
Yields:
pandas.DataFrame
"""
if chunksize is None:
return _read_seq_data_full(seqdata_filename, record_type, chromosome, post=post)
else:
return _read_seq_data_chunks(seqdata_filename, record_type, chromosome, chunksize, post=post)
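# A minimal usage sketch (illustrative; 'sample_seqdata.h5' and process() are placeholders):
#
#   # read all fragments of chromosome '1' at once
#   fragments = read_seq_data('sample_seqdata.h5', 'fragments', '1')
#   # or stream them in chunks of one million rows
#   for chunk in read_seq_data('sample_seqdata.h5', 'fragments', '1', chunksize=1000000):
#       process(chunk)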
def read_fragment_data(seqdata_filename, chromosome, filter_duplicates=False, map_qual_threshold=1, chunksize=None):
""" Read fragment data from a HDF seqdata file.
Args:
seqdata_filename (str): name of seqdata file
chromosome (str): select specific chromosome, None for all chromosomes
KwArgs:
filter_duplicates (bool): filter reads marked as duplicate
map_qual_threshold (int): filter reads with less than this mapping quality
chunksize (int): number of rows to stream at a time, None for the entire file
Yields:
pandas.DataFrame
Returned dataframe has columns 'fragment_id', 'start', 'end'
"""
def filter_reads(reads):
# Filter duplicates if necessary
if 'is_duplicate' in reads and filter_duplicates is not None:
if filter_duplicates:
reads = reads[reads['is_duplicate'] == 0]
reads.drop(['is_duplicate'], axis=1, inplace=True)
# Filter poor quality reads
if 'mapping_quality' in reads and map_qual_threshold is not None:
reads = reads[reads['mapping_quality'] >= map_qual_threshold]
reads.drop(['mapping_quality'], axis=1, inplace=True)
return reads
return read_seq_data(seqdata_filename, 'fragments', chromosome, chunksize=chunksize, post=filter_reads)
def read_allele_data(seqdata_filename, chromosome, chunksize=None):
""" Read allele data from a HDF seqdata file.
Args:
seqdata_filename (str): name of seqdata file
chromosome (str): select specific chromosome, None for all chromosomes
KwArgs:
chunksize (int): number of rows to stream at a time, None for the entire file
Yields:
pandas.DataFrame
Returned dataframe has columns 'position', 'is_alt', 'fragment_id'
"""
return read_seq_data(seqdata_filename, 'alleles', chromosome, chunksize=chunksize)
def read_chromosomes(seqdata_filename):
""" Read chromosomes from a HDF seqdata file.
Args:
seqdata_filename (str): name of seqdata file
Returns:
list of chromsomes
"""
with | pd.HDFStore(seqdata_filename, 'r') | pandas.HDFStore |
# -*- coding:utf-8 -*-
"""
AHMath module.
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import copy
import collections
import warnings
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import norm
class AHMath(object):
    """ alphahunter common math functions
"""
@staticmethod
    def array(num_list):
        """ Convert a list to a numpy array
"""
return np.array(num_list)
@staticmethod
    def multiply(a, b):
        """ Return the product of two numbers; return None on any exception
"""
if pd.isnull(a) or pd.isnull(b):
return None
else:
return a * b
@staticmethod
    def power(a, b):
        """ a to the power of b
"""
return math.pow(a, b)
@staticmethod
    def exp(a):
        """ e to the power of a
"""
return math.exp(a)
@staticmethod
    def expm1(a):
        """ e to the power of a, minus 1
"""
return math.expm1(a)
@staticmethod
    def log(a):
        """ Natural (base-e) logarithm of a
"""
return math.log(a)
@staticmethod
def log1p(a):
""" log(1 + a)
"""
return math.log1p(a)
@staticmethod
    def sqrt(a):
        """ Square root of a
"""
return math.sqrt(a)
@staticmethod
    def abs(a):
        """ Absolute value of a
"""
return math.fabs(a)
@staticmethod
    def copysign(a, b):
        """ The magnitude of a with the sign of b
"""
return math.copysign(a, b)
@staticmethod
    def zeros(a):
        """ numpy array of length a with all elements 0
"""
return np.zeros(a)
@staticmethod
    def ones(a):
        """ numpy array of length a with all elements 1
"""
return np.ones(a)
@staticmethod
    def max(a):
        """ Return the largest element of a list; return None on any exception
"""
if (a is None) or (len(a) == 0):
return None
a_array = np.array([i for i in a if pd.notnull(i)])
count = len(a_array)
if count == 0:
return None
else:
return a_array.max()
@staticmethod
    def min(a):
        """ Return the smallest element of a list; return None on any exception
"""
if (a is None) or (len(a) == 0):
return None
a_array = np.array([i for i in a if pd.notnull(i)])
count = len(a_array)
if count == 0:
return None
else:
return a_array.min()
@staticmethod
    def sum(a):
        """ Return the sum of all elements of a list; return 0.0 on any exception
"""
if (a is None) or (len(a) == 0):
return 0.0
result = 0.0 if pd.isnull(a[0]) else a[0]
for i in range(1, len(a)):
if pd.isnull(a[i]):
continue
result += a[i]
return result
@staticmethod
    def cum_sum(a):
        """ Return the cumulative sum of a list as a list; None values are treated as 0.0
"""
if (a is None) or (len(a) == 0):
return [0.0]
b = [each if pd.notnull(each) else 0.0 for each in a]
return list(np.array(b).cumsum())
@staticmethod
    def dot(a, b):
        """ Return the dot product of two lists; return None on any exception
"""
if len(a) != len(b):
return None
else:
a = AHMath.array(a)
b = AHMath.array(b)
a_m_b = AHMath.array([AHMath.multiply(a[i], b[i]) for i in range(len(a))])
return AHMath.sum(a_m_b)
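    # Illustrative sanity check (not part of the original source): AHMath.dot([1, 2, 3], [4, 5, 6])
    # returns 1*4 + 2*5 + 3*6 = 32, and lists of mismatched length return None.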
@staticmethod
    def count_nan(a):
        """ Return the number of None values in a list; return None on any exception
"""
count = 0
if a is None:
return None
for i in a:
if pd.isnull(i):
count += 1
return count
@staticmethod
    def mean(a):
        """ Return the mean of the elements of a list; return None on any exception
"""
if (a is None) or (len(a) == 0):
return None
count = len(a) - AHMath.count_nan(a)
if count == 0:
return None
return AHMath.sum(a) / float(count)
@staticmethod
    def std(a):
        """ Return the standard deviation of the elements of a list; return None on any exception
"""
if (a is None) or (len(a) == 0):
return None
count = len(a) - AHMath.count_nan(a)
if count <= 1:
return None
mean = AHMath.mean(a)
s = 0
for e in a:
if pd.isnull(e):
continue
s += (e - mean) ** 2
return AHMath.sqrt(s / float(count - 1))
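    # Illustrative note (not part of the original source): this is the sample standard deviation
    # (divisor n - 1), e.g. AHMath.std([1, 2, 3, 4]) = sqrt(5 / 3), approximately 1.29.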
@staticmethod
    def weighted_mean(a, w):
        """ Given a list w of weights, return the weighted mean of list a; return None on any exception
"""
if len(a) != len(w):
print('weighted mean lists not same length')
return None
s = 0
w_sum = 0
for i in range(0, len(a)):
if ( | pd.isnull(a[i]) | pandas.isnull |
# @name: ont_struct.py
# @title: Imports ontology tree and calculates hierarchical level per ontology term
# @description: Pulls ontology tree from EBI's Ontology Lookup Service (OLS) API (https://www.ebi.ac.uk/ols/index);
# then parses into individual terms and creates the parents for each individual term.
# Building in part off of python [ols-client library](https://github.com/cthoyt/ols-client/blob/master/src/ols_client/client.py)
# @author: <NAME>
# @email: <EMAIL>
# @date: 31 January 2018
# After looking through the documentation and all the associated fields that come out from the API call, it looks like there's no way to avoid
# doing a loop through the entire tree, starting from the end node and iterating till you hit the root(s).
# Perhaps not the fastest way, but the most direct way will start from the end node rather than going from the roots down.
# NOTE: Would be faster to pull all the terms from a static .owl file to find the parents and then use these same functions to assemble the hierarchy.
# However, that introduces more dependencies, and OLS has already gone through the bother of standardizing the output files.
# So, the slow but steady way...
# [0] Setup ---------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import requests
import progressbar
import time
# <<< get_data(url) >>>
# @name: get_data(url)
# @title: Access data from EBI API
# @description: returns json object with unfiltered layers of gooiness.
# General structure:
# data['page'] --> list of number of pages in the full query
# data['_links'] --> https request strings for first, next, last, self pages
# data['_embedded']['terms'] --> nested list of the actual data for each term. Includes:
# ...['_links']: https requests for ancestors, descendants, tree structure, graph structure for *each* term
# ...['description']: short description of term
# ...['iri']: permanent url to Ontobee page about term
# ...['is_obsolete']: t/f if term is obsolete
# ...['is-root']: t/f if is uppermost level
# ...['label']: name of term
# ...['obo_id']: unique id
# ...['synonyms']: synonyms for term
# ...[<other stuff>]: things that didn't seem as relevant.
# @input: url from any API
# @output: None if query failed; json-ized data if successful
# @example: get_data('http://www.ebi.ac.uk/ols/api/ontologies/go/terms?size=500')
def get_data(url):
resp = requests.get(url)
if (resp.ok):
data = resp.json()
return data
else:
        print('query was not successful')
return None
# <<< addit_pages(json_data) >>>
# Checks if previous call to OLS has more results
# returns the url for the next query if there are more pages
def addit_pages(json_data):
curr_page = json_data['page']['number']
last_page = json_data['page']['totalPages']-1
if(curr_page < last_page):
next_page = json_data['_links']['next']['href']
return {'next': next_page, 'current': curr_page, 'last': last_page}
else:
return False
# <<< _term_gen(json_data) >>>
# term generator to be able to loop through all terms in pulled data
def _term_gen(data):
try:
data['_embedded']['terms']
except:
print('query failure')
else:
for term in data['_embedded']['terms']:
yield term
# <<< pull_terms(json_data) >>>
# function to remove only the good bits from an API call to OLS
# collects all the terms within a given ontology
# returns a dataframe containing their ids, labels (names), descriptions, synonyms, iri (purl to ontobee), whether each is a root node, and the url to call to get their hierarchicalParents
# [EBI description of parent/child relationships](https://github.com/EBISPOT/OLS/blob/master/ols-web/src/main/asciidoc/generated-snippets/terms-example/links.adoc)
# "Hierarchical parents include is-a and other related parents, such as part-of/develops-from, that imply a hierarchical relationship"
def pull_terms(json_data, filter_obs = True):
iter_terms = _term_gen(json_data)
ids = []
labels = []
descrips = []
syn = []
iri = []
root = []
selfs = []
parents = []
# pull out the relevant values
for term in iter_terms:
# filter out the obsolete terms, if specified
# removes:
# obsolete terms
# "Thing"
if((not filter_obs) | ((not term['is_obsolete']) & pd.notnull(term['obo_id']))):
ids.append(term['obo_id'])
labels.append(term['label'])
iri.append(term['iri'])
root.append(term['is_root'])
selfs.append(term['_links']['self']['href'])
try:
descrips.append(term['description'][0])
except:
descrips.append('')
try:
syn.append(term['synonyms'][0])
except:
syn.append('')
try:
parents.append(term['_links']['hierarchicalParents']['href'])
except KeyError: # there's no parents for this one
parents.append('')
continue
# convert to a dataframe
terms = pd.DataFrame([ids, labels, descrips, syn, iri, root, selfs, parents], index = ['id', 'label', 'description', 'synonyms', 'node_url', 'is_root', 'self_url', 'parent_url']).T.set_index('id')
return terms
# Primary API call to OLS to get the unique terms.
# returns terms, parents
# --> term dictionary
# @example: fbcv = get_terms('fbcv')
def get_terms(ont_id, base_url = 'http://www.ebi.ac.uk/ols/api/ontologies/', end_url = '/terms?size=500', save_terms = False, output_dir = ''):
url = base_url + ont_id + end_url
json_data = get_data(url)
# set up containers for loops
terms = pull_terms(json_data)
next_page = addit_pages(json_data)
with progressbar.ProgressBar(max_value = next_page['last']) as bar:
while(next_page):
bar.update(next_page['current'])
json_data = get_data(next_page['next'])
next_page = addit_pages(json_data) # update next page
terms = pd.concat([terms,pull_terms(json_data)])
if (save_terms):
terms.to_csv(output_dir + str(pd.Timestamp.today().strftime('%F')) + '_' + ont_id + '_terms.tsv', sep='\t')
return terms
# @description: pulls out all the immediate hierarchical parents for each term
# [EBI description of parent/child relationships](https://github.com/EBISPOT/OLS/blob/master/ols-web/src/main/asciidoc/generated-snippets/terms-example/links.adoc)
# "Hierarchical parents include is-a and other related parents, such as part-of/develops-from, that imply a hierarchical relationship"
# @input: *terms*: dataframe of terms, output of `get_terms`
#
# @example: parent_df = find_parents(fbcv, ont_id = 'fbcv')
def find_parents(terms, ont_id, save_terms = True, output_dir = ''):
nodes = []
anc = []
roots = []
counter = 0
with progressbar.ProgressBar(max_value = len(terms), initial_value=0) as bar:
for idx, row in terms.iterrows():
# try:
# parent_url = term['_links']['hierarchicalParents']['href']
# except KeyError: # there's no children for this one
# continue
if((row.parent_url != "") & (pd.notnull(row.parent_url))):
try:
response = get_data(row.parent_url)
except:
print('\n index: ' + str(idx) + ' (counter: ' + str(counter) + ')')
print('server overloaded; waiting 2 min. and caching results')
temp = pd.DataFrame([nodes, anc, roots], index = ['id', 'ancestor_id', 'is_root']).T
temp.to_csv(output_dir + str(pd.Timestamp.today().strftime('%F')) + '_' + ont_id + '_parents_TEMPidx' + str(counter) + '.tsv', sep='\t')
time.sleep(120)
response = get_data(row.parent_url)
iter_terms = _term_gen(response)
for parent_term in iter_terms:
nodes.append(idx)
anc.append(parent_term['obo_id'])
roots.append(parent_term['is_root'])
# yield term['obo_id'], parent_term['obo_id'], parent_term['is_root']
counter += 1
if (counter % 5 == 0):
bar.update(counter)
# combine into a dataframe; set ancestor level to 0
parents = pd.DataFrame([nodes, anc, roots], index = ['id', 'ancestor_id', 'is_root']).T
if (save_terms):
parents.to_csv(output_dir + str(pd.Timestamp.today().strftime('%F')) + '_' + ont_id + '_parents.tsv', sep='\t')
return parents
# <<< find_nextgen() >>>
# @name:
# @title:
# @description:
# @NOTE: lots of permutations of this function were written. One of the main sticking points was
# whether to concat a DataFrame in each iteration of the loop, or whether to save each variable in
# a separate list. While both work (or should, in principle), using a DataFrame means that the output
# variable needs to be declared as a global, or else during the recursion results will be saved over/lost.
# For simplicity, then, passing everything as lists and converting later.
# @input:
# @output:
# @example:
# outer function to pull an ancestor for a specific ID
def find_ancestors_1node(parent_df, id, reverse = True, return_paths = False):
root_ids = set(parent_df.ancestor_id[parent_df.is_root == True])
# << find_nextgen() >> # helper function to recurse through the parent ids and ID all the ancestors.
# variation on https://www.python.org/doc/essays/graphs/
def find_nextgen(parent_df, child_id, root_ids, path = [], paths = []):
# update the path with the current search param
# path will reset after every loop
path = path + [child_id]
if(child_id in root_ids):
# have hit the root node
paths.append(path)
return path
# find the id(s) of the parent of child_id
parent_row = parent_df[parent_df.id == child_id]
# -- recurse --
for idx, row in parent_row.iterrows():
find_nextgen(parent_df, row.ancestor_id, root_ids, path, paths)
return paths
# << reset_level(paths, reverse) >> : helper function to standardize indices
# resets level indices so 0 is the root nodes
def reset_level(paths, reverse):
path_dict = {}
for idx, path in enumerate(paths):
# reverse the list, so the root is at 0
if(reverse):
path.reverse()
for ont_idx, ont_id in enumerate(path):
# update dictionary
if ont_idx in path_dict:
# dictionary already has that value; append if it doesn't already exist within list
if ont_id not in path_dict[ont_idx]:
path_dict[ont_idx].append(ont_id)
else:
path_dict[ont_idx] = [ont_id]
return path_dict
# Calculate path
paths = find_nextgen(parent_df, id, root_ids)
ont_idx = reset_level(paths, reverse)
if (return_paths):
return {'paths': paths, 'ont_idx': ont_idx}
else:
return ont_idx
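# Illustrative shape of the return value (hypothetical ids, not real ontology terms):
# find_ancestors_1node(parent_df, 'X:0000003') might return {0: ['X:0000001'], 1: ['X:0000002'], 2: ['X:0000003']},
# i.e. a dict mapping hierarchy level (0 = the root when reverse=True) to the ontology ids found at that level.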
# @NOTE: certain high level nodes have an NA id. These were filtered out upstream.
# As a result, any descendants of this node will have NA ancestors; assuming these ont terms aren't particularly important.
# Return value for ancestors will be NA
def find_ancestors(parent_df, ont_id = '', save_terms = True, output_dir = '', ids = [], reverse = True, return_paths = False, save_freq = 1000, start_idx = None):
# container for output
output = pd.DataFrame()
# ids is an optional testing parameter; if not declared, will look for all the ids.
# Remove duplicate ids; some ids have multiple parents, therefore need to keep in parent_df.
# However, including them the entire time will add unnecessary calculations of the same paths.
if(len(ids) == 0):
ids = pd.unique(parent_df.id)
if(start_idx is not None):
ids = ids[start_idx+1:]
elif(isinstance(ids, pd.Series)):
# convert series to Numpy ndarray
ids = ids.as_matrix()
with progressbar.ProgressBar(max_value = len(ids)) as bar:
for idx, node_id in np.ndenumerate(ids):
ancestors = find_ancestors_1node(parent_df, id = node_id, reverse = reverse, return_paths = return_paths)
# make sure ancestors returned something. If an ancestor has no unique ID, it was filtered out; return NA
if(return_paths):
if(len(ancestors['ont_idx']) > 0):
output = pd.concat([output, pd.DataFrame({'id': node_id, 'ancestors': [ancestors['ont_idx']], 'paths': [ancestors['paths']], 'node_level': max(ancestors['ont_idx'].keys())})], ignore_index=True)
else:
output = pd.concat([output, pd.DataFrame({'id': node_id, 'ancestors': [np.NaN], 'paths': [np.NaN], 'node_level': [np.NaN]})], ignore_index=True)
else:
if(len(ancestors) > 0):
output = pd.concat([output, pd.DataFrame({'id': node_id, 'ancestors': [ancestors], 'node_level': max(ancestors.keys())})], ignore_index=True)
else:
output = pd.concat([output, | pd.DataFrame({'id': node_id, 'ancestors': [np.NaN], 'node_level': [np.NaN]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 01:29:34 2021
@author: <NAME>
Predict the infection ending:
SEIRAH time series proceeding from the last situation of SEIRAH_main.py,
until the condition of E+A+I=0.
"""
import networkx as nx
import random
from random import sample
import matplotlib.pyplot as plt1
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import copy as co
import csv as cs
import datetime
import SEIRAH_SW as SEIRAH_SW
random.seed(2020)
"""
Network Generation and Initialization.
NetworkX official documents:
https://networkx.github.io/documentation/stable/_modules/networkx/generators/random_graphs.html#newman_watts_strogatz_graph
City networks are generated; H (hospitalized) nodes are isolated (edge weight=0). The first day of each status is recorded.
"""
def G_gene(N,k,p,expo,infe,asym,hosp,reco):
G = nx.newman_watts_strogatz_graph(N,k,p,seed=2020);
for i in G.nodes():
G.nodes[i]['status'] = 'susc' #Initialize all nodes as susc.
G.nodes[i]['S_1stday'] = 0 #Set initial date(day) to S nodes.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
G.nodes[i]['Infe_other'] = 0 #Count for Rt calculating.
for i in G.edges():
G.edges[i]['weight'] = 1 #Initialize all edges weight = 1
"""
Initial G. SEIRAH status
"""
samp_sum = expo+infe+asym+hosp+reco #total number of samples
# Random sample nodes from G.
random_n = sample(G.nodes(), samp_sum) #use sample function
# nodes lists of each status. Randomly chose(clip) from random_n list.
node_e = random_n[0:expo]
a = expo + infe
node_i = random_n[expo:a]
b = a + asym
node_a = random_n[a:b]
c = b + hosp
node_h = random_n[b:c]
d = c + reco
node_r = random_n[c:d]
# Divide samples by status inputed numbers. Assign status except S.
# Record *_1stday. S no need to record.
for i in node_e:
G.nodes[i]['status'] = 'expo'
G.nodes[i]['S_1stday'] = 0 #Initial E to replace S status.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
for i in node_i:
G.nodes[i]['status'] = 'infe'
G.nodes[i]['S_1stday'] = 0 #Initial E to replace S status.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
for i in node_a:
G.nodes[i]['status'] = 'asym'
G.nodes[i]['S_1stday'] = 0 #Initial E to replace S status.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
for i in node_h:
G.nodes[i]['status'] = 'hosp'
G.nodes[i]['S_1stday'] = 0 #Initial E to replace S status.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
for i in node_r:
G.nodes[i]['status'] = 'reco'
G.nodes[i]['S_1stday'] = 0 #Initial E to replace S status.
G.nodes[i]['E_1stday'] = 0
G.nodes[i]['I_1stday'] = 0
G.nodes[i]['A_1stday'] = 0
G.nodes[i]['H_1stday'] = 0
G.nodes[i]['R_1stday'] = 0
    # Update G edge weights: edges of node_h need their weight changed from 1 to 0
for i in node_h:
for nbr in G[i]:
G.edges[i, nbr]['weight'] = 0
return G
"""
Nodes mapping function. Shift weight as 0 or 1.
"""
def edge_weight_0(nwk):
for i in nwk.nodes():
if nwk.nodes[i]['status'] == 'hosp' or 'Com' in nwk.nodes[i]:
for nbr in nwk[i]:
nwk.edges[i, nbr]['weight'] = 0
return nwk
def edge_weight_1(nwk):
for i in nwk.nodes():
if nwk.nodes[i]['status'] != 'hosp' or 'Com' in nwk.nodes[i]:
for nbr in nwk[i]:
nwk.edges[i, nbr]['weight'] = 1
return nwk
"""
Count statuses of SEIRAH in target network. Daily data. NOT for TimeZone.
"""
def Count_status(__time_stamp, __nwk):
num_S = num_E = num_I = num_R = num_A = num_H = 0
count_Infe_other = count_T = count_H = 0
Rt = Tt = 0
for i in range(__nwk.number_of_nodes()):
# S is big number. No count to save computing time.
# if nwk.nodes[i]['status'] == 'susc':
# num_S = num_S + 1
# continue
if __nwk.nodes[i]['status'] == 'expo':
num_E = num_E + 1
#continue
if __nwk.nodes[i]['status'] == 'infe':
num_I = num_I + 1
#continue
if __nwk.nodes[i]['status'] == 'reco':
num_R = num_R + 1
#continue
if __nwk.nodes[i]['status'] == 'asym':
num_A = num_A + 1
#continue
if __nwk.nodes[i]['status'] == 'hosp':
num_H = num_H + 1
#Filter out new H, to count Rt, Tt
if __nwk.nodes[i]['H_1stday'] == __time_stamp:
count_H = count_H +1
count_Infe_other = count_Infe_other + __nwk.nodes[i]['Infe_other']
#Count how many days from E to H
count_T = count_T + __nwk.nodes[i]['H_1stday'] - __nwk.nodes[i]['E_1stday']
if count_H ==0 or num_H == 0: #Avoid error while H=0
Rt = Tt = 0
else:
Rt = count_Infe_other / count_H
Tt = count_T / count_H
#Tt = format(Rt / beta_t,'.2f')
__Count_status = [num_S, num_E, num_I, num_R, num_A, num_H, Rt, Tt]
return __Count_status
# # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Above sections are FUNCTIONS. # #
# # MAIN() is as follows. # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # #
para_Epidemic = {'sigma':0.2,'p1':0.18,'p2':0.3,'l_AH':0.05,'l_IH':0.3,
'g_AR':0.07,'g_HR':0.1}
#beta = [0.1,0.1,0.1] # B_IS=0.3, B_AS=0.2, B_ES=0.2 as default
#beta_t = 0.1 # by Sandy:
tau = [0.5, 0.5] # Default 0.2,0.4 TimeZone(1), TimeZone(2)...Rest TimeZone Not actively infection
# (Epidemic parameters) ###################################################
# beta: Latency rate. β* in equations.
# sigma: Transmission rate. σ in equations.
# p1: Ratio of E to A.
# l_*: Hospitalization rate. λ* in equations.
# p2: Hospitalization ratio of A.
# g_*: Recovery rate. γ* in equations.
# tau: TimeZone Ratio Coefficient. τ* in equations.
# CBD is Commuting network. Interconnnected with city_0,1,2,3
#Import parameters from .csv, by Sandy
df0 = pd.read_csv('output_city_0.csv')
df1 = pd.read_csv('output_city_1.csv')
df2 = pd.read_csv('output_city_2.csv')
df3 = pd.read_csv('output_city_3.csv')
df_Shuto = pd.read_csv('output_Shuto.csv')
last_date = df_Shuto['Date'].values[-1]
predict_start_date=pd.Series([pd.to_datetime(last_date)])
df_RealH = | pd.read_csv('new_cases_cr2020.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
""" Geo layer object classes and methods
Toolset for working with static geo layer elements (networks, buffers,
areas such as administrative boundaries, road/electrical networks,
waterways, restricted areas, etc.)
"""
import copy
import math
import os
import random
import warnings
from functools import wraps
import fiona
import geopandas as gpd
import numpy as np
# import progressbar
import pyproj
from fiona.errors import FionaValueError
from geopandas.io.file import infer_schema
from gistools.conversion import geopandas_to_array
from gistools.coordinates import GeoGrid, r_tree_idx
from gistools.exceptions import GeoLayerError, LineLayerError, PointLayerError, \
PolygonLayerError, PolygonLayerWarning, GeoLayerEmptyError, ProjectionWarning
from gistools.geometry import katana, fishnet, explode, cut, cut_, cut_at_points, \
add_points_to_line, radius_of_curvature, shared_area_among_collection, intersects, \
intersecting_features, katana_centroid, area_partition_polygon, shape_factor, \
is_in_collection, overlapping_features, overlaps, hexana, nearest_feature, to_2d
from gistools.plotting import plot_geolayer
from gistools.projections import crs_from_layer
from numba import njit
from pandas import concat, Series
from rdp import rdp
from shapely import wkb
from shapely.geometry import Polygon, MultiPolygon, LineString, MultiLineString, \
Point, shape, MultiPoint
from shapely.ops import cascaded_union
from shapely.prepared import prep
from gistools.utils.check.descriptor import protected_property, lazyproperty
from gistools.utils.check.type import check_type, type_assert
from gistools.utils.check.value import check_string, check_sub_collection_in_collection
from gistools.utils.toolset import split_list_by_index
from tqdm import tqdm
def _build_consistent_gdf(data, layer_class, **kwargs):
""" Build geopandas dataframe with consistent geometry
    Eliminate inconsistent geometries (keep only consistent ones,
e.g. only lines, points or polygons)
Parameters
----------
data:
layer_class:
kwargs:
Returns
-------
"""
outdf = gpd.GeoDataFrame(data, **kwargs)
outdf = outdf[outdf.geometry.apply(
lambda geom: isinstance(geom, (layer_class._geometry_class,
layer_class._multi_geometry_class)))]
return outdf
def _difference(layer1, layer2):
""" Difference between two layers
Parameters
----------
layer1
layer2
Returns
-------
"""
gdf1 = layer1._gpd_df.drop("geometry", axis=1)
new_geometry = []
df = []
for i, geometry in enumerate(layer1.geometry):
is_intersecting = intersects(geometry, layer2.geometry, layer2.r_tree_idx)
if any(is_intersecting):
diff_result = explode([geometry.difference(cascaded_union(
[geom for geom in layer2.geometry[is_intersecting]]))])
new_geometry.extend(diff_result)
if len(diff_result) > 0:
df.extend([gdf1.iloc[[i]]] * len(diff_result))
else:
new_geometry.append(geometry)
df.append(gdf1.iloc[[i]])
return _build_consistent_gdf( | concat(df, ignore_index=True) | pandas.concat |
import os
import sqlite3
from unittest import TestCase
import warnings
from contextlib2 import ExitStack
from logbook import NullHandler, Logger
import numpy as np
import pandas as pd
from six import with_metaclass, iteritems, itervalues
import responses
from toolz import flip, groupby, merge
from trading_calendars import (
get_calendar,
register_calendar_alias,
)
import h5py
import zipline
from zipline.algorithm import TradingAlgorithm
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.utils.memoize import classlazyval
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.domain import GENERIC, US_EQUITIES
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.paths import ensure_directory, ensure_directory_containing
from .core import (
create_daily_bar_data,
create_minute_bar_data,
make_simple_equity_info,
tmp_asset_finder,
tmp_dir,
write_hdf5_daily_bars,
)
from .debug import debug_mro_failure
from ..data.adjustments import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.fx import (
InMemoryFXRateReader,
HDF5FXRateReader,
HDF5FXRateWriter,
)
from ..data.hdf5_daily_bars import (
HDF5DailyBarReader,
HDF5DailyBarWriter,
MultiCountryDailyBarReader,
)
from ..data.loader import (
get_benchmark_filename,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader
)
from ..finance.trading import SimulationParameters
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
from ..utils.memoize import remember_last
zipline_dir = os.path.dirname(zipline.__file__)
class DebugMROMeta(FinalMeta):
"""Metaclass that helps debug MRO resolution errors.
"""
def __new__(mcls, name, bases, clsdict):
try:
return super(DebugMROMeta, mcls).__new__(
mcls, name, bases, clsdict
)
except TypeError as e:
if "(MRO)" in str(e):
msg = debug_mro_failure(name, bases)
raise TypeError(msg)
else:
raise
class ZiplineTestCase(with_metaclass(DebugMROMeta, TestCase)):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
'Called init_class_fixtures from init_instance_fixtures.'
' Did you write super(..., self).init_class_fixtures() instead'
' of super(..., self).init_instance_fixtures()?',
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
'Attempted to enter a class context in init_instance_fixtures.'
'\nDid you mean to call enter_instance_context?',
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
'Attempted to add a class callback in init_instance_fixtures.'
'\nDid you mean to call add_instance_callback?',
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(with_metaclass(DebugMROMeta, object)):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
class WithLogger(object):
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
ASSET_FINDER_EQUITY_NAMES: iterable[str]
The default names to use for the equities.
ASSET_FINDER_EQUITY_EXCHANGE : str
The default exchange to assign each equity.
ASSET_FINDER_COUNTRY_CODE : str
The default country code to assign each exchange.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
make_asset_finder() -> pd.DataFrame
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
"""
ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C')
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_NAMES = None
ASSET_FINDER_EQUITY_EXCHANGE = 'TEST'
ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE')
ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE')
ASSET_FINDER_FUTURE_CHAIN_PREDICATES = CHAIN_PREDICATES
ASSET_FINDER_COUNTRY_CODE = '??'
@classmethod
def _make_info(cls, *args):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
cls.ASSET_FINDER_EQUITY_NAMES,
cls.ASSET_FINDER_EQUITY_EXCHANGE,
)
@classmethod
def make_asset_finder_db_url(cls):
return 'sqlite:///:memory:'
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
equities = cls.make_equity_info()
futures = cls.make_futures_info()
root_symbols = cls.make_root_symbols_info()
exchanges = cls.make_exchanges_info(equities, futures, root_symbols)
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
'country_code': cls.ASSET_FINDER_COUNTRY_CODE,
})
return cls.enter_class_context(tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=equities,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
future_chain_predicates=cls.ASSET_FINDER_FUTURE_CHAIN_PREDICATES,
))
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
@classlazyval
def all_assets(cls):
"""A list of Assets for all sids in cls.asset_finder.
"""
return cls.asset_finder.retrieve_all(cls.asset_finder.sids)
@classlazyval
def exchange_names(cls):
"""A list of canonical exchange names for all exchanges in this suite.
"""
infos = itervalues(cls.asset_finder.exchange_info)
return sorted(i.canonical_name for i in infos)
@classlazyval
def assets_by_calendar(cls):
"""A dict from calendar -> list of assets with that calendar.
"""
return groupby(lambda a: get_calendar(a.exchange), cls.all_assets)
@classlazyval
def all_calendars(cls):
"""A list of all calendars for assets in this test suite.
"""
return list(cls.assets_by_calendar)
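# Example (illustrative sketch, not part of the original code): narrowing the
# default asset universe by overriding the class-level attributes documented on
# ``WithAssetFinder``. ``ZiplineTestCase`` is assumed to be the base test case
# defined elsewhere in this module.
#
#     class ExampleWithTwoEquities(WithAssetFinder, ZiplineTestCase):
#         ASSET_FINDER_EQUITY_SIDS = (1, 2)
#         ASSET_FINDER_EQUITY_SYMBOLS = ('FOO', 'BAR')
#
#         def test_retrieve_all(self):
#             assets = self.asset_finder.retrieve_all([1, 2])
#             assert [a.symbol for a in assets] == ['FOO', 'BAR']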
# TODO_SS: The API here doesn't make sense in a multi-country test scenario.
class WithTradingCalendars(object):
"""
ZiplineTestCase mixin providing cls.trading_calendar,
cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a
class-level fixture.
After ``init_class_fixtures`` has been called:
- `cls.trading_calendar` is populated with a default of the nyse trading
calendar for compatibility with existing tests
- `cls.all_trading_calendars` is populated with the trading calendars
keyed by name,
- `cls.trading_calendar_for_asset_type` is populated with the trading
calendars keyed by the asset type which uses the respective calendar.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ('NYSE',)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'}
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = 'NYSE'
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
for cal_str in (
set(cls.TRADING_CALENDAR_STRS) |
{cls.TRADING_CALENDAR_PRIMARY_CAL}
):
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls,
'{0}_calendar'.format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
type_to_cal = iteritems(cls.TRADING_CALENDAR_FOR_ASSET_TYPE)
for asset_type, cal_str in type_to_cal:
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
cls.trading_calendar = (
cls.trading_calendars[cls.TRADING_CALENDAR_PRIMARY_CAL]
)
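# Example (illustrative sketch, not part of the original code): requesting an
# additional calendar makes it available both through ``cls.trading_calendars``
# and as an aliased ``<name>_calendar`` attribute; ``cls.trading_calendar``
# stays the NYSE calendar unless TRADING_CALENDAR_PRIMARY_CAL is overridden.
#
#     class ExampleWithTwoCalendars(WithTradingCalendars, ZiplineTestCase):
#         TRADING_CALENDAR_STRS = ('NYSE', 'LSE')
#
#         def test_calendars(self):
#             assert self.trading_calendar is self.nyse_calendar
#             assert 'LSE' in self.trading_calendars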
_MARKET_DATA_DIR = os.path.join(zipline_dir, 'resources', 'market_data')
@remember_last
def read_checked_in_benchmark_data():
symbol = 'SPY'
filename = get_benchmark_filename(symbol)
source_path = os.path.join(_MARKET_DATA_DIR, filename)
benchmark_returns = pd.read_csv(
source_path,
parse_dates=[0],
index_col=0,
header=None,
).tz_localize('UTC')
return benchmark_returns.iloc[:, 0]
class WithBenchmarkReturns(WithDefaultDateBounds,
WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.benchmark_returns as a class-level
attribute.
"""
_default_treasury_curves = None
@classproperty
def BENCHMARK_RETURNS(cls):
benchmark_returns = read_checked_in_benchmark_data()
# Zipline ordinarily uses cached benchmark returns and treasury
# curves data, but when running the zipline tests this cache is not
# always updated to include the appropriate dates required by both
# the futures and equity calendars. In order to create more
# reliable and consistent data throughout the entirety of the
# tests, we read static benchmark returns and treasury curve csv
# files from source. If a test using this fixture attempts to run
# outside of the static date range of the csv files, raise an
# exception warning the user to either update the csv files in
# source or to use a date range within the current bounds.
static_start_date = benchmark_returns.index[0].date()
static_end_date = benchmark_returns.index[-1].date()
warning_message = (
'The WithBenchmarkReturns fixture uses static data between '
'{static_start} and {static_end}. To use a start and end date '
'of {given_start} and {given_end} you will have to update the '
'files in {resource_dir} to include the missing dates.'.format(
static_start=static_start_date,
static_end=static_end_date,
given_start=cls.START_DATE.date(),
given_end=cls.END_DATE.date(),
resource_dir=_MARKET_DATA_DIR,
)
)
if cls.START_DATE.date() < static_start_date or \
cls.END_DATE.date() > static_end_date:
raise AssertionError(warning_message)
return benchmark_returns
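# Example (illustrative sketch, not part of the original code): fixtures mixing
# in ``WithBenchmarkReturns`` should keep START_DATE/END_DATE inside the date
# range of the checked-in CSV; the dates below are assumed to fall within it.
#
#     class ExampleWithBenchmark(WithBenchmarkReturns, ZiplineTestCase):
#         START_DATE = pd.Timestamp('2006-01-03', tz='utc')
#         END_DATE = pd.Timestamp('2006-12-29', tz='utc')
#
#         def test_benchmark_is_series(self):
#             assert isinstance(self.BENCHMARK_RETURNS, pd.Series)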
class WithSimParams(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
Attributes
----------
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``SimulationParameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``SimulationParameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
Methods
-------
make_simparams(**overrides)
Construct a ``SimulationParameters`` using the defaults defined by
fixture configuration attributes. Any parameters to
``SimulationParameters`` can be overridden by passing them by keyword.
See Also
--------
zipline.finance.trading.SimulationParameters
"""
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'daily'
SIM_PARAMS_EMISSION_RATE = 'daily'
SIM_PARAMS_START = alias('START_DATE')
SIM_PARAMS_END = alias('END_DATE')
@classmethod
def make_simparams(cls, **overrides):
kwargs = dict(
start_session=cls.SIM_PARAMS_START,
end_session=cls.SIM_PARAMS_END,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
kwargs.update(overrides)
return SimulationParameters(**kwargs)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
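# Example (illustrative sketch, not part of the original code): class-level
# defaults can be changed via the SIM_PARAMS_* attributes, while one-off values
# go through ``make_simparams``. ``WithTradingCalendars`` is mixed in because
# ``make_simparams`` reads ``cls.trading_calendar``.
#
#     class ExampleWithSimParams(WithSimParams, WithTradingCalendars,
#                                ZiplineTestCase):
#         SIM_PARAMS_CAPITAL_BASE = 1.0e6
#
#         def test_override_emission_rate(self):
#             params = self.make_simparams(emission_rate='minute')
#             assert params.emission_rate == 'minute'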
class WithTradingSessions(WithDefaultDateBounds, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.trading_days, cls.all_trading_sessions
as a class-level fixture.
After init_class_fixtures has been called, `cls.all_trading_sessions`
is populated with a dictionary of calendar name to the DatetimeIndex
containing the calendar trading days ranging from:
(DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY)
`cls.trading_days`, for compatibility with existing tests which make the
assumption that trading days are equity only, defaults to the nyse trading
sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias('START_DATE')
DATA_MAX_DAY = alias('END_DATE')
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias('nyse_sessions')
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
# Set name for aliasing.
setattr(cls,
'{0}_sessions'.format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
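# Example (illustrative sketch, not part of the original code): the per-calendar
# session indexes are reachable either through ``cls.trading_sessions`` or the
# aliased ``<name>_sessions`` attributes (``trading_days`` aliases the NYSE one).
#
#     class ExampleWithSessions(WithTradingSessions, ZiplineTestCase):
#         def test_sessions(self):
#             assert self.trading_days is self.trading_sessions['NYSE']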
class WithTmpDir(object):
"""
    ZiplineTestCase mixin providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
class WithInstanceTmpDir(object):
"""
    ZiplineTestCase mixin providing self.tmpdir as an instance-level fixture.
After init_instance_fixtures has been called, `self.tmpdir` is populated
with a `testfixtures.TempDirectory` object whose path is
`cls.TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
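# Example (illustrative sketch, not part of the original code): the class-level
# directory is shared by every test in the class, while the instance-level one
# is recreated for each test.
#
#     class ExampleWithTmpDirs(WithTmpDir, WithInstanceTmpDir, ZiplineTestCase):
#         def test_write_file(self):
#             path = self.instance_tmpdir.write('data.txt', b'contents')
#             assert os.path.exists(path)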
class WithEquityDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    EQUITY_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
EQUITY_DAILY_BAR_COUNTRY_CODES : tuple
        The countries to create data for. By default this is populated
with all of the countries present in the asset finder.
Methods
-------
make_equity_daily_bar_data(country_code, sids)
make_equity_daily_bar_currency_codes(country_code, sids)
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
""" # noqa
EQUITY_DAILY_BAR_START_DATE = alias('START_DATE')
EQUITY_DAILY_BAR_END_DATE = alias('END_DATE')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classproperty
def EQUITY_DAILY_BAR_COUNTRY_CODES(cls):
return cls.asset_finder.country_codes
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(cls, WithEquityMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Equity])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
"""
Create daily pricing data.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Yields
------
(int, pd.DataFrame)
A sid, dataframe pair to be passed to a daily bar writer.
The dataframe should be indexed by date, with columns of
('open', 'high', 'low', 'close', 'volume', 'day', & 'id').
"""
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(cls.equity_daily_bar_days, sids)
@classmethod
def make_equity_daily_bar_currency_codes(cls, country_code, sids):
"""Create listing currencies.
Default is to list all assets in USD.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Returns
-------
currency_codes : pd.Series[int, str]
Map from sids to currency for that sid's prices.
"""
return pd.Series(index=list(sids), data='USD')
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE)
)
if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session,
-1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.EQUITY_DAILY_BAR_END_DATE,
)
cls.equity_daily_bar_days = days
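# Example (illustrative sketch, not part of the original code): subclasses
# typically override ``make_equity_daily_bar_data`` to yield (sid, DataFrame)
# pairs of OHLCV data indexed by the sessions in ``cls.equity_daily_bar_days``.
#
#     class ExampleWithDailyBars(WithEquityDailyBarData, ZiplineTestCase):
#         @classmethod
#         def make_equity_daily_bar_data(cls, country_code, sids):
#             sessions = cls.equity_daily_bar_days
#             for sid in sids:
#                 yield sid, pd.DataFrame(
#                     {'open': 1.0, 'high': 1.0, 'low': 1.0,
#                      'close': 1.0, 'volume': 100.0},
#                     index=sessions,
#                 )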
class WithFutureDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_future_daily_bar_data.
Attributes
----------
FUTURE_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    FUTURE_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_future_daily_bar_data` will read data from
the minute bars defined by `WithFutureMinuteBarData`.
The current default is `False`, but could be `True` in the future.
Methods
-------
make_future_daily_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns an iterator of (sid, dataframe) pairs
which will be written to the bcolz files that the class's
``BcolzDailyBarReader`` will read from. By default this creates
some simple synthetic data with
:func:`~zipline.testing.create_daily_bar_data`
See Also
--------
WithFutureMinuteBarData
zipline.testing.create_daily_bar_data
"""
FUTURE_DAILY_BAR_USE_FULL_CALENDAR = False
FUTURE_DAILY_BAR_START_DATE = alias('START_DATE')
FUTURE_DAILY_BAR_END_DATE = alias('END_DATE')
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def FUTURE_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classmethod
def _make_future_daily_bar_from_minute(cls):
assert issubclass(cls, WithFutureMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.futures_sids)
minute_data = dict(cls.make_future_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Future])
@classmethod
def make_future_daily_bar_data(cls):
# Requires a WithFutureMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_future_daily_bar_from_minute()
else:
return create_daily_bar_data(
cls.future_daily_bar_days,
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Future]
if cls.FUTURE_DAILY_BAR_USE_FULL_CALENDAR:
days = trading_calendar.all_sessions
else:
if trading_calendar.is_session(cls.FUTURE_DAILY_BAR_START_DATE):
first_session = cls.FUTURE_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
| pd.Timestamp(cls.FUTURE_DAILY_BAR_START_DATE) | pandas.Timestamp |
#!/usr/bin/env python
import pandas as pd
import seaborn as sns
import pylab as plt
__package__ = "Byron times plot"
__author__ = "<NAME> (<EMAIL>)"
if __name__ == '__main__':
filename = 'byron_times.dat'
data = pd.read_csv(filename, sep=',', header=0)
n_version = len(data.Method.unique())
ref = data[data.Method == 'darknet']['Time']
ref = | pd.concat([ref]*n_version) | pandas.concat |
# this function runs the MLP on the 0.5 s binned data created by Shashiks
# features: the downloaded bytes amount is the feature to be updated.
import pandas as pd
import numpy as np
import os
import math
import argparse
from keras import Sequential
from keras.layers import Dense, BatchNormalization, Dropout, Conv1D, MaxPooling1D, Flatten
from sklearn.metrics import accuracy_score, average_precision_score, recall_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
# constant values
YT = 1
FB = 0
V360 = 1
VNormal = 0
# select the train and test ids according to this random state and the seen and unseen nature
def select_train_and_test_id(random_state, is_seen):
# a set means id sets ranging from the 1-10,21-20,...
if is_seen:
test_ind = list(np.arange(1, 51))
train_ind = list(np.arange(1, 51))
else:
num_of_ids_from_one_set = 3
all_videos = np.arange(1, 51)
test_ind = []
# make sure that each video id is present in the test set at least 2 times
for ind_set in range(5):
for sub_id in range(num_of_ids_from_one_set):
test_ind.append(ind_set * 10 + (sub_id + random_state))
train_ind = list(set(list(all_videos)) - set(list(test_ind)))
return train_ind, test_ind
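# Example (illustrative sketch): in the unseen setting each random_state picks
# 3 ids from every block of 10, so the split yields 15 test ids and 35 train
# ids drawn from 1..50.
#
#     train_ind, test_ind = select_train_and_test_id(random_state=1, is_seen=False)
#     assert len(train_ind) == 35 and len(test_ind) == 15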
# based on the train and test list get the original and synthesis dataset from the
# processed data
def truncate_train_list(temp_train_df, number_of_traces):
temp_train_df = shuffle(temp_train_df, random_state=123)
temp_train_df.reset_index(inplace=True, drop=True)
len_temp_train_df = temp_train_df.shape[0]
truncated_train_df = temp_train_df.loc[0:np.ceil(number_of_traces * len_temp_train_df) - 1, :]
return truncated_train_df
def get_synth_df(synth_df, indices, num_of_synth_samples):
temp_df_list = []
for i in (indices):
temp_df_list.append(
synth_df.loc[i * num_of_synth_samples:i * num_of_synth_samples + num_of_synth_samples - 1, :])
df = | pd.concat(temp_df_list) | pandas.concat |
## Creates the sequence export sheet; just a utility for LocusExtractor
import pandas as pd
import re
import utilities
from seq_utilities import trim_at_first_stop
class SequenceExporter:
def __init__(self,templateFile,locusList,genome_frame):
        # Read in the template
templateFrame = | pd.read_csv(templateFile,header=0) | pandas.read_csv |
from typing import List
import pandas as pd
from utils import request_to_json, get_repo_names
from github_pr import GitHubPR
from github_users import GitHubUsers
# temporary - to minimize the number of requests
REPO_NAMES = [
"dyvenia",
"elt_workshop",
"git-workshop",
"gitflow",
"notebooks",
"timeflow",
"timeflow_ui",
"timelogs",
"viadot",
]
class GitHubFlow:
"""
    For getting all information per contributor.
"""
def __init__(self):
# self.repo_names = get_repo_names()
self.contributor_info = GitHubUsers()
self.pr_info = GitHubPR()
def get_prs_per_user(self, contributor: str = None, repo: str = None) -> dict:
"""
        List all pull requests by the given user in the given repository.
Args:
contributor (str, optional): Contributor name. Defaults to None.
repo (str, optional): Repo name. Defaults to None.
Returns:
            dict: Dictionary containing all PRs by the contributor in the given repository.
"""
url = f"https://api.github.com/search/issues?q=is:pr+repo:dyvenia/{repo}+author:{contributor}"
pr_info = request_to_json(url)
final_dict_per_user = {}
try:
for ind in range(len(pr_info["items"])):
dict_per_user = {
"contributor": contributor,
"repo": repo,
"number": pr_info["items"][ind]["number"],
"title": pr_info["items"][ind]["title"],
}
final_dict_per_user[pr_info["items"][ind]["id"]] = dict_per_user
except KeyError as e:
print(f"For {contributor} : {e} is not found")
return final_dict_per_user
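    # Example (illustrative sketch with made-up values): the returned mapping is
    # keyed by the GitHub PR id, e.g.
    #
    #     {123456789: {'contributor': 'alice', 'repo': 'viadot',
    #                  'number': 42, 'title': 'Add connector'}}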
def list_all_pr_per_contributors(self, dict_repo_login: dict = None) -> List[dict]:
"""
        List combined pull requests for every contributor.
        Args:
            dict_repo_login (dict, optional): Each contribution that occurs in a given organization.
            The contributor is the key; the value is the list of repositories the user contributes to. Defaults to None.
        Returns:
            List[dict]: List of dictionaries. Each key is a PR id and each value holds the info for that PR.
"""
list_of_dict_prs = []
for key, value in dict_repo_login.items():
for repo in value:
dict_pr = self.get_prs_per_user(key, repo)
list_of_dict_prs.append(dict_pr)
return list_of_dict_prs
def create_pairs_contributor_repo(self, df_repo_login: pd.DataFrame = None) -> dict:
"""
        Create contributor-repository pairs, one for each contribution that occurs in a given organization.
Args:
df_repo_login (pd.DataFrame, optional): Each contribution that occurs in a given organization. Defaults to None.
Returns:
            dict: The contributor is the key; the value is the list of repositories the user contributes to.
"""
dict_repo_login = {}
dict_repo_login_raw = df_repo_login.to_dict("records")
for dct in dict_repo_login_raw:
try:
dict_repo_login[dct["login"]].append(dct["repo"])
except KeyError:
dict_repo_login[dct["login"]] = [dct["repo"]]
return dict_repo_login
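    # Example (illustrative sketch with made-up values): a frame whose
    # ('repo', 'login') rows are [('viadot', 'alice'), ('timeflow', 'alice'),
    # ('viadot', 'bob')] collapses to
    # {'alice': ['viadot', 'timeflow'], 'bob': ['viadot']}.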
def run_pr_info(self) -> pd.DataFrame:
"""
        Generate a DataFrame with information about all pull requests.
        The DataFrame contains the PR title and number per user and the repository where they contribute.
Returns:
pd.DataFrame: Data Frame["contributor", "repo", "number", "title"].
"""
df_all_contributions = self.contributor_info.get_all_contributions(REPO_NAMES)
dict_repo_login = self.create_pairs_contributor_repo(
df_all_contributions[["repo", "login"]]
)
list_of_dict_prs = self.list_all_pr_per_contributors(dict_repo_login)
df_transformed = pd.DataFrame(
[list_of.values() for list_of in list_of_dict_prs]
)
df = pd.DataFrame()
for x in df_transformed.columns:
df = pd.concat([df, df_transformed[x].apply(pd.Series)])
return df[["contributor", "repo", "number", "title"]].dropna()
def run_commit_info(self) -> pd.DataFrame:
"""
        Generate a DataFrame with information about all commits from pull requests.
        The DataFrame contains information about the author, PR number, message and commit date.
Returns:
pd.DataFrame: Data Frame["author", "pr_number", "date_commit", "message", "comment_count"].
"""
df = self.run_pr_info()
dict_pr_number_repo = {row["number"]: row["repo"] for _, row in df.iterrows()}
df_combined = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 13:38:04 2018
@author: nmei
"""
import pandas as pd
import os
working_dir = ''
batch_dir = 'batch'
if not os.path.exists(batch_dir):
os.mkdir(batch_dir)
content = '''
#!/bin/bash
# This is a script to qsub jobs
#$ -cwd
#$ -o test_run/out_q.txt
#$ -e test_run/err_q.txt
#$ -m be
#$ -M <EMAIL>
#$ -N "qjob"
#$ -S /bin/bash
'''
with open(os.path.join(batch_dir,'qsub_jobs'),'w') as f:
f.write(content)
df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
participants = | pd.unique(df['participant']) | pandas.unique |
# -*- coding: utf-8 -*-
"""Interface for flopy's implementation for MODFLOW."""
__all__ = ["MfSfrNetwork"]
import pickle
from itertools import combinations, zip_longest
from textwrap import dedent
import geopandas
import numpy as np
import pandas as pd
from shapely import wkt
from shapely.geometry import LineString, Point, Polygon, box
from shapely.ops import linemerge
from swn.core import SurfaceWaterNetwork
from swn.spatial import compare_crs, get_sindex
from swn.util import abbr_str
try:
import matplotlib
except ImportError:
matplotlib = False
class MfSfrNetwork:
"""MODFLOW SFR network class.
Attributes
----------
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model
segments : geopandas.GeoDataFrame
Copied from swn.segments, but with additional columns added
segment_data : pandas.DataFrame
Similar to structure in model.sfr.segment_data, but for one stress
period. Transient data (where applicable) will show summary statistics.
The index is 'nseg', ordered and starting from 1. An additional column
'segnum' is used to identify segments, and if defined,
abstraction/diversion identifiers, where iupseg != 0.
reaches : geopandas.GeoDataFrame
Similar to structure in model.sfr.reach_data with index 'reachID',
ordered and starting from 1. Contains geometry and other columns
not used by flopy. Use get_reach_data() for use with flopy.
diversions : geopandas.GeoDataFrame, pd.DataFrame or None
Copied from swn.diversions, if set/defined.
logger : logging.Logger
Logger to show messages.
"""
def __init__(self, logger=None):
"""Initialise MfSfrNetwork.
Parameters
----------
logger : logging.Logger, optional
Logger to show messages.
"""
from swn.logger import get_logger, logging
from importlib.util import find_spec
if logger is None:
self.logger = get_logger(self.__class__.__name__)
elif isinstance(logger, logging.Logger):
self.logger = logger
else:
raise ValueError(
"expected 'logger' to be Logger; found " + str(type(logger)))
self.logger.warning(
"using legacy MfSfrNetwork; consider using SwnModflow")
self.logger.info('creating new %s object', self.__class__.__name__)
if not find_spec('flopy'):
raise ImportError(self.__class__.__name__ + ' requires flopy')
self.segments = None
self.segment_data = None
self.reaches = None
self.diversions = None
# all other properties added afterwards
@classmethod
def from_swn_flopy(
cls, swn, model, ibound_action='freeze',
reach_include_fraction=0.2, min_slope=1./1000,
hyd_cond1=1., hyd_cond_out=None, thickness1=1., thickness_out=None,
width1=10., width_out=None, roughch=0.024,
abstraction={}, inflow={}, flow={}, runoff={}, etsw={}, pptsw={}):
"""Create a MODFLOW SFR structure from a surface water network.
Parameters
----------
swn : swn.SurfaceWaterNetwork
Instance of a SurfaceWaterNetwork.
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model with DIS and BAS6 packages.
ibound_action : str, optional
Action to handle IBOUND:
- ``freeze`` : Freeze IBOUND, but clip streams to fit bounds.
- ``modify`` : Modify IBOUND to fit streams, where possible.
reach_include_fraction : float or pandas.Series, optional
Fraction of cell size used as a threshold distance to determine if
reaches outside the active grid should be included to a cell.
Based on the furthest distance of the line and cell geometries.
Default 0.2 (e.g. for a 100 m grid cell, this is 20 m).
min_slope : float or pandas.Series, optional
Minimum downwards slope imposed on segments. If float, then this is
a global value, otherwise it is per-segment with a Series.
Default 1./1000 (or 0.001).
hyd_cond1 : float or pandas.Series, optional
Hydraulic conductivity of the streambed, as a global or per top of
each segment. Used for either STRHC1 or HCOND1/HCOND2 outputs.
Default 1.
hyd_cond_out : None, float or pandas.Series, optional
Similar to thickness1, but for the hydraulic conductivity of each
segment outlet. If None (default), the same hyd_cond1 value for the
top of the outlet segment is used for the bottom.
thickness1 : float or pandas.Series, optional
Thickness of the streambed, as a global or per top of each segment.
Used for either STRTHICK or THICKM1/THICKM2 outputs. Default 1.
thickness_out : None, float or pandas.Series, optional
Similar to thickness1, but for the bottom of each segment outlet.
If None (default), the same thickness1 value for the top of the
outlet segment is used for the bottom.
width1 : float or pandas.Series, optional
Channel width, as a global or per top of each segment. Used for
WIDTH1/WIDTH2 outputs. Default 10.
width_out : None, float or pandas.Series, optional
Similar to width1, but for the bottom of each segment outlet.
If None (default), the same width1 value for the top of the
outlet segment is used for the bottom.
roughch : float or pandas.Series, optional
Manning's roughness coefficient for the channel. If float, then
this is a global value, otherwise it is per-segment with a Series.
Default 0.024.
abstraction : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no abstraction from diversions).
inflow : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no outside inflow added to flow term).
flow : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
runoff : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
etsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
pptsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
logger : logging.Logger, optional
Logger to show messages.
"""
obj = cls()
import flopy
if not isinstance(swn, SurfaceWaterNetwork):
raise ValueError('swn must be a SurfaceWaterNetwork object')
elif ibound_action not in ('freeze', 'modify'):
raise ValueError('ibound_action must be one of freeze or modify')
obj.model = model
obj.segments = swn.segments.copy()
# Make sure model CRS and segments CRS are the same (if defined)
crs = None
segments_crs = getattr(obj.segments.geometry, 'crs', None)
modelgrid_crs = None
modelgrid = obj.model.modelgrid
epsg = modelgrid.epsg
proj4_str = modelgrid.proj4
if epsg is not None:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs, epsg)
else:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs,
proj4_str)
if (segments_crs is not None and modelgrid_crs is not None and
not same):
obj.logger.warning(
'CRS for segments and modelgrid are different: {0} vs. {1}'
.format(segments_crs, modelgrid_crs))
crs = segments_crs or modelgrid_crs
# Make sure their extents overlap
minx, maxx, miny, maxy = modelgrid.extent
model_bbox = box(minx, miny, maxx, maxy)
rstats = obj.segments.bounds.describe()
segments_bbox = box(
rstats.loc['min', 'minx'], rstats.loc['min', 'miny'],
rstats.loc['max', 'maxx'], rstats.loc['max', 'maxy'])
if model_bbox.disjoint(segments_bbox):
raise ValueError('modelgrid extent does not cover segments extent')
# More careful check of overlap of lines with grid polygons
obj.logger.debug('building model grid cell geometries')
dis = obj.model.dis
cols, rows = np.meshgrid(np.arange(dis.ncol), np.arange(dis.nrow))
ibound = obj.model.bas6.ibound[0].array.copy()
ibound_modified = 0
grid_df = pd.DataFrame({'row': rows.flatten(), 'col': cols.flatten()})
grid_df.set_index(['row', 'col'], inplace=True)
grid_df['ibound'] = ibound.flatten()
if ibound_action == 'freeze' and (ibound == 0).any():
# Remove any inactive grid cells from analysis
grid_df = grid_df.loc[grid_df['ibound'] != 0]
# Determine grid cell size
col_size = np.median(dis.delr.array)
if dis.delr.array.min() != dis.delr.array.max():
obj.logger.warning(
'assuming constant column spacing %s', col_size)
row_size = np.median(dis.delc.array)
if dis.delc.array.min() != dis.delc.array.max():
obj.logger.warning(
'assuming constant row spacing %s', row_size)
cell_size = (row_size + col_size) / 2.0
# Note: modelgrid.get_cell_vertices(row, col) is slow!
xv = modelgrid.xvertices
yv = modelgrid.yvertices
r, c = [np.array(s[1])
for s in grid_df.reset_index()[['row', 'col']].iteritems()]
cell_verts = zip(
zip(xv[r, c], yv[r, c]),
zip(xv[r, c + 1], yv[r, c + 1]),
zip(xv[r + 1, c + 1], yv[r + 1, c + 1]),
zip(xv[r + 1, c], yv[r + 1, c])
)
obj.grid_cells = grid_cells = geopandas.GeoDataFrame(
grid_df, geometry=[Polygon(r) for r in cell_verts], crs=crs)
obj.logger.debug('evaluating reach data on model grid')
grid_sindex = get_sindex(grid_cells)
reach_include = swn.segments_series(reach_include_fraction) * cell_size
# Make an empty DataFrame for reaches
obj.reaches = pd.DataFrame(columns=['geometry'])
obj.reaches.insert(1, column='row', value=pd.Series(dtype=int))
obj.reaches.insert(2, column='col', value=pd.Series(dtype=int))
empty_reach_df = obj.reaches.copy() # take this before more added
obj.reaches.insert(
1, column='segnum',
value=pd.Series(dtype=obj.segments.index.dtype))
obj.reaches.insert(2, column='dist', value=pd.Series(dtype=float))
empty_reach_df.insert(3, column='length', value=pd.Series(dtype=float))
empty_reach_df.insert(4, column='moved', value=pd.Series(dtype=bool))
# recursive helper function
def append_reach_df(df, row, col, reach_geom, moved=False):
if reach_geom.geom_type == 'LineString':
df.loc[len(df.index)] = {
'geometry': reach_geom,
'row': row,
'col': col,
'length': reach_geom.length,
'moved': moved,
}
elif reach_geom.geom_type.startswith('Multi'):
for sub_reach_geom in reach_geom.geoms: # recurse
append_reach_df(df, row, col, sub_reach_geom, moved)
else:
raise NotImplementedError(reach_geom.geom_type)
# helper function that returns early, if necessary
def assign_short_reach(reach_df, idx, segnum):
reach = reach_df.loc[idx]
reach_geom = reach['geometry']
threshold = reach_include[segnum]
if reach_geom.length > threshold:
return
cell_lengths = reach_df.groupby(['row', 'col'])['length'].sum()
this_row_col = reach['row'], reach['col']
this_cell_length = cell_lengths[this_row_col]
if this_cell_length > threshold:
return
grid_geom = grid_cells.at[(reach['row'], reach['col']), 'geometry']
# determine if it is crossing the grid once or twice
grid_points = reach_geom.intersection(grid_geom.exterior)
split_short = (
grid_points.geom_type == 'Point' or
(grid_points.geom_type == 'MultiPoint' and
len(grid_points) == 2))
if not split_short:
return
matches = []
# sequence scan on reach_df
for oidx, orch in reach_df.iterrows():
if oidx == idx or orch['moved']:
continue
other_row_col = orch['row'], orch['col']
other_cell_length = cell_lengths[other_row_col]
if (orch['geometry'].distance(reach_geom) < 1e-6 and
this_cell_length < other_cell_length):
matches.append((oidx, orch['geometry']))
if len(matches) == 0:
# don't merge, e.g. reach does not connect to adjacent cell
pass
elif len(matches) == 1:
# short segment is in one other cell only
# update new row and col values, keep geometry as it is
row_col1 = tuple(reach_df.loc[matches[0][0], ['row', 'col']])
reach_df.loc[idx, ['row', 'col', 'moved']] = row_col1 + (True,)
# self.logger.debug(
# 'moved short segment of %s from %s to %s',
# segnum, this_row_col, row_col1)
elif len(matches) == 2:
assert grid_points.geom_type == 'MultiPoint', grid_points.wkt
if len(grid_points) != 2:
obj.logger.critical(
'expected 2 points, found %s', len(grid_points))
# Build a tiny DataFrame of coordinates for this reach
reach_c = pd.DataFrame({
'pt': [Point(c) for c in reach_geom.coords[:]]
})
if len(reach_c) == 2:
# If this is a simple line with two coords, split it
reach_c.index = [0, 2]
reach_c.loc[1] = {
'pt': reach_geom.interpolate(0.5, normalized=True)}
reach_c.sort_index(inplace=True)
reach_geom = LineString(list(reach_c['pt'])) # rebuild
# first match assumed to be touching the start of the line
if reach_c.at[0, 'pt'].distance(matches[1][1]) < 1e-6:
matches.reverse()
reach_c['d1'] = reach_c['pt'].apply(
lambda p: p.distance(matches[0][1]))
reach_c['d2'] = reach_c['pt'].apply(
lambda p: p.distance(matches[1][1]))
reach_c['dm'] = reach_c[['d1', 'd2']].min(1)
# try a simple split where distances switch
ds = reach_c['d1'] < reach_c['d2']
cidx = ds[ds].index[-1]
# ensure it's not the index of either end
if cidx == 0:
cidx = 1
elif cidx == len(reach_c) - 1:
cidx = len(reach_c) - 2
row1, col1 = list(reach_df.loc[matches[0][0], ['row', 'col']])
reach_geom1 = LineString(reach_geom.coords[:(cidx + 1)])
row2, col2 = list(reach_df.loc[matches[1][0], ['row', 'col']])
reach_geom2 = LineString(reach_geom.coords[cidx:])
# update the first, append the second
reach_df.loc[idx, ['row', 'col', 'length', 'moved']] = \
(row1, col1, reach_geom1.length, True)
reach_df.at[idx, 'geometry'] = reach_geom1
append_reach_df(reach_df, row2, col2, reach_geom2, moved=True)
# self.logger.debug(
# 'split and moved short segment of %s from %s to %s and %s',
# segnum, this_row_col, (row1, col1), (row2, col2))
else:
obj.logger.critical(
'unhandled assign_short_reach case with %d matches: %s\n'
'%s\n%s', len(matches), matches, reach, grid_points.wkt)
def assign_remaining_reach(reach_df, segnum, rem):
if rem.geom_type == 'LineString':
threshold = cell_size * 2.0
if rem.length > threshold:
obj.logger.debug(
'remaining line segment from %s too long to merge '
'(%.1f > %.1f)', segnum, rem.length, threshold)
return
# search full grid for other cells that could match
if grid_sindex:
bbox_match = sorted(grid_sindex.intersection(rem.bounds))
sub = grid_cells.geometry.iloc[bbox_match]
else: # slow scan of all cells
sub = grid_cells.geometry
assert len(sub) > 0, len(sub)
matches = []
for (row, col), grid_geom in sub.iteritems():
if grid_geom.touches(rem):
matches.append((row, col, grid_geom))
if len(matches) == 0:
return
threshold = reach_include[segnum]
# Build a tiny DataFrame for just the remaining coordinates
rem_c = pd.DataFrame({
'pt': [Point(c) for c in rem.coords[:]]
})
if len(matches) == 1: # merge it with adjacent cell
row, col, grid_geom = matches[0]
mdist = rem_c['pt'].apply(
lambda p: grid_geom.distance(p)).max()
if mdist > threshold:
obj.logger.debug(
'remaining line segment from %s too far away to '
'merge (%.1f > %.1f)', segnum, mdist, threshold)
return
append_reach_df(reach_df, row, col, rem, moved=True)
elif len(matches) == 2: # complex: need to split it
if len(rem_c) == 2:
# If this is a simple line with two coords, split it
rem_c.index = [0, 2]
rem_c.loc[1] = {
'pt': rem.interpolate(0.5, normalized=True)}
rem_c.sort_index(inplace=True)
rem = LineString(list(rem_c['pt'])) # rebuild
# first match assumed to be touching the start of the line
if rem_c.at[0, 'pt'].touches(matches[1][2]):
matches.reverse()
rem_c['d1'] = rem_c['pt'].apply(
lambda p: p.distance(matches[0][2]))
rem_c['d2'] = rem_c['pt'].apply(
lambda p: p.distance(matches[1][2]))
rem_c['dm'] = rem_c[['d1', 'd2']].min(1)
mdist = rem_c['dm'].max()
if mdist > threshold:
obj.logger.debug(
'remaining line segment from %s too far away to '
'merge (%.1f > %.1f)', segnum, mdist, threshold)
return
# try a simple split where distances switch
ds = rem_c['d1'] < rem_c['d2']
cidx = ds[ds].index[-1]
# ensure it's not the index of either end
if cidx == 0:
cidx = 1
elif cidx == len(rem_c) - 1:
cidx = len(rem_c) - 2
row, col = matches[0][0:2]
rem1 = LineString(rem.coords[:(cidx + 1)])
append_reach_df(reach_df, row, col, rem1, moved=True)
row, col = matches[1][0:2]
rem2 = LineString(rem.coords[cidx:])
append_reach_df(reach_df, row, col, rem2, moved=True)
else:
obj.logger.critical(
'how does this happen? Segments from %d touching %d '
'grid cells', segnum, len(matches))
elif rem.geom_type.startswith('Multi'):
for sub_rem_geom in rem.geoms: # recurse
assign_remaining_reach(reach_df, segnum, sub_rem_geom)
else:
raise NotImplementedError(rem.geom_type)
for segnum, line in obj.segments.geometry.iteritems():
remaining_line = line
if grid_sindex:
bbox_match = sorted(grid_sindex.intersection(line.bounds))
if not bbox_match:
continue
sub = grid_cells.geometry.iloc[bbox_match]
else: # slow scan of all cells
sub = grid_cells.geometry
# Find all intersections between segment and grid cells
reach_df = empty_reach_df.copy()
for (row, col), grid_geom in sub.iteritems():
reach_geom = grid_geom.intersection(line)
if reach_geom.is_empty or reach_geom.geom_type == 'Point':
continue
remaining_line = remaining_line.difference(grid_geom)
append_reach_df(reach_df, row, col, reach_geom)
# Determine if any remaining portions of the line can be used
if line is not remaining_line and remaining_line.length > 0:
assign_remaining_reach(reach_df, segnum, remaining_line)
# Reassign short reaches to two or more adjacent grid cells
# starting with the shortest reach
reach_lengths = reach_df['length'].loc[
reach_df['length'] < reach_include[segnum]]
for idx in list(reach_lengths.sort_values().index):
assign_short_reach(reach_df, idx, segnum)
# Potentially merge a few reaches for each row/col of this segnum
drop_reach_ids = []
gb = reach_df.groupby(['row', 'col'])['geometry'].apply(list)
for (row, col), geoms in gb.copy().iteritems():
row_col = row, col
if len(geoms) > 1:
geom = linemerge(geoms)
if geom.geom_type == 'MultiLineString':
# workaround for odd floating point issue
geom = linemerge([wkt.loads(g.wkt) for g in geoms])
if geom.geom_type == 'LineString':
sel = ((reach_df['row'] == row) &
(reach_df['col'] == col))
drop_reach_ids += list(sel.index[sel])
obj.logger.debug(
'merging %d reaches for segnum %s at %s',
sel.sum(), segnum, row_col)
append_reach_df(reach_df, row, col, geom)
elif any(a.distance(b) < 1e-6
for a, b in combinations(geoms, 2)):
obj.logger.warning(
'failed to merge segnum %s at %s: %s',
segnum, row_col, geom.wkt)
# else: this is probably a meandering MultiLineString
if drop_reach_ids:
reach_df.drop(drop_reach_ids, axis=0, inplace=True)
# TODO: Some reaches match multiple cells if they share a border
# Add all reaches for this segment
for _, reach in reach_df.iterrows():
row, col, reach_geom = reach.loc[['row', 'col', 'geometry']]
if line.has_z:
# intersection(line) does not preserve Z coords,
# but line.interpolate(d) works as expected
reach_geom = LineString(line.interpolate(
line.project(Point(c))) for c in reach_geom.coords)
# Get a point from the middle of the reach_geom
reach_mid_pt = reach_geom.interpolate(0.5, normalized=True)
reach_record = {
'geometry': reach_geom,
'segnum': segnum,
'dist': line.project(reach_mid_pt, normalized=True),
'row': row,
'col': col,
}
obj.reaches.loc[len(obj.reaches.index)] = reach_record
if ibound_action == 'modify' and ibound[row, col] == 0:
ibound_modified += 1
ibound[row, col] = 1
if ibound_action == 'modify':
if ibound_modified:
obj.logger.debug(
'updating %d cells from IBOUND array for top layer',
ibound_modified)
obj.model.bas6.ibound[0] = ibound
obj.reaches = obj.reaches.merge(
grid_df[['ibound']],
left_on=['row', 'col'], right_index=True)
obj.reaches.rename(
columns={'ibound': 'prev_ibound'}, inplace=True)
else:
obj.reaches['prev_ibound'] = 1
# Now convert from DataFrame to GeoDataFrame
obj.reaches = geopandas.GeoDataFrame(
obj.reaches, geometry='geometry', crs=crs)
# Assign segment data
obj.segments['min_slope'] = swn.segments_series(min_slope)
if (obj.segments['min_slope'] < 0.0).any():
raise ValueError('min_slope must be greater than zero')
# Column names common to segments and segment_data
segment_cols = [
'roughch',
'hcond1', 'thickm1', 'elevup', 'width1',
'hcond2', 'thickm2', 'elevdn', 'width2']
# Tidy any previous attempts
for col in segment_cols:
if col in obj.segments.columns:
del obj.segments[col]
# Combine pairs of series for each segment
more_segment_columns = pd.concat([
swn.pair_segments_frame(hyd_cond1, hyd_cond_out, 'hcond'),
swn.pair_segments_frame(thickness1, thickness_out, 'thickm'),
swn.pair_segments_frame(width1, width_out, name='width',
method="constant")
], axis=1, copy=False)
for name, series in more_segment_columns.iteritems():
obj.segments[name] = series
obj.segments['roughch'] = swn.segments_series(roughch)
# Mark segments that are not used
obj.segments['in_model'] = True
outside_model = \
set(swn.segments.index).difference(obj.reaches['segnum'])
obj.segments.loc[list(outside_model), 'in_model'] = False
# Add information from segments
obj.reaches = obj.reaches.merge(
obj.segments[['sequence', 'min_slope']], 'left',
left_on='segnum', right_index=True)
obj.reaches.sort_values(['sequence', 'dist'], inplace=True)
# Interpolate segment properties to each reach
obj.reaches['strthick'] = 0.0
obj.reaches['strhc1'] = 0.0
for segnum, seg in obj.segments.iterrows():
sel = obj.reaches['segnum'] == segnum
if seg['thickm1'] == seg['thickm2']:
val = seg['thickm1']
else: # linear interpolate to mid points
tk1 = seg['thickm1']
tk2 = seg['thickm2']
dtk = tk2 - tk1
val = dtk * obj.reaches.loc[sel, 'dist'] + tk1
obj.reaches.loc[sel, 'strthick'] = val
if seg['hcond1'] == seg['hcond2']:
val = seg['hcond1']
else: # linear interpolate to mid points in log-10 space
lhc1 = np.log10(seg['hcond1'])
lhc2 = np.log10(seg['hcond2'])
dlhc = lhc2 - lhc1
val = 10 ** (dlhc * obj.reaches.loc[sel, 'dist'] + lhc1)
obj.reaches.loc[sel, 'strhc1'] = val
del obj.reaches['sequence']
del obj.reaches['dist']
# Use MODFLOW SFR dataset 2 terms ISEG and IREACH, counting from 1
obj.reaches['iseg'] = 0
obj.reaches['ireach'] = 0
iseg = ireach = 0
prev_segnum = None
for idx, segnum in obj.reaches['segnum'].iteritems():
if segnum != prev_segnum:
iseg += 1
ireach = 0
ireach += 1
obj.reaches.at[idx, 'iseg'] = iseg
obj.reaches.at[idx, 'ireach'] = ireach
prev_segnum = segnum
obj.reaches.reset_index(inplace=True, drop=True)
obj.reaches.index += 1 # flopy series starts at one
obj.reaches.index.name = 'reachID'
obj.reaches['rchlen'] = obj.reaches.geometry.length
obj.reaches['strtop'] = 0.0
obj.reaches['slope'] = 0.0
if swn.has_z:
for reachID, item in obj.reaches.iterrows():
geom = item.geometry
# Get Z from each end
z0 = geom.coords[0][2]
z1 = geom.coords[-1][2]
dz = z0 - z1
dx = geom.length
slope = dz / dx
obj.reaches.at[reachID, 'slope'] = slope
# Get strtop from LineString mid-point Z
zm = geom.interpolate(0.5, normalized=True).z
obj.reaches.at[reachID, 'strtop'] = zm
else:
r = obj.reaches['row'].values
c = obj.reaches['col'].values
# Estimate slope from top and grid spacing
px, py = np.gradient(dis.top.array, col_size, row_size)
grid_slope = np.sqrt(px ** 2 + py ** 2)
obj.reaches['slope'] = grid_slope[r, c]
# Get stream values from top of model
obj.reaches['strtop'] = dis.top.array[r, c]
# Enforce min_slope
sel = obj.reaches['slope'] < obj.reaches['min_slope']
if sel.any():
obj.logger.warning(
'enforcing min_slope for %d reaches (%.2f%%)',
sel.sum(), 100.0 * sel.sum() / len(sel))
obj.reaches.loc[sel, 'slope'] = obj.reaches.loc[sel, 'min_slope']
if not hasattr(obj.reaches.geometry, 'geom_type'):
# workaround needed for reaches.to_file()
obj.reaches.geometry.geom_type = obj.reaches.geom_type
# Build segment_data for Data Set 6
obj.segment_data = obj.reaches[['iseg', 'segnum']]\
.drop_duplicates().rename(columns={'iseg': 'nseg'})
# index changes from 'reachID', to 'segnum', to finally 'nseg'
segnum2nseg_d = obj.segment_data.set_index('segnum')['nseg'].to_dict()
obj.segment_data['icalc'] = 1 # assumption for all streams
obj.segment_data['outseg'] = obj.segment_data['segnum'].map(
lambda x: segnum2nseg_d.get(obj.segments.loc[x, 'to_segnum'], 0))
obj.segment_data['iupseg'] = 0 # handle diversions next
obj.segment_data['iprior'] = 0
obj.segment_data['flow'] = 0.0
obj.segment_data['runoff'] = 0.0
obj.segment_data['etsw'] = 0.0
obj.segment_data['pptsw'] = 0.0
# upper elevation from the first and last reachID items from reaches
obj.segment_data['elevup'] = \
obj.reaches.loc[obj.segment_data.index, 'strtop']
obj.segment_data['elevdn'] = obj.reaches.loc[
obj.reaches.groupby(['iseg']).ireach.idxmax().values,
'strtop'].values
obj.segment_data.set_index('segnum', drop=False, inplace=True)
# copy several columns over (except 'elevup' and 'elevdn', for now)
segment_cols.remove('elevup')
segment_cols.remove('elevdn')
obj.segment_data[segment_cols] = obj.segments[segment_cols]
# now use nseg as primary index, not reachID or segnum
obj.segment_data.set_index('nseg', inplace=True)
obj.segment_data.sort_index(inplace=True)
# Add diversions (i.e. SW takes)
if swn.diversions is not None:
obj.diversions = swn.diversions.copy()
# Mark diversions that are not used / outside model
obj.diversions['in_model'] = True
outside_model = []
# Add columns for ICALC=0
obj.segment_data['depth1'] = 0.0
obj.segment_data['depth2'] = 0.0
# workaround for coercion issue
obj.segment_data['foo'] = ''
is_spatial = (
isinstance(obj.diversions, geopandas.GeoDataFrame) and
'geometry' in obj.diversions.columns and
(~obj.diversions.is_empty).all())
if swn.has_z:
empty_geom = wkt.loads('linestring z empty')
else:
empty_geom = wkt.loads('linestring empty')
for divid, divn in obj.diversions.iterrows():
if divn.from_segnum not in segnum2nseg_d:
# segnum does not exist -- segment is outside model
outside_model.append(divid)
continue
iupseg = segnum2nseg_d[divn.from_segnum]
assert iupseg != 0, iupseg
nseg = len(obj.segment_data) + 1
rchlen = 1.0 # length required
thickm = 1.0 # thickness required
hcond = 0.0 # don't allow GW exchange
seg_d = dict(obj.segment_data.loc[iupseg])
seg_d.update({ # index is nseg
'segnum': divid,
'icalc': 0, # stream depth is specified
'outseg': 0,
'iupseg': iupseg,
'iprior': 0, # normal behaviour for SW takes
'flow': 0.0, # abstraction assigned later
'runoff': 0.0,
'etsw': 0.0,
'pptsw': 0.0,
'roughch': 0.0, # not used
'hcond1': hcond, 'hcond2': hcond,
'thickm1': thickm, 'thickm2': thickm,
'width1': 0.0, 'width2': 0.0, # not used
})
# Use the last reach as a template to modify for new reach
reach_d = dict(obj.reaches.loc[
obj.reaches.iseg == iupseg].iloc[-1])
reach_d.update({
'segnum': divid,
'iseg': nseg,
'ireach': 1,
'rchlen': rchlen,
'min_slope': 0.0,
'slope': 0.0,
'strthick': thickm,
'strhc1': hcond,
})
# Assign one reach at grid cell
if is_spatial:
# Find grid cell nearest to diversion
if grid_sindex:
bbox_match = sorted(
grid_sindex.nearest(divn.geometry.bounds))
# more than one nearest can exist! just take one...
num_found = len(bbox_match)
grid_cell = grid_cells.iloc[bbox_match[0]]
else: # slow scan of all cells
sel = grid_cells.intersects(divn.geometry)
num_found = sel.sum()
grid_cell = grid_cells.loc[sel].iloc[0]
if num_found > 1:
obj.logger.warning(
'%d grid cells are nearest to diversion %r, '
'but only taking the first %s',
num_found, divid, grid_cell)
row, col = grid_cell.name
strtop = dis.top[row, col]
reach_d.update({
'geometry': empty_geom, # divn.geometry,
'row': row,
'col': col,
'strtop': strtop,
})
else:
strtop = dis.top[reach_d['row'], reach_d['col']]
reach_d['strtop'] = strtop
seg_d.update({
'geometry': empty_geom,
'elevup': strtop,
'elevdn': strtop,
})
depth = strtop + thickm
seg_d.update({'depth1': depth, 'depth2': depth})
obj.reaches.loc[len(obj.reaches) + 1] = reach_d
obj.segment_data.loc[nseg] = seg_d
if outside_model:
obj.diversions.loc[list(outside_model), 'in_model'] = False
obj.logger.debug(
'added %d diversions, ignoring %d that did not connect to '
'existing segments',
obj.diversions['in_model'].sum(), len(outside_model))
else:
obj.logger.debug(
'added all %d diversions', len(obj.diversions))
# end of coercion workaround
obj.segment_data.drop('foo', axis=1, inplace=True)
else:
obj.diversions = None
# Finally, add/rename a few columns to align with reach_data
obj.reaches.insert(2, column='k', value=0)
obj.reaches.insert(3, column='outreach', value=pd.Series(dtype=int))
obj.reaches.rename(columns={'row': 'i', 'col': 'j'}, inplace=True)
# Create flopy Sfr2 package
segment_data = obj.set_segment_data(
abstraction=abstraction, inflow=inflow,
flow=flow, runoff=runoff, etsw=etsw, pptsw=pptsw, return_dict=True)
reach_data = obj.get_reach_data()
flopy.modflow.mfsfr2.ModflowSfr2(
model=obj.model, reach_data=reach_data, segment_data=segment_data)
return obj
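    # Example (illustrative sketch, assuming ``lines`` is a GeoSeries of stream
    # LineStrings and ``m`` is a flopy Modflow model with DIS and BAS6 packages):
    #
    #     n = SurfaceWaterNetwork.from_lines(lines)
    #     nm = MfSfrNetwork.from_swn_flopy(n, m, hyd_cond1=2.0, width1=5.0)
    #     m.sfr.write_file()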
def __repr__(self):
"""Return string representation of MfSfrNetwork object."""
is_diversion = self.segment_data['iupseg'] != 0
segnum_l = list(self.segment_data.loc[~is_diversion, 'segnum'])
segments_line = str(len(segnum_l)) + ' from segments'
if set(segnum_l) != set(self.segments.index):
segments_line += ' ({:.0%} used)'.format(
len(segnum_l) / float(len(self.segments)))
segments_line += ': ' + abbr_str(segnum_l, 4)
if is_diversion.any() and self.diversions is not None:
divid_l = list(self.segment_data.loc[is_diversion, 'segnum'])
diversions_line = str(len(divid_l)) + ' from diversions'
if set(divid_l) != set(self.diversions.index):
diversions_line += ' ({:.0%} used)'.format(
len(divid_l) / float(len(self.diversions)))
diversions_line += abbr_str(divid_l, 4)
else:
diversions_line = 'no diversions'
nper = self.model.dis.nper
return dedent('''\
<{}: flopy {} {!r}
{} in reaches ({}): {}
{} in segment_data ({}): {}
{}
{}
{} stress period{} with perlen: {} />'''.format(
self.__class__.__name__, self.model.version, self.model.name,
len(self.reaches), self.reaches.index.name,
abbr_str(list(self.reaches.index), 4),
len(self.segment_data), self.segment_data.index.name,
abbr_str(list(self.segment_data.index), 4),
segments_line,
diversions_line,
nper, '' if nper == 1 else 's',
abbr_str(list(self.model.dis.perlen), 4)))
def __eq__(self, other):
"""Return true if objects are equal."""
import flopy
try:
for (ak, av), (bk, bv) in zip_longest(iter(self), iter(other)):
if ak != bk:
return False
is_none = (av is None, bv is None)
if all(is_none):
continue
elif any(is_none):
return False
elif type(av) != type(bv):
return False
elif isinstance(av, pd.DataFrame):
pd.testing.assert_frame_equal(av, bv)
elif isinstance(av, pd.Series):
pd.testing.assert_series_equal(av, bv)
elif isinstance(av, flopy.modflow.mf.Modflow):
# basic test
assert str(av) == str(bv)
else:
assert av == bv
return True
except (AssertionError, TypeError, ValueError):
return False
def __iter__(self):
"""Return object datasets with an iterator."""
yield "class", self.__class__.__name__
yield "segments", self.segments
yield "segment_data", self.segment_data
yield "reaches", self.reaches
yield "diversions", self.diversions
yield "model", self.model
def __getstate__(self):
"""Serialize object attributes for pickle dumps."""
return dict(self)
def __setstate__(self, state):
"""Set object attributes from pickle loads."""
if not isinstance(state, dict):
raise ValueError("expected 'dict'; found {!r}".format(type(state)))
elif "class" not in state:
raise KeyError("state does not have 'class' key")
elif state["class"] != self.__class__.__name__:
raise ValueError("expected state class {!r}; found {!r}"
.format(state["class"], self.__class__.__name__))
self.__init__()
self.segments = state["segments"]
self.segment_data = state["segment_data"]
self.reaches = state["reaches"]
self.diversions = state["diversions"]
        # Note: model must be set outside of this method
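    # Example (illustrative sketch): the flopy model is not restored by
    # ``__setstate__``, so it has to be re-attached after unpickling.
    #
    #     data = pickle.dumps(nm)
    #     nm2 = pickle.loads(data)
    #     nm2.model = nm.model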
@property
def model(self):
"""Return flopy model object."""
try:
return getattr(self, '_model')
except AttributeError:
self.logger.error("'model' property not set")
@model.setter
def model(self, model):
import flopy
if not isinstance(model, flopy.modflow.mf.Modflow):
raise ValueError(
"'model' must be a flopy Modflow object; found " +
str(type(model)))
elif not model.has_package('DIS'):
raise ValueError('DIS package required')
elif not model.has_package('BAS6'):
raise ValueError('BAS6 package required')
if getattr(self, '_model', None) is not model:
self.logger.info("swapping 'model' object")
self._model = model
# Build stress period DataFrame from modflow model
stress_df = pd.DataFrame({'perlen': self.model.dis.perlen.array})
modeltime = self.model.modeltime
stress_df['duration'] = pd.TimedeltaIndex(
stress_df['perlen'].cumsum(), modeltime.time_units)
stress_df['start'] = pd.to_datetime(modeltime.start_datetime)
stress_df['end'] = stress_df['duration'] + stress_df.at[0, 'start']
stress_df.loc[1:, 'start'] = stress_df['end'].iloc[:-1].values
self._stress_df = stress_df # keep this for debugging
self.time_index = pd.DatetimeIndex(stress_df['start']).copy()
self.time_index.name = None
def plot(self, column='iseg',
cmap='viridis_r', legend=False):
"""
Show map of reaches with inflow segments in royalblue.
Parameters
----------
column : str
Column from reaches to use with 'cmap'; default 'iseg'.
See also 'legend' to help interpret values.
cmap : str
Matplotlib color map; default 'viridis_r',
legend : bool
Show legend for 'column'; default False.
Returns
-------
AxesSubplot
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.set_aspect('equal')
self.reaches[~self.reaches.is_empty].plot(
column=column, label='reaches', legend=legend, ax=ax, cmap=cmap)
self.grid_cells.plot(ax=ax, color='whitesmoke', edgecolor='gainsboro')
# return ax
is_diversion = self.segment_data['iupseg'] != 0
outlet_sel = (self.segment_data['outseg'] == 0) & (~is_diversion)
outlet_points = self.reaches.loc[self.reaches['iseg'].isin(
self.segment_data.loc[outlet_sel].index), 'geometry']\
.apply(lambda g: Point(g.coords[-1]))
outlet_points.plot(
ax=ax, label='outlet', marker='o', color='navy')
if 'inflow_segnums' in self.segment_data.columns:
inflow_sel = ~self.segment_data['inflow_segnums'].isnull()
inflow_points = self.reaches.loc[self.reaches['iseg'].isin(
self.segment_data.loc[inflow_sel].index), 'geometry']\
.apply(lambda g: Point(g.coords[0]))
inflow_points.plot(
ax=ax, label='inflow points', marker='o', color='royalblue')
return ax
def get_reach_data(self):
"""Return numpy.recarray for flopy's ModflowSfr2 reach_data.
Parameters
----------
None
Returns
-------
numpy.recarray
"""
from flopy.modflow.mfsfr2 import ModflowSfr2
# Build reach_data for Data Set 2
reach_data_names = []
for name in ModflowSfr2.get_default_reach_dtype().names:
if name in self.reaches.columns:
reach_data_names.append(name)
reach_data = pd.DataFrame(self.reaches[reach_data_names])
return reach_data.to_records(index=True)
def set_segment_data(self, abstraction={}, inflow={}, flow={}, runoff={},
etsw={}, pptsw={}, return_dict=False):
"""
Set timeseries data in segment_data required for flopy's ModflowSfr2.
This method does two things:
1. Updates sfr.segment_data, which is a dict of rec.array
for each stress period.
2. Updates summary statistics in segment_data if there is more
than one stress period; otherwise the values for the single
stress period are kept.
Other stationary data members that are part of segment_data
(e.g. hcond1, elevup, etc.) are not modified.
Parameters
----------
abstraction : dict or pandas.DataFrame, optional
Surface water abstraction from diversions. Default is {} (zero).
Keys are matched to diversions index.
inflow : dict or pandas.DataFrame, optional
Streamflow at the bottom of each segment, which is used to
determine the streamflow entering the upstream end of a segment if
it is not part of the SFR network. Internal flows are ignored.
A dict can be used to provide constant values to segnum
identifiers. If a DataFrame is passed for a model with more than
one stress period, the index must be a DatetimeIndex aligned with
the start of each model stress period.
Default is {} (no outside inflow added to flow term).
flow : dict or pandas.DataFrame, optional
Flow to the top of each segment. This is added to any inflow,
which is handled separately. This can be negative for withdrawals.
Default is {} (zero).
runoff : dict or pandas.DataFrame, optional
Runoff to each segment. Default is {} (zero).
etsw : dict or pandas.DataFrame, optional
Evapotranspiration removed from each segment. Default is {} (zero).
pptsw : dict or pandas.DataFrame, optional
Precipitation added to each segment. Default is {} (zero).
return_dict : bool, optional
If True, return segment_data instead of setting the sfr object.
Default False, which implies that an sfr object exists.
Returns
-------
None or dict (if return_dict is True)
"""
from flopy.modflow.mfsfr2 import ModflowSfr2
# Build stress period DataFrame from modflow model
dis = self.model.dis
stress_df = pd.DataFrame({'perlen': dis.perlen.array})
modeltime = self.model.modeltime
stress_df['duration'] = pd.TimedeltaIndex(
stress_df['perlen'].cumsum(), modeltime.time_units)
stress_df['start'] = pd.to_datetime(modeltime.start_datetime)
stress_df['end'] = stress_df['duration'] + stress_df.at[0, 'start']
stress_df.loc[1:, 'start'] = stress_df['end'].iloc[:-1].values
# Consider all IDs from segments/diversions
segments_segnums = set(self.segments.index)
has_diversions = self.diversions is not None
if has_diversions:
diversions_divids = set(self.diversions.index)
else:
diversions_divids = set()
def check_ts(data, name):
"""Return DataFrame with index along nper.
Columns are either segnum or divid (checked later).
"""
if isinstance(data, dict):
data = pd.DataFrame(data, index=stress_df['start'])
elif not isinstance(data, pd.DataFrame):
raise ValueError(
'{0} must be a dict or DataFrame'.format(name))
data.index.name = name # handy for debugging
if len(data) != dis.nper:
raise ValueError(
'length of {0} ({1}) is different than nper ({2})'
.format(name, len(data), dis.nper))
if dis.nper > 1: # check DatetimeIndex
if not isinstance(data.index, pd.DatetimeIndex):
raise ValueError(
'{0}.index must be a pandas.DatetimeIndex'
.format(name))
elif not (data.index == stress_df['start']).all():
try:
t = stress_df['start'].to_string(
index=False, max_rows=5).replace('\n', ', ')
except TypeError:
t = abbr_str(list(stress_df['start']))
raise ValueError(
'{0}.index does not match expected ({1})'
.format(name, t))
# Also do basic check of column IDs against diversions/segments
if name == 'abstraction':
if not has_diversions:
if len(data.columns) > 0:
self.logger.error(
'abstraction provided, but diversions are not '
'defined for the surface water network')
data.drop(data.columns, axis=1, inplace=True)
return data
parent = self.diversions
parent_name = 'diversions'
parent_s = diversions_divids
else:
parent = self.segments
parent_name = 'segments'
parent_s = segments_segnums
try:
data.columns = data.columns.astype(parent.index.dtype)
except (ValueError, TypeError):
raise ValueError(
'{0}.columns.dtype must be same as {1}.index.dtype'
.format(name, parent_name))
data_id_s = set(data.columns)
if len(data_id_s) > 0:
if data_id_s.isdisjoint(parent_s):
msg = '{0}.columns (or keys) not found in {1}.index: {2}'\
.format(name, parent_name, abbr_str(data_id_s))
if name == 'inflow':
self.logger.warning(msg)
else:
raise ValueError(msg)
if name != 'inflow': # some segnums accumulate outside flow
not_found = data_id_s.difference(parent_s)
if not data_id_s.issubset(parent_s):
self.logger.warning(
'dropping %s of %s %s.columns, which are '
'not found in %s.index: %s',
len(not_found), len(data_id_s), name,
parent_name, abbr_str(data_id_s))
data.drop(not_found, axis=1, inplace=True)
return data
self.logger.debug('checking timeseries data against modflow model')
abstraction = check_ts(abstraction, 'abstraction')
inflow = check_ts(inflow, 'inflow')
flow = check_ts(flow, 'flow')
runoff = check_ts(runoff, 'runoff')
etsw = check_ts(etsw, 'etsw')
pptsw = check_ts(pptsw, 'pptsw')
# Translate segnum/divid to nseg
is_diversion = self.segment_data['iupseg'] != 0
divid2nseg = self.segment_data[is_diversion]\
.reset_index().set_index('segnum')['nseg']
divid2nseg_d = divid2nseg.to_dict()
segnum2nseg = self.segment_data[~is_diversion]\
.reset_index().set_index('segnum')['nseg']
segnum2nseg_d = segnum2nseg.to_dict()
segnum_s = set(segnum2nseg_d.keys())
def map_nseg(data, name):
data_id_s = set(data.columns)
if len(data_id_s) == 0:
return data
if name == 'abstraction':
colid2nseg_d = divid2nseg_d
parent_descr = 'diversions'
else:
colid2nseg_d = segnum2nseg_d
parent_descr = 'regular segments'
colid_s = set(colid2nseg_d.keys())
not_found = data_id_s.difference(colid_s)
if not data_id_s.issubset(colid_s):
self.logger.warning(
'dropping %s of %s %s.columns, which are '
'not found in segment_data.index for %s',
len(not_found), len(data_id_s), name,
parent_descr)
data.drop(not_found, axis=1, inplace=True)
return data.rename(columns=colid2nseg_d)
self.logger.debug('mapping segnum/divid to segment_data.index (nseg)')
abstraction = map_nseg(abstraction, 'abstraction')
flow = map_nseg(flow, 'flow')
runoff = map_nseg(runoff, 'runoff')
etsw = map_nseg(etsw, 'etsw')
pptsw = map_nseg(pptsw, 'pptsw')
self.logger.debug('accumulating inflow from outside network')
# Create an 'inflows' DataFrame calculated from combining 'inflow'
inflows = pd.DataFrame(index=inflow.index)
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skipna"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with boolean data and some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when the dtypes of the pandas Series differ,
# the ndarray will have dtype=object,
# so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
from bs4.element import ProcessingInstruction
import requests
from bs4 import BeautifulSoup
import pandas
print()
listadenoticias = []
# I get the response from the page
retorno = requests.get('https://g1.globo.com/')
# I separate out the page content
conteudo = retorno.content
# I turn the content into a BeautifulSoup object
site = BeautifulSoup(conteudo, 'html.parser')
# the find method locates an element in the html, taking as parameters: 1: tag, 2: attribute
posts = site.findAll('div', attrs={'class': 'feed-post-body'})
for post in posts:
    # here I refine my search, going after only the title tag
titulo = post.find('a', attrs={'class':'feed-post-link'})
    # here I grab the subtitle
sub = post.find('a', attrs={'class': 'bstn-relatedtext'})
    # here I print only the text of the results
# print(titulo.text)
    # print(titulo['href']) # the tag's attributes can be accessed by key, as if it were a dict
if sub:
listadenoticias.append([titulo.text, sub.text, titulo['href']])
else:
listadenoticias.append([titulo.text, '', titulo['href']])
# This pandas DataFrame turns the content into columns
news = pandas.DataFrame(listadenoticias, columns=['Título', 'Subtítulo', 'Links'])
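# (added follow-up, not part of the original script) a minimal way to inspect and
# persist the scraped table; the output filename is only an example
print(news)
news.to_csv('noticias_g1.csv', index=False)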
####################################
# author: <NAME>
# course: Python for Data Science and Machine Learning Bootcamp
# purpose: lecture notes
# description: Section 06 - Python for Data Analysis, Pandas
# other: N/A
####################################
# PANDAS
# To know: Pandas will try to turn all numeric data into float in order to retain
# as much information as possible
import pandas as pd
import numpy as np
## Series (data type)
# It is like a NumPy array that contains axis labels so it can be indexed by a
# label.
labels = ['a','b','c']
my_data = [10,20,30]
arr = np.array(my_data)
d = {'a':10,'b':20,'c':30}
pd.Series(data = my_data) # w/o labels
pd.Series(data = my_data, index = labels)
pd.Series(arr,labels) # NumPy arrays or lists work equally as a Series
pd.Series(data = labels) # string data
# if we have a dictionary, the following line is unnecessary
pd.Series(data = my_data, index = d)
pd.Series(data = d) # this is a simplified version
# very flexible, e.g., built-in functions within a Series
pd.Series(data = [sum,print,len]) # not used in reality
# indexing - it will depend on what data type my index is
ser1 = pd.Series([1,2,3,4],['USA','Germany','Chile','Japan'])
ser2 = pd.Series([1,2,5,4],['USA','Germany','Italy','Japan'])
ser1['USA']
ser1[0] == ser1['USA']
# operations are by label - if there is no match, then a NaN is returned
ser1 + ser2
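# (added illustration) the labels that do not match ('Chile', 'Italy') come back
# as NaN, and since NaN cannot be stored in an integer Series the result is
# upcast to float64 (this illustrates the float-conversion note at the top)
(ser1 + ser2).dtype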
## DataFrames (made of Series objects)
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(data = randn(5,4),index = ['A','B','C','D','E'], \
columns = ['W','X','Y','Z'])
df
# each column is Series
df['W']
df.W # SQL nomenclature is also allowed!! [it can be messy, since it can get
# confounded with a DataFrame method!]
type(df['W'])
# df['column']['row'] - numeric indexing works just for rows
df['W']['A']
df['W'][0]
type(df['W']['A'])
df[['W','Z']] # an extract from a DataFrame which is a DataFrame by itself
type(df[['W','Z']])
# creating a new column - it can be defined as if it already existed
df['new'] = df['W'] + df['Y']
df
# deleting columns or rows - it does not happen in place!!
df.drop('new',axis = 1) # deletes the specified columns
df.drop(['B','C']) # deletes the specified rows
#df.drop(['B','C'], axis = 0) # same as the previous command, since axis = 0 is the default
df # not in place!!!
# to make it happen in place, I have to options
df = df.drop('new',axis = 1) # re-define df
#df.drop('new',axis = 1, inplace = True) # activate the inplace option
df
# shape or DataFrame dimensions
df.shape # 2-tuple: (rows, columns)
# selecting rows
# using loc - note it requires brackets instead of parentheses.
df.loc['A'] # series
df.loc[['A','B']] # DataFrame
# using numerical position with iloc
df.iloc[0]
df.iloc[[0,1]]
# selecting subsets
df.loc['B','Y'] # row first, column second
df['Y']['B'] # column first, row second
df.loc['B','Y'] == df['Y']['B']
df.loc[['A','B'],['W','X']]
df[['W','X']][:2]
df.loc[['A','B'],['W','X']] == df[['W','X']][:2]
# conditional selection
booldf = df > 0
df[booldf] # NaN when the value is false
df['W']>0 # a series
df[df['W']>0] # filtering by rows that fulfill the specified condition
# working with a filtered dataset
### one way
new_df = df[df['Z']<0]
new_df['X']
### another way
df[df['Z']<0][['X','Y']]
### and / or does not normally work in this environment, since we have plenty
### of values within a column. Both are built to be used for comparing just
### a single True/False value, not multiple (truth value of a Series is
### ambiguous). Instead what we need to use is the & symbol for 'and' and | for
### 'or'. These, now, are going to allow us to make multiple comparisons at
### the same time
df[(df['W']>0) & (df['X']>1)]
df[(df['W']>0) | (df['X']>1)]
# resetting / setting the index - not occurring in place!
df.reset_index() # it resets index values to numbers and creates a new column
# with their former values
# df.reset_index(inplace = True) # in place!
newind = 'CA NY WY OR CO'.split() # fast way to create a new list
df['States'] = newind
df.set_index('States') # setting an existing column as index
df
# multi-level indexing
outside = ['G1','G1','G1','G2','G2','G2']
inside = [1,2,3,1,2,3]
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index.levels # note each index level outside has an inside index
hier_index.levshape # from outside to inside
df = pd.DataFrame(randn(6,2),hier_index,['A','B'])
df
### indexing
df.loc['G1'] # data frame, outside index
df.loc['G1'].loc[1] # series, inside index
### index names
df.index.names # no names has been assigned
df.index.names = ['Groups','Num']
df.loc['G2'].loc[2]['B']
# cross - sections (xs function): useful when we want to extract info from a
# particular level that is common to each outside index
df.xs('G1') # easy
df.xs(1,level = 'Num') # non-trivial
## Missing Data
d = {'A':[1,2,np.nan],'B':[5,np.nan,np.nan],'C':[1,2,3]}
df = pd.DataFrame(d)
# dropping missing values
df.dropna() # it drops any ROW with missing values
df.dropna(axis = 1) # it drops any COLUMN with missing values
df.dropna(thresh = 2) # it keeps rows with at least 2 non-na values
df.dropna(thresh = 1) # it keeps rows with at least 1 non-na values
# filling missing values
df.fillna(value = 'FILL VALUE')
df['A'].fillna(value = df['A'].mean()) # for instance with the mean of the column
## Group By - same sort of stuff from SQL; group together rows based off of
# a column and perform an aggregate function on them
data = {'Company': ['GOOG','GOOG','MSFT','MSFT','FB','FB'], \
'Person': ['Sam','Charlie','Amy','Vanessa','Carl','Sarah'], \
'Sales': [200,120,340,124,243,350]}
df = pd.DataFrame(data)
# step 1 - group by a specific column
byComp = df.groupby('Company')
# step 2 - aggregate values using a specific operation (function)
byComp.mean() # Pandas ignores non-numeric columns, such as Person
byComp.sum()
byComp.sum().loc['FB']
byComp.std()
# all together
df.groupby('Company').sum().loc['FB']
df.groupby('Company').count()
# useful information
df.groupby('Company').describe().transpose()
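# (added example) the transposed summary can then be sliced for a single company
df.groupby('Company').describe().transpose()['GOOG']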
## Merging, Joining and Concatenating
df1 = pd.DataFrame({'A':['A0','A1','A2','A3'],\
'B':['B0','B1','B2','B3'],\
'C':['C0','C1','C2','C3'],\
'D':['D0','D1','D2','D3']},\
index = [0,1,2,3])
df2 = pd.DataFrame({'A':['A4','A5','A6','A7'],\
'B':['B4','B5','B6','B7'],\
'C':['C4','C5','C6','C7'],\
'D':['D4','D5','D6','D7']},\
index = [4,5,6,7])
df3 = pd.DataFrame({'A':['A8','A9','A10','A11'],\
'B':['B8','B9','B10','B11'],\
'C':['C8','C9','C10','C11'],\
'D':['D8','D9','D10','D11']},\
index = [8,9,10,11])
# concatenating - dimensions should match along the axis we are concatenating on
# by default the axis = 0 (along rows)
pd.concat([df1,df2,df3])
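# (added example) concatenating along columns instead; rows whose indices do not
# match across the frames are filled with NaN
pd.concat([df1,df2,df3], axis = 1)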
from alphaVantageAPI.alphavantage import AlphaVantage
from unittest import TestCase
from unittest.mock import patch
from pandas import DataFrame, read_csv
from .utils import Path
from .utils import Constant as C
from .utils import load_json, _mock_response
## Python 3.7 + Pandas DeprecationWarning
# /alphaVantageAPI/env/lib/python3.7/site-packages/pandas/core/frame.py:7476:
# DeprecationWarning: Using or importing the ABCs from "collections" instead of from "collections.abc" is deprecated, and in 3.8 it will stop working elif isinstance(data[0], collections.Mapping):
class TestAlphaVantageAPI(TestCase):
@classmethod
def setUpClass(cls):
cls.test_data_path = C.TEST_DATA_PATH
# Set premium to True to avoid API throttling for testing
av = AlphaVantage(api_key=C.API_KEY_TEST, premium=True)
# Minimum parameters
cls.fx_parameters = {"function":"CURRENCY_EXCHANGE_RATE", "from_currency":"USD", "to_currency":"JPY"}
cls.fx_daily_parameters = {"function":"FX_DAILY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_intraday_parameters = {"function":"FX_INTRADAY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_monthly_parameters = {"function":"FX_MONTHLY", "from_currency":"EUR", "to_currency":"USD"}
cls.fx_weekly_parameters = {"function":"FX_WEEKLY", "from_currency":"EUR", "to_currency":"USD"}
cls.data_parameters = {"function":"TIME_SERIES_DAILY_ADJUSTED", "symbol":C.API_DATA_TEST}
cls.intraday_parameters = {"function":"TIME_SERIES_INTRADAY", "symbol":C.API_DATA_TEST}
cls.indicator_parameters = {"function":"RSI", "symbol":C.API_DATA_TEST, "interval":"weekly", "series_type":"open", "time_period":10}
cls.digital_parameters = {"function":"DIGITAL_CURRENCY_DAILY", "symbol":C.API_DIGITAL_TEST, "market":"CNY"}
cls.digital_rating_parameters = {"function":"CRYPTO_RATING", "symbols":C.API_DIGITAL_TEST}
cls.global_quote_parameters = {"function":"GLOBAL_QUOTE", "symbols":C.API_DIGITAL_TEST}
cls.overview_parameters = {"function":"OVERVIEW", "symbols":C.API_FUNDA_TEST}
cls.balance_parameters = {"function":"BALANCE_SHEET", "symbols":C.API_FUNDA_TEST}
cls.income_parameters = {"function":"INCOME_STATEMENT", "symbols":C.API_FUNDA_TEST}
cls.cashflow_parameters = {"function":"CASH_FLOW", "symbols":C.API_FUNDA_TEST}
cls.earnings_parameters = {"function": "EARNINGS_CALENDAR"}
cls.ipos_parameters = {"function": "IPO_CALENDAR"}
cls.listing_parameters = {"function": "LISTING_STATUS"}
# json files of sample data
cls.json_fx = load_json(cls.test_data_path / "mock_fx.json")
cls.json_fx_daily = load_json(cls.test_data_path / "mock_fx_daily.json")
cls.json_fx_intraday = load_json(cls.test_data_path / "mock_fx_intraday.json")
cls.json_fx_monthly = load_json(cls.test_data_path / "mock_fx_monthly.json")
cls.json_fx_weekly = load_json(cls.test_data_path / "mock_fx_weekly.json")
cls.json_data = load_json(cls.test_data_path / "mock_data.json")
cls.json_indicator = load_json(cls.test_data_path / "mock_indicator.json")
cls.json_digital = load_json(cls.test_data_path / "mock_digital.json")
cls.json_digital_rating = load_json(cls.test_data_path / "mock_digital_rating.json")
cls.json_global_quote = load_json(cls.test_data_path / "mock_global_quote.json")
cls.json_overview = load_json(cls.test_data_path / "mock_overview.json")
cls.json_balance = load_json(cls.test_data_path / "mock_balance_sheet.json")
cls.json_income = load_json(cls.test_data_path / "mock_income_statement.json")
cls.json_cashflow = load_json(cls.test_data_path / "mock_cash_flow.json")
# csv files of sample data
cls.csv_earnings_cal = read_csv(cls.test_data_path / "mock_earnings_cal.csv")
        cls.csv_ipos_cal = read_csv(cls.test_data_path / "mock_ipos_cal.csv")
from urllib.request import urlretrieve
import os # we want python to be able to read what we have in our hard drive
from statsmodels.tsa.arima.model import ARIMA
import numpy as np
import pandas as pd
from pmdarima import auto_arima
from matplotlib import cm
import matplotlib.pyplot as plt
import seaborn as sns
class EnergyAnalysis:
"""
Class that controls all class methods and finally
delivers the requested information.
It analyses energy data.
Attributes
----------------
url: str
The url for the requested file
output_file: str
Desired name to the file
df: pandas.DataFrame
The padas dataframe with the content of the file downloaded
Methods
----------------
__init__: Init method
Class constructor to inizialize the attributes of the class.
download_file: Download method
Download a file base on the url strored in the object of the class,
and reurns a pandas dataframe with the data
"""
def __init__(self):
"""
Class constructor to inizialize the attributes of the class.
Parameters
----------------
url: str
The url for the requested file
output_file: str
The name of the output file
df: pandas dataframe
            The pandas dataframe that will hold the downloaded dataset
"""
self.url = "https://nyc3.digitaloceanspaces.com/owid-public/data/energy/owid-energy-data.csv"
self.output_file = "energy_data.csv"
self.df = None
self.download_file()
self.enrich_with_emission()
self.relevant_and_total_consumption()
# method 1 --> download file and read the csv to df attribute the pandas dataframe.
def download_file(self):
"""
        Downloads a file from the object.url address into your hard drive and reads the dataset into the df attribute, which is a pandas dataframe.
Parameters
----------------
None
Returns
----------------
dataset: pandas dataframe
Example
----------------
object.download_file()
"""
fullfilename = os.path.join("./downloads/" + self.output_file)
if not os.path.exists("./downloads/"):
os.makedirs("./downloads/")
urlretrieve(self.url, filename=fullfilename)
elif not os.path.exists(fullfilename):
urlretrieve(self.url, filename=fullfilename)
else:
print("File already exists!")
try:
# If file doesn't exist, download it. Else, print a warning message.
self.df = pd.read_csv(fullfilename)
self.df = self.df[(self.df["year"] >= 1970)]
except Exception:
raise Exception("Error 404") from Exception
# method 2 --> list all the available countries
def list_countries(self):
"""
Returns a list of all available countries in the dataset
Parameter
----------------
None
Raises
----------------
None
Returns
----------------
Array
"""
region_list = [
"Africa",
"Asia Pacific",
"CIS",
"Central America",
"Eastern Africa",
"Europe",
"Europe (other)",
"Middle Africa",
"Middle East",
"North America",
"OPEC",
"Other Asia & Pacific",
"Other CIS",
"Other Caribbean",
"Other Middle East",
"Other Northern Africa",
"Other South America",
"Other Southern Africa",
"South & Central America",
"USSR",
"Western Africa",
"Western Sahara",
"World",
]
return self.df[(~self.df["country"].isin(region_list))].country.unique()
# method 3 -->
def show_consumption(self, country: str, normalize: bool):
"""
        Plots the normalized or non-normalized consumption of the past years of a given country.
Parameter
----------------
country: str
Name of the country that we want to analyze the consumption.
normalize: bool
            Option of whether or not to normalize the consumption data.
Raises
----------------
ValueError
            If the country is not present in the dataset
Returns
----------------
None
"""
if country in self.list_countries():
aux = self.df[(self.df["country"] == country)]
# aux = aux[(aux["year"] <= 2019)]
# selects the "_consumption" columns
cols = [col for col in self.df.columns if "_consumption" in col]
cols.remove("total_consumption")
aux = aux.fillna(value=0)
norm = aux[cols]
# normalize the consumptions values to percentages
if normalize:
norm[cols] = norm[cols].apply(lambda x: (x / x.sum()) * 100, axis=1)
x = norm
x["year"] = aux["year"]
# plot
plt.style.use("seaborn")
x.plot.area(x="year", cmap=cm.get_cmap("Paired"))
plt.title("Consumption in " + country, fontsize=14)
plt.xlabel("Year", fontsize=14)
plt.ylabel("Consumption(in terawatt-hours)", fontsize=14)
plt.show()
else:
raise ValueError("Country does not exist.")
# method 4 -->
def consumption_country(self, countries: str):
"""
Select the Countries, sum up the total per year and plot it
Parameters
----------------
countries: list
A list with all countries to be analyzed
Returns
----------------
Plot with consumption and countries
Example
----------------
object.consumption_country(["Switzerland", "Portugal", "Chile"])
"""
# Create a list with all _consumption columns and create a new dataframe
consumption_list = self.df.filter(like="_consumption").columns
consumption_data = self.df[
[
"country",
"year",
"biofuel_consumption",
"coal_consumption",
"gas_consumption",
"hydro_consumption",
"nuclear_consumption",
"oil_consumption",
"other_renewable_consumption",
"solar_consumption",
"wind_consumption",
"total_consumption",
]
]
# calculate the sum of all consumption per year
# Create a dataframe for every country needed and drop NaN
for i in countries:
globals()[i] = consumption_data[consumption_data["country"] == i]
indexNames = globals()[i][globals()[i]["total_consumption"] < 1].index
globals()[i].drop(indexNames, inplace=True)
# plot the total consumption
for i in countries:
plt.plot(globals()[i]["year"], globals()[i]["total_consumption"], label=i)
plt.title("Consumption per Year", fontsize=14)
plt.xlabel("Year", fontsize=14)
plt.ylabel("Total Consumption (in terawatt-hours)", fontsize=14)
plt.grid(True)
plt.legend()
plt.show()
# method 5 -->
def gdp_country(self, countries: str):
"""
Select the Countries, and plot the gdp over the years
Parameters
----------------
countries: list
A list with all countries to be analyzed
Returns
----------------
Plot with gdp and countries
Example
----------------
object.gdp_country(["Switzerland", "Portugal", "Chile"])
"""
# Select the columns Country, Year and gdp and create a new dataframe
gdp_data = self.df[["country", "year", "gdp"]]
# Create a dataframe for every country needed and drop NaN
for i in countries:
globals()[i] = gdp_data[gdp_data["country"] == i]
gdp_data.dropna(subset=["gdp"], inplace=True)
# plot the total consumption
for i in countries:
plt.plot(globals()[i]["year"], globals()[i]["gdp"], label=i)
plt.title("GDP per Year", fontsize=14)
plt.xlabel("Year", fontsize=14)
plt.ylabel("GDP per Year", fontsize=14)
plt.grid(True)
plt.legend()
plt.show()
# method 6 -->
def gapminder(self, y: int):
"""
Plots a scatter Plot comparing the Gdp of each country and its Total Energy Consumption of a given year.
The population of each country can also be compared by the size of the data points.
Parameter
----------------
year: int
Year that we want to analyse countries' GDP and Total Energy Consumption
Raises
----------------
        TypeError
If the input given is not an 'int'
Returns
----------------
Scatter plot
Example:
----------------
object.gapminder(2010)
"""
# From the Dataset only the columns of the problem were Selected
dataframe = self.df.filter(
regex="year|country|population|consumption|gdp|total_consumption"
)
dataframe = dataframe.fillna(value=0)
# Define the size of the plot for better visualization
fig = plt.figure(figsize=(15, 10))
if type(y) != int:
raise TypeError("Variable 'y' is not int.")
else:
year = dataframe[dataframe["year"] == pd.to_datetime(y, format="%Y")]
# x-axis values
x = year["gdp"]
# y-axis values
y = year["total_consumption"]
p = year["population"]
# size = [2*n for n in range(len(p))]
size = year["population"]
# plotting points as a scatter plot
plt.scatter(
x,
y,
label="Population Size",
edgecolors="black",
marker="o",
lw=1,
c=year.population,
s=year.population / 2 ** 18,
cmap="viridis",
)
plt.colorbar(
label="Total Energy Consumption (in terawatt-hours) ", shrink=1
)
plt.tick_params(labelsize=20)
# x-axis label
plt.xlabel("GDP", fontsize=20)
# x-axis label
plt.ylabel("Total Energy Consumption (in terawatt-hours)", fontsize=20)
# plot title
plt.title(
"Countries GDP and Energy Consumption in a given Year", fontsize=20
)
# Editing the Legend
pws = [500000, 10000000, 100000000, 1000000000]
for pw in pws:
plt.scatter(
[], [], s=pw / 2 ** 18, c="k", label=str(pw), cmap="viridis"
)
h, l = plt.gca().get_legend_handles_labels()
plt.legend(
h[1:],
l[1:],
labelspacing=1.9,
title="Population",
borderpad=0.9,
frameon=True,
framealpha=0.6,
edgecolor="blue",
facecolor="lightblue",
fontsize=20,
title_fontsize=25,
)
# Change the X and Y axis scale for better visualization
plt.xscale("log")
plt.yscale("log")
plt.grid()
# function to show the plot
f = plt.show()
return f
# Final Method
def Emissions_Consumption(self, y):
"""
Plots a scatter Plot comparing the Total Emissions of each country and its Total Energy Consumption of a given year.
The population of each country can also be compared by the size of the data points.
Parameter
----------------
year: int
            Year that we want to analyse countries' Total Emissions and Total Energy Consumption
Raises
----------------
        TypeError
If the input given is not an 'int'
Returns
----------------
Scatter plot
How to use it:
object.Emissions_Consumption(2010)
Quick Notes about the Scales of X & Y Axis
----------------
X Axis
Eg: 0.2 * 1e11 = 20 000 000 000 Tonnes of CO2 emissions
Y Axis
Eg: 100 000 = 100 000 of Energy Consumed Tera-Watts
"""
# From the Dataset only the columns of the problem were Selected
dataframe = self.df.filter(
regex="year|country|population|consumption|total_emissions|total_consumption"
)
# Here we Sum all the types of Consumptions and create a new column out of it
# Define the size of the plot for better visualization
fig = plt.figure(figsize=(15, 10))
year = dataframe[dataframe["year"] == pd.to_datetime(y, format="%Y")]
# Raise error if the input of the Method is not an integer
if type(y) != int:
raise TypeError("Variable 'y' is not int.")
# Plot a Scatter plot if Otherwise
else:
# x-axis values
x = year["total_emissions"]
# y-axis values
y = year["total_consumption"]
p = year["population"]
n = year["country"]
size = year["population"]
# plotting points as a scatter plot
plt.scatter(
x,
y,
label="Population Size",
edgecolors="black",
marker="o",
lw=1,
c=year.population,
s=year.population / 2 ** 19,
cmap="viridis",
)
plt.colorbar(label="Total Energy Consumption", shrink=1)
plt.tick_params(labelsize=20)
# x-axis label
plt.xlabel("Total Emissions (in tonnes of CO2)", fontsize=20)
# x-axis label
plt.ylabel("Total Energy Consumption (in terawatt-hours)", fontsize=20)
# plot title
plt.title(
"Countries Emissions and Energy Consumption in a given Year",
fontsize=20,
)
# Editing the Legend
pws = [500000, 10000000, 100000000, 1000000000]
for pw in pws:
plt.scatter(
[], [], s=pw / 2 ** 19, c="k", label=str(pw), cmap="viridis"
)
h, l = plt.gca().get_legend_handles_labels()
plt.legend(
h[1:],
l[1:],
labelspacing=1.9,
title="Population",
borderpad=0.9,
frameon=True,
framealpha=0.6,
edgecolor="blue",
facecolor="lightblue",
fontsize=20,
title_fontsize=25,
)
# Limit the Axis to fit all the Data Points
plt.ylim([-20000, 500000])
plt.xlim([-2000000, 1.29e11])
# Change the X and Y axis scale for better visualization
plt.xscale("linear")
plt.yscale("linear")
# Add background gridlines for better orientation
plt.grid()
# Function to show the plot
f = plt.show()
return f
# new method 4 (adjusted method 4 from the first day) -->
def consumption_emission_country(self, countries: str):
"""
Select the Countries, sum up the total consumption and emission per year and plot it on two different axes
Parameters
----------------
countries: list
A list with all countries to be analyzed
Returns
----------------
Plot with consumption, emmision and countries
Example
----------------
object.consumption_country(["Germany", "Russia", "China"])
"""
for country in countries:
if country in self.list_countries():
pass
else:
raise ValueError(
f"One of your selected countries ({country}) is not in the list for countries"
)
if type(countries) != list:
raise ValueError("Input is not a list")
else:
# Ze's part
self.df = self.df
pd.set_option("display.max_columns", None)
# Yannick's part
# Load the data into Dataframe
df = self.df
consumption_data = self.df[
[
"country",
"year",
"biofuel_consumption",
"coal_consumption",
"gas_consumption",
"hydro_consumption",
"nuclear_consumption",
"oil_consumption",
"other_renewable_consumption",
"solar_consumption",
"wind_consumption",
"total_consumption",
"total_emissions",
]
]
            # Create a Dataframe for every Country in list "Countries" and delete the last line (51) if we have data from 2020
for i in countries:
globals()[i] = consumption_data[consumption_data["country"] == i]
if len(globals()[i]) > 51:
n = 1
globals()[i].drop(globals()[i].tail(n).index, inplace=True)
            # Create two empty lists and fill them with the adjusted Country names from the list "Countries"
df_names_consumption = []
df_names_emission = []
for country in countries:
consumption = country + "_Consumption"
emission = country + "_Emission"
df_names_consumption.append(consumption)
df_names_emission.append(emission)
# Set up the plot
fig = plt.figure(figsize=(13, 10))
ax = fig.add_subplot()
# Create a list for the legend
lns = list()
# Create dataframes for every country and axes
for a, b in zip(df_names_consumption, countries):
t = globals()[b]["year"]
globals()[a] = globals()[b]["total_consumption"]
[a] = ax.plot(t, globals()[a], "-", label=f"Total Consumption {a}")
lns.append(a)
ax2 = ax.twinx()
for a, b in zip(df_names_emission, countries):
globals()[a] = globals()[b]["total_emissions"]
[a] = ax2.plot(t, globals()[a], "--", label=f"Total Emissions {a}")
lns.append(a)
# Create the legend
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=0)
# Plot the result
ax.grid()
ax.set_xlabel("Year")
ax.set_ylabel("Total Consumption of a country (in terawatt-hours)")
ax2.set_ylabel("Total Emissions of a country (in tonnes of CO2)")
# plt.xlim(1985, 2019)
plt.show()
def enrich_with_emission(self):
"""
        Enriches the dataset with the information about the emissions of each energy resource
        and computes the total consumption in each row.
Parameters
----------------
None
Returns
----------------
None
Example
----------------
object.enrich_with_emission()
"""
self.df = self.df[(self.df["year"] <= 2019)]
        self.df["year"] = pd.to_datetime(self.df["year"], format="%Y")
from nose.tools import assert_equal, assert_raises, assert_almost_equal
from unittest.mock import Mock, call, patch
from skillmodels import SkillModel as smo
import numpy as np
from pandas import DataFrame
from numpy.testing import assert_array_equal as aae
from numpy.testing import assert_array_almost_equal as aaae
import json
import pandas as pd
from pandas.util.testing import assert_series_equal
from pandas.util.testing import assert_frame_equal
class TestGeneralParamsSlice:
def setup(self):
self.param_counter = 10
def test_general_params_slice(self):
assert_equal(smo._general_params_slice(self, 10), slice(10, 20))
def test_general_params_slice_via_usage(self):
sl = smo._general_params_slice(self, 10)
aae(np.arange(20)[sl], np.arange(10) + 10)
def test_side_effect_of_general_params_slice_on_param_counter(self):
sl = smo._general_params_slice(self, 10) # noqa
assert_equal(self.param_counter, 20)
class TestDeltasRelatedMethods:
def setup(self):
self.periods = [0, 1, 2]
self.controls = [['c1', 'c2'], ['c1', 'c2', 'c3'], ['c3', 'c4']]
self.factors = ['f1', 'f2']
cols = ['name', 'f1_loading_norm_value', 'f2_loading_norm_value']
dat = np.zeros((13, 3))
dat[(0, 6, 11), 1] = 5
dat[(1, 8, 12), 2] = 3
df = DataFrame(data=dat, columns=cols)
df['period'] = [0] * 6 + [1] * 3 + [2] * 4
df['has_normalized_intercept'] = [
True, False, False, True, False, False, True, True, False, True,
False, False, False]
df['intercept_norm_value'] = [
3, np.nan, np.nan, 4, np.nan, np.nan, 5, 6, np.nan, 7, np.nan,
np.nan, np.nan]
df['name'] = ['m{}'.format(number) for number in range(13)]
df.set_index(['period', 'name'], inplace=True)
self.update_info = df
def test_initial_deltas_without_controls_besides_constant(self):
self.controls = [[], [], []]
exp1 = np.array([[3], [0], [0], [4], [0], [0]])
exp2 = np.array([[5], [6], [0]])
exp3 = np.array([[7], [0], [0], [0]])
expected = [exp1, exp2, exp3]
calculated = smo._initial_deltas(self)
for calc, ex in zip(calculated, expected):
aae(calc, ex)
def test_initial_deltas_with_controls_and_constants(self):
exp1 = np.array([
[3, 0, 0], [0, 0, 0], [0, 0, 0], [4, 0, 0], [0, 0, 0], [0, 0, 0]])
exp2 = np.array([[5, 0, 0, 0], [6, 0, 0, 0], [0, 0, 0, 0]])
exp3 = np.array([[7, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]])
expected = [exp1, exp2, exp3]
calculated = smo._initial_deltas(self)
for calc, ex in zip(calculated, expected):
aae(calc, ex)
def test_deltas_bool_without_controls_besides_constants(self):
exp1 = np.array([False, True, True, False, True, True]).reshape(6, 1)
exp2 = np.array([False, False, True]).reshape(3, 1)
exp3 = np.array([False, True, True, True]).reshape(4, 1)
expected = [exp1, exp2, exp3]
self.controls = [[], [], []]
calculated = smo._deltas_bool(self)
for calc, ex in zip(calculated, expected):
aae(calc, ex)
def test_deltas_bool_with_controls_and_constant(self):
exp1 = np.ones((6, 3), dtype=bool)
exp1[(0, 3), 0] = False
exp2 = np.ones((3, 4), dtype=bool)
exp2[(0, 1), 0] = False
exp3 = np.ones((4, 3), dtype=bool)
exp3[0, 0] = False
expected = [exp1, exp2, exp3]
calculated = smo._deltas_bool(self)
for calc, ex in zip(calculated, expected):
aae(calc, ex)
def test_params_slice_deltas(self):
arr0 = np.ones((4, 2), dtype=bool)
arr1 = np.ones((6, 3), dtype=bool)
arr1[(0, 1), :] = 0
self._deltas_bool = Mock(return_value=[arr0, arr1, arr0])
self._general_params_slice = Mock()
smo._params_slice_for_deltas(self, 'short')
self._general_params_slice.assert_has_calls(
[call(8), call(12), call(8)])
def test_deltas_names_without_controls_beside_constant(self):
self.controls = [[], [], []]
d_boo = [np.ones((6, 1), dtype=bool), np.ones((3, 1), dtype=bool),
np.ones((4, 1), dtype=bool)]
self._deltas_bool = Mock(return_value=d_boo)
fs = 'delta__{}__{}__{}'
        expected_names = \
[fs.format(0, 'm0', 'constant'),
fs.format(0, 'm1', 'constant'),
fs.format(0, 'm2', 'constant'),
fs.format(0, 'm3', 'constant'),
fs.format(0, 'm4', 'constant'),
fs.format(0, 'm5', 'constant'),
fs.format(1, 'm6', 'constant'),
fs.format(1, 'm7', 'constant'),
fs.format(1, 'm8', 'constant'),
fs.format(2, 'm9', 'constant'),
fs.format(2, 'm10', 'constant'),
fs.format(2, 'm11', 'constant'),
fs.format(2, 'm12', 'constant')]
assert_equal(smo._deltas_names(self, params_type='short'),
expected_names)
def test_deltas_names_with_controls_and_constant(self):
self.add_constant = True
d_boo = [np.ones((6, 2), dtype=bool), np.ones((3, 3), dtype=bool),
np.ones((4, 2), dtype=bool)]
for i in range(3):
d_boo[i][0, 0] = False
self._deltas_bool = Mock(return_value=d_boo)
fs = 'delta__{}__{}__{}'
expected_names = \
[fs.format(0, 'm0', 'c1'), fs.format(0, 'm0', 'c2'),
fs.format(0, 'm1', 'constant'), fs.format(0, 'm1', 'c1'),
fs.format(0, 'm1', 'c2'),
fs.format(0, 'm2', 'constant'), fs.format(0, 'm2', 'c1'),
fs.format(0, 'm2', 'c2'),
fs.format(0, 'm3', 'constant'), fs.format(0, 'm3', 'c1'),
fs.format(0, 'm3', 'c2'),
fs.format(0, 'm4', 'constant'), fs.format(0, 'm4', 'c1'),
fs.format(0, 'm4', 'c2'),
fs.format(0, 'm5', 'constant'), fs.format(0, 'm5', 'c1'),
fs.format(0, 'm5', 'c2'),
fs.format(1, 'm6', 'c1'), fs.format(1, 'm6', 'c2'),
fs.format(1, 'm6', 'c3'),
fs.format(1, 'm7', 'constant'), fs.format(1, 'm7', 'c1'),
fs.format(1, 'm7', 'c2'), fs.format(1, 'm7', 'c3'),
fs.format(1, 'm8', 'constant'), fs.format(1, 'm8', 'c1'),
fs.format(1, 'm8', 'c2'), fs.format(1, 'm8', 'c3'),
fs.format(2, 'm9', 'c3'), fs.format(2, 'm9', 'c4'),
fs.format(2, 'm10', 'constant'), fs.format(2, 'm10', 'c3'),
fs.format(2, 'm10', 'c4'),
fs.format(2, 'm11', 'constant'), fs.format(2, 'm11', 'c3'),
fs.format(2, 'm11', 'c4'),
fs.format(2, 'm12', 'constant'), fs.format(2, 'm12', 'c3'),
fs.format(2, 'm12', 'c4')]
assert_equal(smo._deltas_names(self, params_type='short'),
expected_names)
class TestPsiRelatedMethods:
def setup(self):
self.factors = ['f1', 'f2', 'f3']
self.nfac = len(self.factors)
self.endog_factor = 'f3'
def test_initial_psi(self):
aae(smo._initial_psi(self), np.ones(3))
def test_psi_bool(self):
aae(smo._psi_bool(self), np.array([True, True, False]))
def test_params_slice_for_psi(self):
self._general_params_slice = Mock()
smo._params_slice_for_psi(self, params_type='short')
self._general_params_slice.assert_has_calls([call(2)])
def test_psi_names(self):
assert_equal(smo._psi_names(self, params_type='short'),
['psi__f1', 'psi__f2'])
class TestHRelatedMethods:
def setup(self):
self.factors = ['f1', 'f2']
cols = self.factors + [
'f1_loading_norm_value', 'f2_loading_norm_value']
self.nfac = 2
dat = np.zeros((20, 4))
dat[(0, 1, 6, 8, 11, 16, 18), 0] = 1
dat[(2, 3, 6, 7, 12, 13, 19), 1] = 1
dat[(1, 8), 2] = 5
dat[(6, 13, 19), 3] = 3
res = np.zeros((20, 2), dtype=bool)
res[(0, 6, 11, 16, 18), 0] = True
res[(2, 3, 7, 12), 1] = True
self.res_bool = res
self.exp_init_H = np.zeros((20, 2))
self.exp_init_H[(1, 8), 0] = 5
self.exp_init_H[(6, 13, 19), 1] = 3
df = DataFrame(data=dat, columns=cols)
self.update_info = df
def test_initial_H(self):
aae(smo._initial_H(self), self.exp_init_H)
def test_H_bool(self):
self._initial_H = Mock(return_value=self.exp_init_H)
aae(smo._H_bool(self), self.res_bool)
def test_params_slice_for_H(self):
self._H_bool = Mock(return_value=self.res_bool)
self._general_params_slice = Mock()
smo._params_slice_for_H(self, params_type='short')
self._general_params_slice.assert_has_calls([call(9)])
def test_helpers_for_h_transformation(self):
self.endog_correction = True
self.endog_factor = ['f1']
res1 = np.zeros(20, dtype=bool)
for i in (0, 1, 6, 8, 11, 16, 18):
res1[i] = True
res2 = np.zeros((7, 1))
res3 = np.zeros((7, 2))
calc1, calc2, calc3 = smo._helpers_for_H_transformation_with_psi(self)
aae(calc1, res1)
aae(calc2, res2)
aae(calc3, res3)
def test_H_names(self):
self.factors = ['f1', 'f2']
df = DataFrame(data=np.ones((6, 1)), columns=['col'])
df['name'] = ['m{}'.format(number) for number in range(6)]
df['period'] = [0, 0, 0, 1, 1, 1]
df.set_index(['period', 'name'], inplace=True)
self.update_info = df
boo = np.zeros((6, 2))
boo[(0, 1, 3), 0] = True
boo[(3, 4), 1] = True
self._H_bool = Mock(return_value=boo)
fs = 'H__{}__{}__{}'
expected_names = [
fs.format(0, 'f1', 'm0'), fs.format(0, 'f1', 'm1'),
fs.format(1, 'f1', 'm3'), fs.format(1, 'f2', 'm3'),
fs.format(1, 'f2', 'm4')]
assert_equal(smo._H_names(self, params_type='short'), expected_names)
class TestRRelatedMethods:
def setup(self):
self.nupdates = 12
self.estimator = 'chs'
df = DataFrame(data=np.zeros((12, 1)), columns=['col'])
df['period'] = np.array([0] * 5 + [1] * 7)
df['name'] = ['m{}'.format(i) for i in range(12)]
df.set_index(['period', 'name'], inplace=True)
self.update_info = df
self.bounds_distance = 0.001
self.lower_bound = np.empty(100, dtype=object)
self.lower_bound[:] = None
def test_initial_R(self):
aae(smo._initial_R(self), np.zeros(12))
def test_params_slice_for_R(self):
self._general_params_slice = Mock()
smo._params_slice_for_R(self, params_type='short')
self._general_params_slice.assert_has_calls([call(12)])
def test_set_bounds_for_R_not_robust(self):
self.robust_bounds = False
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[10: 30] = 0.0
smo._set_bounds_for_R(self, slice(10, 30))
aae(self.lower_bound, expected)
def test_set_bounds_for_R_robust(self):
self.robust_bounds = True
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[10: 30] = 0.001
smo._set_bounds_for_R(self, slice(10, 30))
aae(self.lower_bound, expected)
def test_R_names(self):
periods = [0] * 5 + [1] * 7
names = ['m{}'.format(i) for i in range(12)]
expected = ['R__{}__{}'.format(p, m) for p, m in zip(periods, names)]
assert_equal(smo._R_names(self, params_type='short'), expected)
class TestQRelatedMethods:
def setup(self):
self.nstages = 2
self.stages = [0, 1]
self.nfac = 5
self.factors = ['f{}'.format(i) for i in range(1, 6)]
params_info = np.ones((2, 5))
params_info[:, 0] = -1
params_info[1, (2, 4)] = 0
self.new_trans_coeffs = params_info
self.exp_bool = np.zeros((2, 5, 5), dtype=bool)
self.exp_bool[0, :, :] = np.diag([False, True, True, True, True])
self.exp_bool[1, :, :] = np.diag([False, True, False, True, False])
self.bounds_distance = 0.001
self.lower_bound = np.empty(100, dtype=object)
self.lower_bound[:] = None
def test_initial_q(self):
aae(smo._initial_Q(self), np.zeros((2, 5, 5)))
def test_q_bool(self):
aae(smo._Q_bool(self), self.exp_bool)
def test_params_slice_for_q(self):
self._Q_bool = Mock(return_value=self.exp_bool)
self._general_params_slice = Mock()
smo._params_slice_for_Q(self, params_type='short')
self._general_params_slice.assert_has_calls([call(6)])
def test_q_replacements(self):
expected = [[(1, 2, 2), (0, 2, 2)], [(1, 4, 4), (0, 4, 4)]]
assert_equal(smo._Q_replacements(self), expected)
def test_q_names(self):
expected = ['Q__0__f2', 'Q__0__f3', 'Q__0__f4', 'Q__0__f5',
'Q__1__f2', 'Q__1__f4']
assert_equal(smo._Q_names(self, params_type='short'), expected)
def test_set_bounds_for_Q_not_robust(self):
self.robust_bounds = False
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[10: 30] = 0.0
smo._set_bounds_for_Q(self, slice(10, 30))
aae(self.lower_bound, expected)
def test_set_bounds_for_Q_robust(self):
self.robust_bounds = True
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[10: 30] = 0.001
smo._set_bounds_for_Q(self, slice(10, 30))
aae(self.lower_bound, expected)
class TestXZeroRelatedMethods:
def setup(self):
self.nemf = 3
self.nobs = 100
self.nfac = 4
self.order_X_zeros = 2
self.factors = ['f1', 'f2', 'f3', 'f4']
def test_initial_X_zero(self):
res1, res2 = smo._initial_X_zero(self)
aae(res1, np.zeros((100, 3, 4)))
aae(res2, np.zeros((300, 4)))
def test_that_initial_X_zeros_are_views_on_same_memory(self):
res1, res2 = smo._initial_X_zero(self)
res1[:] = 1
aae(res2, np.ones((300, 4)))
def test_X_zero_filler(self):
aae(smo._X_zero_filler(self), np.zeros((3, 4)))
def test_params_slice_for_X_zero(self):
self._general_params_slice = Mock()
smo._params_slice_for_X_zero(self, params_type='short')
self._general_params_slice.assert_has_calls([call(12)])
def test_x_zero_replacements(self):
expected = [[(1, 2), (0, 2)], [(2, 2), (1, 2)]]
assert_equal(smo._X_zero_replacements(self), expected)
def test_set_bounds_for_X_zero(self):
self.lower_bound = np.empty(100, dtype=object)
self.lower_bound[:] = None
params_slice = slice(10, 22)
expected = self.lower_bound.copy()
expected[[16, 20]] = 0
smo._set_bounds_for_X_zero(self, params_slice=params_slice)
aae(self.lower_bound, expected)
def test_x_zero_names_short_params(self):
expected = [
'X_zero__0__f1', 'X_zero__0__f2', 'X_zero__0__f3', 'X_zero__0__f4',
'X_zero__1__f1', 'X_zero__1__f2', 'diff_X_zero__1__f3',
'X_zero__1__f4',
'X_zero__2__f1', 'X_zero__2__f2', 'diff_X_zero__2__f3',
'X_zero__2__f4']
assert_equal(smo._X_zero_names(self, params_type='short'), expected)
def test_x_zero_names_long_params(self):
expected = [
'X_zero__0__f1', 'X_zero__0__f2', 'X_zero__0__f3', 'X_zero__0__f4',
'X_zero__1__f1', 'X_zero__1__f2', 'X_zero__1__f3', 'X_zero__1__f4',
'X_zero__2__f1', 'X_zero__2__f2', 'X_zero__2__f3', 'X_zero__2__f4']
assert_equal(smo._X_zero_names(self, params_type='long'), expected)
class TestWZeroRelatedMethods:
def setup(self):
self.nemf = 4
self.nobs = 100
def test_initial_w_zero(self):
aae(smo._initial_W_zero(self),
np.ones((self.nobs, self.nemf)) / 4)
def test_params_slice_w_zero(self):
self._general_params_slice = Mock()
smo._params_slice_for_W_zero(self, params_type='short')
self._general_params_slice.assert_has_calls([call(4)])
def test_w_zero_names(self):
expected = ['W_zero__0', 'W_zero__1', 'W_zero__2', 'W_zero__3']
assert_equal(smo._W_zero_names(self, params_type='short'), expected)
class TestPZeroRelatedMethods:
def setup(self):
self.nemf = 2
self.nobs = 100
self.nfac = 4
self.helper = np.array([[True, True, True, True],
[False, True, True, True],
[False, False, True, True],
[False, False, False, True]])
self.lower_bound = np.empty(100, dtype=object)
self.lower_bound[:] = None
self.bound_indices = [10, 14, 17, 19, 20, 24, 27, 29]
self.bounds_distance = 0.001
self.estimator = 'chs'
def test_initial_P_zero_no_square_root_filters(self):
self.square_root_filters = False
res1, res2 = smo._initial_P_zero(self)
aae(res1, np.zeros((100, 2, 4, 4)))
aae(res2, np.zeros((200, 4, 4)))
def test_initial_P_zero_square_root_filters(self):
self.square_root_filters = True
res1, res2 = smo._initial_P_zero(self)
aae(res1, np.zeros((100, 2, 5, 5)))
aae(res2, np.zeros((200, 5, 5)))
def test_P_zero_filler_unrestricted_P_zeros(self):
self.restrict_P_zeros = False
aae(smo._P_zero_filler(self), np.zeros((2, 4, 4)))
def test_P_zero_filler_restricted_P_zeros(self):
self.restrict_P_zeros = True
aae(smo._P_zero_filler(self), np.zeros((1, 4, 4)))
def test_P_zero_filler_bool_unrestricted(self):
self._P_zero_filler = Mock(return_value=np.zeros((2, 4, 4)))
self.restrict_P_zeros = False
expected = np.zeros((2, 4, 4), dtype=bool)
expected[:] = self.helper
aae(smo._P_zero_bool(self), expected)
def test_P_zero_filler_bool_restricted(self):
self._P_zero_filler = Mock(return_value=np.zeros((1, 4, 4)))
self.restrict_P_zeros = True
expected = np.zeros((1, 4, 4), dtype=bool)
expected[:] = self.helper
aae(smo._P_zero_bool(self), expected)
def test_params_slice_P_zero_unrestricted(self):
self.restrict_P_zeros = False
self._general_params_slice = Mock()
smo._params_slice_for_P_zero(self, params_type='short')
self._general_params_slice.assert_has_calls([call(20)])
def test_params_slice_P_zero_restricted(self):
self.restrict_P_zeros = True
self._general_params_slice = Mock()
smo._params_slice_for_P_zero(self, params_type='short')
self._general_params_slice.assert_has_calls([call(10)])
def test_set_bounds_P_zero_unrestricted_not_robust(self):
self.robust_bounds = False
self.restrict_P_zeros = False
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[self.bound_indices] = 0
smo._set_bounds_for_P_zero(self, slice(10, 30))
aae(self.lower_bound, expected)
def test_set_bounds_P_zero_unrestricted_robust(self):
self.robust_bounds = True
self.restrict_P_zeros = False
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[self.bound_indices] = 0.001
smo._set_bounds_for_P_zero(self, slice(10, 30))
aae(self.lower_bound, expected)
def test_set_bounds_P_zero_restricted_not_robust(self):
self.robust_bounds = False
self.restrict_P_zeros = True
expected = np.zeros(100, dtype=object)
expected[:] = None
expected[self.bound_indices[:4]] = 0.0
smo._set_bounds_for_P_zero(self, slice(10, 20))
aae(self.lower_bound, expected)
def test_set_bounds_P_zero_invalid_params_slice(self):
self.robust_bounds = False
self.restrict_P_zeros = False
assert_raises(
AssertionError, smo._set_bounds_for_P_zero, self, slice(10, 15))
def test_P_zero_names_short(self):
self.nemf = 1
self.nfac = 3
self.factors = ['f1', 'f2', 'f3']
self.restrict_P_zeros = False
self.cholesky_of_P_zero = False
fs = 'cholesky_P_zero__0__{}__{}'
expected = [fs.format('f1', 'f1'), fs.format('f1', 'f2'),
fs.format('f1', 'f3'), fs.format('f2', 'f2'),
fs.format('f2', 'f3'), fs.format('f3', 'f3')]
assert_equal(smo._P_zero_names(self, params_type='short'), expected)
def test_P_zero_names_long(self):
self.nemf = 1
self.nfac = 3
self.factors = ['f1', 'f2', 'f3']
self.restrict_P_zeros = False
self.cholesky_of_P_zero = False
fs = 'P_zero__0__{}__{}'
expected = [fs.format('f1', 'f1'), fs.format('f1', 'f2'),
fs.format('f1', 'f3'), fs.format('f2', 'f2'),
fs.format('f2', 'f3'), fs.format('f3', 'f3')]
assert_equal(smo._P_zero_names(self, params_type='long'), expected)
class TestTransCoeffsRelatedMethods:
def setup(self):
self.factors = ['f1', 'f2']
self.nfac = 2
self.transition_names = ['first_func', 'second_func']
self.included_factors = [['f1', 'f2'], ['f2']]
self.stages = [0, 1]
self.nstages = 2
new_params = np.array([[1, 1], [0, 1]])
self.new_trans_coeffs = new_params
self.lower_bound = np.empty(100, dtype=object)
self.lower_bound[:] = None
self.upper_bound = self.lower_bound.copy()
@patch('skillmodels.estimation.skill_model.tf')
def test_initial_trans_coeffs(self, mock_tf):
mock_tf.nr_coeffs_first_func.return_value = 3
mock_tf.nr_coeffs_second_func.return_value = 10
expected = [np.zeros((2, 3)), np.zeros((2, 10))]
initials = smo._initial_trans_coeffs(self)
for i, e in zip(initials, expected):
aae(i, e)
@patch('skillmodels.estimation.skill_model.tf')
def test_params_slices_for_trans_coeffs(self, mock_tf):
mock_tf.nr_coeffs_first_func.return_value = 3
mock_tf.nr_coeffs_second_func.return_value = 10
self._general_params_slice = Mock(
side_effect=[slice(0, 3), slice(3, 13), slice(13, 23)])
res = smo._params_slice_for_trans_coeffs(self, params_type='short')
self._general_params_slice.assert_has_calls(
[call(3), call(10), call(10)])
mock_tf.nr_coeffs_first_func.assert_has_calls(
[call(included_factors=['f1', 'f2'], params_type='short')])
mock_tf.nr_coeffs_second_func.assert_has_calls(
[call(included_factors=['f2'], params_type='short')] * 2)
assert_equal([[slice(0, 3)] * 2, [slice(3, 13), slice(13, 23)]], res)
@patch('skillmodels.estimation.skill_model.tf')
def test_set_bounds_for_trans_coeffs(self, mock_tf):
lb = np.array([0, None, None], dtype=object)
ub = np.array([None, None, 1], dtype=object)
mock_tf.bounds_first_func.return_value = (lb, ub)
del mock_tf.bounds_second_func
sl = [[slice(0, 3)] * 2, [slice(3, 13), slice(13, 23)]]
expected_lb = self.lower_bound.copy()
expected_lb[0] = 0
expected_ub = self.upper_bound.copy()
expected_ub[2] = 1
smo._set_bounds_for_trans_coeffs(self, sl)
aae(self.lower_bound, expected_lb)
aae(self.upper_bound, expected_ub)
@patch('skillmodels.estimation.skill_model.tf')
def test_trans_coeffs_names(self, mock_tf):
mock_tf.nr_coeffs_second_func.return_value = 2
mock_tf.coeff_names_first_func.return_value = ['epsilon', 'psi', 'pi']
del mock_tf.coeff_names_second_func
expected = [
'epsilon', 'psi', 'pi', 'trans_coeff__0__f2__0',
'trans_coeff__0__f2__1', 'trans_coeff__1__f2__0',
'trans_coeff__1__f2__1']
assert_equal(smo._trans_coeffs_names(self, params_type='short'),
expected)
class TestTransformTransitionParamsFuncs:
def setup(self):
self.factors = ['f1', 'f2']
self.transition_names = ['first_func', 'second_func']
@patch('skillmodels.estimation.skill_model.tf')
def test_transform_trans_coeffs_funcs(self, mock_tf):
del mock_tf.transform_coeffs_second_func
assert_equal(smo._transform_trans_coeffs_funcs(self),
['transform_coeffs_first_func', None])
class TestParamsSlices:
def setup(self):
self.params_quants = ['a', 'b']
self._params_slice_for_a = Mock(return_value=slice(0, 3))
self._params_slice_for_b = Mock(return_value=slice(3, 5))
def test_params_slices(self):
assert_equal(smo.params_slices(self, params_type='short'),
{'a': slice(0, 3), 'b': slice(3, 5)})
class TestLenParams:
def setup(self):
self.params_quants = ['a', 'b']
self.params_slices = Mock(
return_value={'a': slice(0, 3), 'b': slice(3, 5)})
def test_len_params(self):
assert_equal(smo.len_params(self, params_type='short'), 5)
class TestBoundsList:
def setup(self):
slices = {'a': slice(0, 3), 'b': slice(3, 5)}
self.params_slices = Mock(return_value=slices)
self.len_params = Mock(return_value=5)
self.params_quants = ['a', 'b']
# mock function
def _set_bounds_for_b(self, params_slice):
self.lower_bound[params_slice] = 99
self.upper_bound[params_slice][0] = 100
def test_bounds_list(self):
expected = [(None, None)] * 3 + [(99, 100), (99, None)]
assert_equal(smo.bounds_list(self), expected)
class TestParamNames:
def setup(self):
self.params_quants = ['a', 'b']
self._a_names = Mock(return_value=['a1', 'a2'])
self._b_names = Mock(return_value=['b1', 'b2', 'b3'])
self.len_params = Mock(return_value=5)
def test_param_names(self):
assert_equal(smo.param_names(self, params_type='short'),
['a1', 'a2', 'b1', 'b2', 'b3'])
def test_param_names_invalid(self):
self.len_params = Mock(return_value=6)
assert_raises(
AssertionError, smo.param_names, self, params_type='short')
class TestTransformParams:
def setup(self):
self.params_quants = [
'deltas', 'trans_coeffs', 'X_zero', 'P_zero']
slices_dict = {
'short':
{'deltas': slice(0, 5),
'trans_coeffs': slice(5, 12),
'X_zero': slice(12, 15),
'P_zero': slice(15, 18)},
'long':
{'deltas': slice(0, 5),
'trans_coeffs': slice(5, 14),
'X_zero': slice(14, 17),
'P_zero': slice(17, 20)}}
self.params_slices = Mock(
side_effect=lambda params_type: slices_dict[params_type])
self._flatten_slice_list = \
Mock(side_effect=[slice(0, 5), slice(5, 14), slice(0, 5)])
len_dict = {'short': 18, 'long': 20}
self.len_params = Mock(
side_effect=lambda params_type: len_dict[params_type])
self._P_zero_filler = Mock()
self._P_zero_bool = Mock()
self.cholesky_of_P_zero = True
self._X_zero_filler = Mock()
self._X_zero_replacements = Mock()
self._initial_trans_coeffs = Mock()
self._transform_trans_coeffs_funcs = Mock()
self.included_factors = []
self.model_name = 'some_model'
@patch('skillmodels.estimation.skill_model.pp')
def test_expand_params(self, mock_pt):
mock_pt.transform_params_for_X_zero.return_value = np.arange(3)
mock_pt.transform_params_for_trans_coeffs.return_value = np.ones(9)
mock_pt.transform_params_for_P_zero.return_value = np.ones(3) * 17
expected = np.array([0] * 5 + [1] * 9 + [0, 1, 2] + [17] * 3)
aae(smo._transform_params(self, np.zeros(18), 'short_to_long'),
expected)
class TestGenerateStartParams:
def setup(self):
self.params_quants = [
'deltas', 'P_zero', 'W_zero', 'trans_coeffs']
self.nemf = 3
self.nfac = 2
self.stages = [0, 1]
self.factors = ['f1', 'f2']
self.included_factors = self.factors
self.transition_names = ['some_func', 'some_func']
self.start_values_per_quantity = {
'deltas': 5, 'P_zero_off_diags': 0, 'P_zero_diags': 0.5}
self.restrict_P_zeros = False
slices = {'deltas': slice(0, 4), 'P_zero': slice(4, 13),
'W_zero': slice(13, 16),
'trans_coeffs': [[slice(16, 17), slice(17, 18)],
[slice(18, 19), slice(19, 20)]]}
self.params_slices = Mock(return_value=slices)
self.len_params = Mock(return_value=20)
@patch('skillmodels.estimation.skill_model.tf')
def test_generate_start_params(self, mock_tf):
mock_tf.start_values_some_func.return_value = np.ones(1) * 7.7
expected = np.array(
[5] * 4 + [0.5, 0, 0.5] * 3 + [1 / 3] * 3 + [7.7] * 4)
aae(smo._generate_naive_start_params(self), expected)
class TestSigmaWeightsAndScalingFactor:
def setup(self):
self.nemf = 2
self.nobs = 10
self.nfac = 4
self.kappa = 1.5
# these test results have been calculated with the sigma_point
# function of the filterpy library
with open('skillmodels/tests/fast_routines/sigma_points_from_filterpy.json') as f:
self.fixtures = json.load(f)
def test_julier_sigma_weight_construction(self):
expected_sws = self.fixtures['julier_wm']
aae(smo.sigma_weights(self)[0], expected_sws)
def test_julier_scaling_factor(self):
expected_sf = 2.34520787991
assert_almost_equal(smo.sigma_scaling_factor(self), expected_sf)
class TestLikelihoodArgumentsDict:
def setup(self):
pass
class TestAllVariablesForIVEquations:
def setup(self):
self.measurements = {
'f1': [['y01', 'y02'], ['y11', 'y12'], []],
'f2': [['y04', 'y05'], ['y14', 'y15'], []],
'f3': [['y07', 'y08'], [], []]}
self.factors = ['f1', 'f2', 'f3']
self.included_factors = [['f1', 'f3'], ['f2', 'f3'], []]
self.transition_names = ['blubb', 'blubb', 'constant']
self.periods = [0, 1, 2]
def test_all_variables_for_iv_equations_constant_factor(self):
calc_meas_list = smo.all_variables_for_iv_equations(
self, 1, 'f1', 'test')
expected_meas_list = [
['y11_test', 'y12_test'],
['y07_copied_test', 'y08_copied_test']]
assert_equal(calc_meas_list, expected_meas_list)
def test_all_variables_for_iv_equations_non_constant(self):
calc_meas_list = smo.all_variables_for_iv_equations(self, 1, 'f2', '')
expected_meas_list = [
['y14', 'y15'], ['y07_copied', 'y08_copied']]
assert_equal(calc_meas_list, expected_meas_list)
def test_indepvar_permutations(self):
ret_val = [['y1', 'y2'], ['y3', 'y4']]
self.all_variables_for_iv_equations = Mock(return_value=ret_val)
expected_xs = [
['y1', 'y3'], ['y1', 'y4'], ['y2', 'y3'], ['y2', 'y4']]
calc_xs = smo.variable_permutations_for_iv_equations(self, 1, 1)[0]
assert_equal(calc_xs, expected_xs)
def test_instrument_permutations(self):
ret_val = [['y1_resid', 'y2_resid'], ['y3_resid', 'y4_resid']]
self.all_variables_for_iv_equations = Mock(return_value=ret_val)
expected_zs = [
[['y2'], ['y4']],
[['y2'], ['y3']],
[['y1'], ['y4']],
[['y1'], ['y3']]]
calc_zs = smo.variable_permutations_for_iv_equations(self, 1, 1)[1]
assert_equal(calc_zs, expected_zs)
class TestNumberOfIVParameters:
def setup(self):
self.factors = ['f1', 'f2', 'f3']
self.transition_names = ['bla', 'bla', 'blubb']
ret = (['correct', 'wrong'], ['correct2', 'wrong2'])
self.variable_permutations_for_iv_equations = Mock(return_value=ret)
@patch('skillmodels.estimation.skill_model.tf')
def test_number_of_iv_parameters(self, mock_tf):
mock_tf.iv_formula_bla.return_value = ('1 + 2 + 3 + 4', '_')
expected_param_nr = 4
calc_res = smo.number_of_iv_parameters(self, 'f1')
assert_equal(calc_res, expected_param_nr)
@patch('skillmodels.estimation.skill_model.tf')
def test_right_calls(self, mock_tf):
mock_tf.iv_formula_bla.return_value = ('1 + 2 + 3 + 4', '_')
smo.number_of_iv_parameters(self, 'f1')
mock_tf.iv_formula_bla.assert_has_calls([call('correct', 'correct2')])
class TestExtendedMeasCoeffs:
def setup(self):
self.factors = ['f1', 'f2']
self.transition_names = ['linear', 'constant']
self.measurements = {
'f1': [['y01', 'y02'], ['y11', 'y12']],
'f2': [['y03', 'y04'], []]}
coeffs = np.arange(0.6, 3.0, 0.2).reshape((6, 2))
cols = ['loadings', 'intercepts']
index_tuples = [(0, 'y01'), (0, 'y02'), (0, 'y03'), (0, 'y04'),
(1, 'y11'), (1, 'y12')]
self.index = pd.MultiIndex.from_tuples(index_tuples)
self.storage_df = pd.DataFrame(coeffs, index=self.index, columns=cols)
def test_extended_meas_coeffs_no_constant_factor_and_intercepts_case(self):
coeff_type = 'intercepts'
calc_intercepts = smo.extended_meas_coeffs(self, coeff_type, 0)
expected_intercepts = pd.Series(
data=[0.8, 1.2, 1.6, 2.0],
name='intercepts', index=['y01', 'y02', 'y03', 'y04'])
assert_series_equal(calc_intercepts, expected_intercepts)
def test_extendend_meas_coeffs_constant_factor_and_loadings_case(self):
coeff_type = 'loadings'
calc_loadings = smo.extended_meas_coeffs(self, coeff_type, 1)
expected_loadings = pd.Series(
data=[2.2, 2.6, 1.4, 1.8],
name='loadings',
index=['y11', 'y12', 'y03_copied', 'y04_copied'])
assert_series_equal(calc_loadings, expected_loadings)
class TestResidualMeasurements:
def setup(self):
intercepts = pd.Series(
[3.0, 2.0], name='intercepts', index=['m2', 'm1'])
loadings = pd.Series(
[2.0, 0.5], name='loadings', index=['m1', 'm2'])
self.extended_meas_coeffs = Mock(side_effect=[loadings, intercepts])
d = pd.DataFrame(data=np.array([[5, 4], [3, 2]]), columns=['m1', 'm2'])
self.y_data = ['dummy', d, 'dummy']
def test_residual_measurements(self):
expected_data = np.array([
[1.5, 2],
[0.5, -2]])
expected_resid = pd.DataFrame(
expected_data, columns=['m1_resid', 'm2_resid'])
calc_resid = smo.residual_measurements(self, period=1)
assert_frame_equal(calc_resid, expected_resid)
class TestWANorminfoDict:
def setup(self):
n = {}
n['f1'] = {'loadings': [['y1', 4], ['y2', 5], ['y3', 6]],
'intercepts': [['y4', 7], ['y5', 8]]}
df = pd.DataFrame(data=[[None]] * 3, columns=['f1'])
self.identified_restrictions = {
'coeff_sum_value': df, 'trans_intercept_value': df}
self.normalizations = n
self.stagemap = [0, 1, 2, 2]
def test_wa_norminfo_dict(self):
expected = {'loading_norminfo': ['y2', 5],
'intercept_norminfo': ['y5', 8]}
calculated = smo.model_coeffs_from_iv_coeffs_args_dict(self, 1, 'f1')
assert_equal(calculated, expected)
class TestBSMethods:
def setup(self):
self.bootstrap_samples = [
['id_0', 'id_1', 'id_1'],
['id_0', 'id_1', 'id_0'],
['id_1', 'id_0', 'id_0']]
self.bootstrap_nreps = 3
self.model_name = 'test_check_bs_sample'
self.dataset_name = 'test_data'
self.person_identifier = 'id'
self.period_identifier = 'period'
self.periods = [0, 1, 2]
self.data = pd.DataFrame(
data=np.array([
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0],
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]).T,
columns=['period', 'arange'])
self.data['id'] = pd.Series(dtype='str', data=[
'id_0', 'id_0', 'id_0', 'id_1', 'id_1', 'id_1', 'id_2', 'id_2', 'id_2']) # noqa
self.bootstrap_sample_size = 3
self.nobs = 3
self.bootstrap_nprocesses = 2
def test_check_bs_samples_accepts_iterable(self):
smo._check_bs_samples(self)
def test_rejects_non_iterable(self):
self.bootstrap_samples = 240
assert_raises(
AssertionError, smo._check_bs_samples, self)
def test_raises_error_with_unknown_identifier(self):
self.bootstrap_samples[2][0] = 'a'
assert_raises(
AssertionError, smo._check_bs_samples, self)
def test_only_bootstrap_samples_with_enough_samples(self):
self.bootstrap_nreps = 10
assert_raises(
AssertionError, smo._check_bs_samples, self)
def test_generate_bs_samples(self):
np.random.seed(495)
expected_samples = [
['id_1', 'id_1', 'id_1'],
['id_0', 'id_2', 'id_2'],
['id_2', 'id_2', 'id_1']]
calc_samples = smo._generate_bs_samples(self)
assert_equal(calc_samples, expected_samples)
def test_select_bootstrap_data(self):
expected_data = pd.DataFrame(
data=np.array([
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0],
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 3.0, 4.0, 5.0]]).T,
columns=['period', 'arange'])
expected_data['id'] = [
'id_0', 'id_0', 'id_0', 'id_1', 'id_1', 'id_1',
'id_1', 'id_1', 'id_1']
calc_data = smo._select_bootstrap_data(self, 0)
assert_frame_equal(calc_data, expected_data)
# define some mock functions. Mock objects don't work because they
# cannot be pickled, which is required for Multiprocessing to work.
def _bs_fit(self, rep, params):
return rep * np.ones(3)
def param_names(self, params_type):
return ['p1', 'p2', 'p3']
def len_params(self, params_type):
return 3
def test_all_bootstrap_params(self):
calc_params = smo.all_bootstrap_params(self, params=np.ones(3))
expected_params = pd.DataFrame(
data=[[0.0] * 3, [1.0] * 3, [2.0] * 3],
index=['rep_0', 'rep_1', 'rep_2'],
columns=['p1', 'p2', 'p3'])
assert_frame_equal(calc_params, expected_params)
class TestBootstrapParamsToConfInt:
def setup(self):
bs_data = np.zeros((100, 2))
bs_data[:, 0] = np.arange(100)
bs_data[:, 1] = np.arange(5).repeat(20)
np.random.shuffle(bs_data)
cols = ['p1', 'p2']
ind = ['rep_{}'.format(i) for i in range(100)]
df = pd.DataFrame(data=bs_data, columns=cols, index=ind)
self.stored_bootstrap_params = df
def all_bootstrap_params(self, params):
return self.stored_bootstrap_params
def test_bootstrap_conf_int(self):
expected_conf_int = pd.DataFrame(
data=[[2.475, 96.525], [0, 4]],
index=['p1', 'p2'], columns=['lower', 'upper'])
calc_conf_int = smo.bootstrap_conf_int(self, np.ones(3))
aaae(calc_conf_int, expected_conf_int)
class TestBootstrapCovMatrix:
def setup(self):
np.random.seed(94355)
expected_cov = np.array([[28 / 3, 17.0], [17.0, 31.0]])
self.par_names = ['p1', 'p2']
self.expected_cov = pd.DataFrame(
data=expected_cov, columns=self.par_names, index=self.par_names)
self.params = np.arange(2)
self.model_name = 'test_bootstrap_cov_matrix'
self.dataset_name = 'data_for_testing_covmatrix'
def len_params(self, params_type):
return 3
def all_bootstrap_params(self, params):
fake_bs_params = np.array([[1, 4], [3, 8], [7, 15]])
fake_df = pd.DataFrame(
data=fake_bs_params, columns=self.par_names,
index=['rep1', 'rep2', 'rep3'])
return fake_df
def test_bootstrap_cov_matrix(self):
calc_cov = smo.bootstrap_cov_matrix(self, self.params)
assert_frame_equal(calc_cov, self.expected_cov)
class TestBootstrapPValues:
def setup(self):
bs_params = pd.DataFrame(np.arange(10).reshape(5, 2),
columns=['a', 'b'])
self.all_bootstrap_params = Mock(return_value=bs_params)
def test_bootstrap_p_values(self):
params = np.array([2, -9])
        expected_p_values = pd.Series([0.8333333, 0.333333], index=['a', 'b'])
"""
This module organizes all output data by decade. In other words, it
concatenates all the 'similarity_scores_{str(year)}-{str(year+9)}.tsv' files into
a single file ('total_data.tsv') and adds a column with the appropriate decade for each row.
The concatenated data will be used to generate time plots in plots.R.
"""
import pandas as pd
import numpy as np
import os
if __name__ == "__main__":
base = os.path.abspath(os.getcwd())
df = pd.DataFrame(
columns=[
"disease_name",
"disease",
"gene_name",
"gene",
"class",
"score",
"year",
]
)
    do_mesh_df = pd.read_csv("inputs/DO-slim-to-mesh.tsv", sep="\t")
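    # Illustrative sketch (added; not part of the original script) of the per-decade
    # concatenation described in the module docstring. The year range and the location
    # of the 'similarity_scores_{year}-{year+9}.tsv' files are assumptions.
    # for year in range(1950, 2020, 10):
    #     decade_df = pd.read_csv(f"similarity_scores_{year}-{year + 9}.tsv", sep="\t")
    #     decade_df["year"] = year
    #     df = pd.concat([df, decade_df], ignore_index=True)
    # df.to_csv("total_data.tsv", sep="\t", index=False)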
import io
import pytest
import pandas as pd
from doltpy.cli.dolt import Dolt
from doltpy.cli.write import CREATE, UPDATE
from doltpy.cli.read import read_pandas
from doltpy.etl import (get_df_table_writer,
insert_unique_key,
get_unique_key_table_writer,
get_table_transformer,
get_bulk_table_writer,
get_dolt_loader,
get_branch_creator)
MENS_MAJOR_COUNT, WOMENS_MAJOR_COUNT = 'mens_major_count', 'womens_major_count'
AVERAGE_MAJOR_COUNT = 'average_major_count'
INITIAL_WOMENS = pd.DataFrame({'name': ['Serena'], 'major_count': [23]})
INITIAL_MENS = pd.DataFrame({'name': ['Roger'], 'major_count': [20]})
UPDATE_WOMENS = pd.DataFrame({'name': ['Margaret'], 'major_count': [24]})
UPDATE_MENS = pd.DataFrame({'name': ['Rafael'], 'major_count': [19]})
SECOND_UPDATE_WOMENS = pd.DataFrame({'name': ['Steffi'], 'major_count': [22]})
SECOND_UPDATE_MENS = pd.DataFrame({'name': ['Novak'], 'major_count': [16]})
"""Test utils_data."""
import tempfile
from pathlib import Path
import pandas as pd
from dash_charts import utils_data
def test_enable_verbose_pandas():
"""Test enable_verbose_pandas."""
pd.set_option('display.max_columns', 0)
utils_data.enable_verbose_pandas() # act
    pd.get_option('display.max_columns')
#!/bin/python
import pandas as pd
import nltk
import time
import os
import numpy as np
import sys
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.mixture import GaussianMixture
from data_io import *
from speech import *
if __name__ == "__main__":
# Load data.
print("Reading data")
labeled = load_data()
unlabeled = load_unlabeled_data()
all_data = labeled.data + unlabeled.data
# Create output folder.
print("Creating results folder")
ts = time.strftime('%Y-%m-%d_%Hh%Mm%S', time.localtime())
out_folder = f'data/results/cluster_{ts}'
os.mkdir(out_folder)
embedding_vectorizer = lambda: W2VAggVectorizerAvgTfidf(
tfidf=TfidfVectorizer(tokenizer=nltk.word_tokenize, max_df=0.8), window=20, dim=200
)
t0 = time.time()
pipe19_gmm = Pipeline([
(f'avgw2v_tfidf', embedding_vectorizer()),
('gmm', GaussianMixture(n_components=19,
init_params='random',
verbose=10))
])
print("Computing clusters...", end='')
pipe19_gmm.fit(np.array(all_data))
print("done")
print("%0.2fs" % (time.time() - t0))
# Find mapping from centroids to candidates from proportion of labeled docs in each cluster
X = pipe19_gmm['avgw2v_tfidf'].transform(np.array(labeled.data))
membership_probs = pipe19_gmm['gmm'].predict_proba(X)
# Membership probabilities for each candidate, given a cluster
df = pd.DataFrame(membership_probs)
df['y'] = labeled.labels
candidates_probs = df.groupby('y').sum()
candidates_probs_norm = normalize(candidates_probs, norm='l1', axis=0) # so columns sum to 1
# Determine mapping probabilistically
mapping = pd.DataFrame([np.random.choice(candidates_probs.index, p=col) for col in candidates_probs_norm.T])
# Predict centroids for unlabeled data
centroid_predictions = pipe19_gmm.predict(unlabeled.data)
# Map into candidate names
    unlabeled_labels = pd.Series(centroid_predictions)
import unittest
import warnings
import pandas as pd
import rowgenerators as rg
from synpums import *
from synpums.util import *
warnings.filterwarnings("ignore")
state = 'RI'
year = 2018
release = 5
def fetch(url):
return rg.dataframe(url).drop(columns=['stusab', 'county', 'name'])
class TestACSIncome(unittest.TestCase):
def x_test_median_incomes(self):
"""Check that the summary stats for the aggregate income of puma,
roughtly matches between the PUMS and ACS datasets. Both values are
divided by the median household income of the """
pums_acs = build_acs(state, sl='puma', year=year, release=release)
dfp, dfh = build_pums_dfp_dfh(state, year=2018, release=5)
puma_geoid = '79500US4400104'
dfh_g = dfh[dfh.geoid == puma_geoid]
        pums = pd.get_dummies(dfh_g['b19025'])
import numpy as np
import pandas as pd
from astropy import constants as c
from werkzeug.contrib.cache import SimpleCache
cache = SimpleCache()
colors = {
'Blue': '#1f77b4',
'Orange': '#ff7f0e',
'Green': '#2ca02c',
'Red': '#d62728',
'Purple': '#9467bd',
}
def readExoplanetEU():
"""Read the exoplanet.eu database from the 'data' folder and store as
pandas DataFrame
"""
df = cache.get('exoplanetDB')
if df is None:
        df = pd.read_csv('data/exoplanetEU.csv', engine='c')
# -*- coding: utf-8 -*-
#
# Scikit Learn Machine Learning Process Flow;
# Version 1.0
# Author : <NAME>
#
#
# First edited : 27 September 2018
# Last edited :
#
# Description : Scikit Learn Machine Learning Basics Blog WorkFlow
#
# Required input file details :
# 1. __Placeholder__
#
# Output from the code;
# 1. __Placeholder__
# TODO: YTS;
import datetime;
import csv;
import logging;
import os;
import datetime;
import numpy as np;
import pandas as pd;
from time import time, sleep;
from sklearn import model_selection;
from sklearn import metrics;
from sklearn.metrics import make_scorer;
from sklearn import preprocessing;
from sklearn import datasets;
from sklearn.dummy import DummyClassifier;
from sklearn.decomposition import KernelPCA;
from sklearn.feature_selection import SelectKBest, chi2, f_classif;
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, GridSearchCV;
from sklearn import linear_model;
from sklearn.svm import SVC;
from sklearn.ensemble import RandomForestClassifier, VotingClassifier;
from sklearn.base import BaseEstimator, TransformerMixin;
from sklearn.pipeline import make_pipeline, FeatureUnion;
from sklearn.feature_extraction.text import CountVectorizer;
import matplotlib;
import matplotlib.pyplot as plt;
import matplotlib.style as style;
# Check for style.available for more styles;
import seaborn as sns;
# matplotlib.rcParams['font.family'] = 'sans-serif';
# matplotlib.rcParams['font.sans-serif'] = ['Verdana'];
# matplotlib.rcParams['font.family'] = 'cursive';
#
# matplotlib.rcParams['font.weight'] = 8;
# matplotlib.rcParams['font.size'] = 9.5;
matplotlib.rcParams['font.family'] = 'fantasy';
matplotlib.rcParams['font.weight'] = 3;
matplotlib.rcParams['font.size'] = 10;
# ‘xx-small’, ‘x-small’, ‘small’, ‘medium’, ‘large’, ‘x-large’, ‘xx-large’
# https://matplotlib.org/users/text_props.html
style.use('bmh');
# style.use('seaborn-paper');
# style.use('seaborn-deep');
from itertools import chain;
def convert2pandas_df(x_array=None, y=None, feature_names=None, target_name=None):
''' list of datasets part of the sklearn ''';
assert x_array.shape[1] == len(feature_names); # assert the length of x_array and column label length are same;
assert x_array.shape[0] == len(y); # The target length should equal the features length;
assert isinstance(y, list); # Target should of the type list;
assert isinstance(feature_names, list); # feature_names should of the type list;
data_dict = {};
data_dict[target_name] = y;
for i, col_name in enumerate(feature_names):
data_dict[col_name] = list(chain.from_iterable( x_array[:, [i]] ));
    return pd.DataFrame(data_dict)
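# Example usage (illustrative addition, not from the original script): build a tidy
# DataFrame from scikit-learn's bundled iris dataset using the helper above.
iris = datasets.load_iris();
iris_df = convert2pandas_df(x_array=iris.data, y=list(iris.target),
                            feature_names=list(iris.feature_names), target_name='species');
print(iris_df.head());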
#%%
import numpy as np
import pandas as pd
import networkx as nx
from collections import Counter
#%%
fname = "./processed_data/rideaustin_productivity.csv"
data = pd.read_csv(fname, dtype={"timebin": int}, parse_dates=["completed_on"])
# for interactive env can do next two lines
# data.sort_values('end_taz', inplace=True)
# data.head()
# %% valid tazs
tazindata = data.end_taz.unique()
# len(tazindata)
# %% read adjacency info
fname = "./processed_data/taz_adjacency.csv"
taz_adjacency = np.genfromtxt(fname, delimiter=",", dtype=int)
tazindata_ = set(tazindata)
taz_adjacency = [
[i, j] for i, j in taz_adjacency if (i in tazindata_) and (j in tazindata_)
]
# %% build spatial adjacency graph with networkx
gspatial = nx.Graph()
gspatial.add_edges_from(taz_adjacency)
conn_comps = nx.connected_components(gspatial)
conn_comps = sorted(conn_comps, key=len, reverse=True)
conn_largest = conn_comps[0]
# %% keep tazs in largest connected component
tazindata = [x for x in tazindata if x in conn_largest]
tazindata_ = set(tazindata)
taz_adjacency = [
[i, j] for i, j in taz_adjacency if (i in tazindata_) and (j in tazindata_)
]
# %% filter data in valid tazs
data = data[data.end_taz.isin(tazindata_)]
# %% build the spatiotemporal graph
links = []
num_timebins = 168 # look at the column timebin
istemporal = [] # 0 for spatial, 1 for temporal
# spatial links in each time slice
for t in range(num_timebins):
for i, j in taz_adjacency:
v = "{}-{}".format(i, t + 1)
w = "{}-{}".format(j, t + 1)
links.append([v, w])
istemporal.append(0)
# now add temporal links
for x in tazindata:
for t in range(num_timebins - 1):
v = "{}-{}".format(x, t + 1)
w = "{}-{}".format(x, t + 2)
links.append([v, w])
istemporal.append(1)
# for periodic time
v = "{}-{}".format(x, num_timebins)
w = "{}-{}".format(x, 1)
links.append([v, w])
istemporal.append(1)
g = nx.Graph()
g.add_edges_from(links)
# nx.number_connected_components(g) # should be one!
# %% vertex info
nodes = list(g.nodes())
node2vertex = {x: i for i, x in enumerate(nodes)}
df = pd.DataFrame({"node": nodes})
df["vertex"] = [node2vertex[node] for node in nodes]
df["taz"] = [int(node.split("-")[0]) for node in nodes]
df["hour"] = [int(node.split("-")[1]) for node in nodes]
# function for time labels
def timelabeller(hour):
w = (hour - 1) // 24
t = (hour - 1) % 24
wdays = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
return f"{wdays[w]} {t:02d}:00"
df["timelabel"] = df.hour.apply(timelabeller)
# counter number of nodes
cnts = Counter(data.node)
df["node_counts"] = df.node.apply(lambda x: cnts[x])
# save file
fname = "./processed_data/vertex_data.csv"
df.to_csv(fname, index=False)
print(f"...saved vertex info in {fname}")
# %%
spatiotemporal_graph = pd.DataFrame(
{
"vertex1": [node2vertex[v[0]] for v in links],
"vertex2": [node2vertex[v[1]] for v in links],
"temporal": [b for b in istemporal],
}
)
fname = "./processed_data/spatiotemporal_graph.csv"
spatiotemporal_graph.to_csv(fname, index=False)
print(f"...saved spatiotemporal graph in {fname}")
#%% read split data
# splitlevels = pd.read_csv("processed_data/splits_opt_pt.csv")
splitlevels = pd.read_csv("processed_data/splits_qua.csv")
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# In[78]:
# load Data
# R2 comparison for train set sizes
RFR_score = pd.read_csv('Generated Data/RFR_score.csv')
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is Series:
ts = ts[0]
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
ts_local = ts.tz_localize(tzstr)
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_between_time_types(self, frame_or_series):
# GH11818
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
from datetime import datetime, timedelta
import pandas as pd
def summary(data, time):
    data['Date'] = pd.to_datetime(data['Date'])
from numpy import linalg, zeros, ones, hstack, asarray, vstack, array, mean, std
import itertools
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.dates as mdates
from sklearn.metrics import mean_squared_error
from math import sqrt
import warnings
import copy
import time
warnings.filterwarnings("ignore")
import seaborn as sns
sns.set(style="whitegrid")
from PVPolyfit import preprocessing as preprocess
from PVPolyfit import utilities
from PVPolyfit import clustering as cluster
from PVPolyfit import kernel
def pvpolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, highest_num_clusters, highest_degree, kernel_type, Y_high_filter, min_count_per_day, include_preprocess = False, plot_graph = True, graph_type = 'regression', print_info = False):
#print("h ERE")
if len(train_df) == 0 or len(test_df) == 0:
raise Exception("Either one or both DataFrames are empty.")
pvpoly = PVPolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info)
#print("H eRE")
pvpoly.prepare(Y_high_filter, min_count_per_day, include_preprocess)
#print("He RE")
rmse_list = []
std_rmse_list = []
pvpoly_objects = []
combined_labels = []
for i in range(1, highest_num_clusters+1):
pvpoly_iter = copy.deepcopy(pvpoly)
try:
labels = pvpoly_iter.run(num_clusters = i, num_iterations = 1, degrees = list(range(1,highest_degree+1)), kernel_type = kernel_type)
all_best_dfs, ultimate_days, avg_rmse, std_rmse = pvpoly_iter.evaluate(print_info = print_info)
rmse_list.append(avg_rmse)
std_rmse_list.append(std_rmse)
pvpoly_objects.append(pvpoly_iter)
combined_labels.append(labels)
except Exception as e:
if print_info:
print(e)
break
if len(rmse_list) == 0:
raise Exception("No Output was produced.")
min_idx = np.argmin(rmse_list)
if print_info:
print(min_idx)
print("{} cluster(s) were used.".format(range(1,highest_num_clusters+1)[min_idx]))
days_rmses, model_output, meases, df = pvpoly_objects[min_idx].plot(graph_type = graph_type, print_info = print_info, plot_graph = plot_graph)
return model_output, meases, days_rmses, range(1,highest_num_clusters+1)[min_idx], df, combined_labels[min_idx]
def _pvpolyfit_inputCluster(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, num_clusters, highest_degree, kernel_type, Y_high_filter, min_count_per_day, include_preprocess = False, plot_graph = True, graph_type = 'regression', print_info = False):
#print('inside')
if len(train_df) == 0 or len(test_df) == 0:
raise Exception("Either one or both DataFrames are empty.")
pvpoly = PVPolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info)
pvpoly.prepare(Y_high_filter, min_count_per_day, include_preprocess)
try:
pvpoly.run(num_clusters = num_clusters, num_iterations = 1, degrees = list(range(1,highest_degree+1)), kernel_type = kernel_type)
all_best_dfs, ultimate_days, avg_rmse, std_rmse = pvpoly.evaluate(print_info = print_info)
except Exception as e:
raise Exception("Error has occurred: ", e)
if len(str(avg_rmse)) == 0:
raise Exception("No Output was produced. Go here for more information: ")
days_rmses, model_output, meases, df = pvpoly.plot(graph_type = graph_type, print_info = print_info, plot_graph = plot_graph)
return model_output, meases, days_rmses, num_clusters, df
def break_days(df, filter_bool, min_count_per_day = 8, frequency = 'days', print_info = False):
index_list = []
day_hour_list = []
prev = 0
for index, j in enumerate(df.index):
if str(type(j)) != "<class 'str'>":
print(type(j))
print(j)
print(df.loc[j])
j = j.strftime('%m/%d/%Y %H:%M:%S %p')
if frequency == 'days':
curr = int(datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%d'))
frq = datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%m/%d/%Y')
elif frequency == 'hours':
curr = int(datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%H'))
frq = datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%m/%d/%Y %H')
if curr != prev:
index_list.append(index)
day_hour_list.append(frq)
prev = curr
last_index = index
cut_results = []
# Break df into days
for k in range(len(index_list)):
if k == (len(index_list)-1):
            # append last day
cut_results.append(df[index_list[k]:-1])
else:
cut_results.append(df[index_list[k]:index_list[k+1]])
cut_results[-1] = pd.concat([cut_results[-1], df.iloc[[-1]]])
return index_list, day_hour_list, cut_results, df
def heat_plot(df, N):
# Nth column of DF will be plotted
# Inspiration of this code was gathered from Solar-Data-Tools
index_list, _, cut_df, _ = break_days(df, False)
lizt = []
comb_df = pd.DataFrame()
temp_df = pd.DataFrame()
cut_df = cut_df[1:-1]
dates = []
for i in range(len(cut_df)):
try:
comb_df[str(i)] = cut_df[i][cut_df[i].columns[N]].tolist()
dates.append(datetime.strptime(cut_df[i].index[0], '%m/%d/%Y %I:%M:%S %p').strftime('%m/%d/%Y'))
except ValueError:
continue
lizt = comb_df.values
fig, ax = plt.subplots(nrows=1, figsize=(10,8))
foo = ax.imshow(lizt, cmap='hot', interpolation='none', aspect='auto', vmin=0)
if df.columns[N] == 'error':
ax.set_title('PVPolyfit Error Heat Plot')
if df.columns[N] == 'rmse':
ax.set_title('PVPolyfit RMSE Heat Plot')
if df.columns[N] == 'model_output':
ax.set_title('PVPolyfit Model Output Heat Plot')
plt.colorbar(foo, ax=ax, label='W')
ax.set_xlabel('Day number')
ax.set_xticks(np.arange(len(dates)))
ax.set_xticklabels(dates)
ax.set_yticks([])
ax.set_ylabel(' Time of day ')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
for tick in ax.get_xticklabels():
tick.set_rotation(45)
plt.show()
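# Example (illustrative): plot one of the columns heat_plot expects, e.g. the per-day
# RMSE produced by PVPolyfit.plot():
#   heat_plot(df, N=list(df.columns).index('rmse'))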
class PVPolyfit:
'''
.______ ____ ____ .______ ______ __ ____ ____ _______ __ .___________.
| _ \ \ \ / / | _ \ / __ \ | | \ \ / / | ____|| | | |
| |_) | \ \/ / | |_) | | | | | | | \ \/ / | |__ | | `---| |----`
| ___/ \ / | ___/ | | | | | | \_ _/ | __| | | | |
| | \ / | | | `--' | | `----. | | | | | | | |
| _| \__/ | _| \______/ |_______| |__| |__| |__| |__|
An object, PVPolyfit, created for the creation of an accurate regression of Output depending on the two covariates
PARAMETERS
----------
train_df: df
holds training data with columns and index specified below
test_df: df
holds testing data with columns and index specified below
Y_tag: str
column name of output tag
xs: list of str
list of column names for two covariates
ghi_tag: str
column name of GHI input
cs_tag: str
column name of clearsky GHI generated by pvlib simulation (link below)
USER MUST INPUT DF's WITH FOLLOWING COLUMNS:
| Description | Original Use Case | Model Purpose |
|------------------------|---------------------|--------------------------|
| Output, Y_tag: | DC Power | Target for regression |
| xs: x1: | POA Irradiance | Covariate for regression |
| x2: | Ambient Temperature | Covariate for regression |
| Measured GHI, ghi_tag | GHI (irradiance) | Day classification |
| PVLib Clearsky, cs_tag | Simulated GHI | Day classification |
PVLib has a good tutorial to generate clearsky data: https://pvlib-python.readthedocs.io/en/stable/generated/pvlib.location.Location.get_clearsky.html
'''
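    # Typical workflow (illustrative sketch added for clarity; it mirrors the pvpolyfit()
    # helper above, and the argument values below are placeholders, not recommendations):
    #   pvpoly = PVPolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info=False)
    #   pvpoly.prepare(Y_high_filter=100, min_count_per_day=8, include_preprocess=True)
    #   pvpoly.run(num_clusters=4, num_iterations=1, degrees=list(range(1, 6)), kernel_type='polynomial')
    #   all_best_dfs, ultimate_days, avg_rmse, std_rmse = pvpoly.evaluate(print_info=False)
    #   pvpoly.plot(graph_type='regression', print_info=False, plot_graph=True)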
def __init__(self, train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info):
self.train_df = train_df
self.test_df = test_df
self.Y_tag = Y_tag
self.xs = xs
self.I_tag = I_tag
self.ghi_tag = ghi_tag
self.cs_tag = cs_tag
self.print_info = print_info
self.num_clusters = 0
self.num_iterations = 0
self.degrees = []
self.cut_results = []
self.test_cut_results = []
self.ordered_pair_list = []
self.test_ordered_pair_list = []
self.combined_test_cut_results = []
self.test_km_labels = []
self.avg_rmse = 0
# all_best_dfs[Degree][Day][Columns: 'Y', 'mins', 'maxs']
self.all_best_dfs = []
# model_day_couts[Degree][Best model's [Train Counts, Test Counts]]
self.model_day_counts = []
# ultimate_days[Day][i, ind]
self.ultimate_days = []
def prepare(self, Y_high_filter, min_count_per_day, include_preprocess):
''' Preprocess and classify days in DataFrame '''
self.train_df = preprocess.data_preprocessing(self.train_df, self.xs, self.Y_tag, self.I_tag, self.cs_tag, Y_high_filter, self.print_info, include_preprocess)
#if len(self.cs_tag) != 0 or len(self.ghi_tag) != 0:
if True:
if include_preprocess:
classification, k, MF = preprocess.classify_weather_day_GM_Tina(self.train_df, self.cs_tag, self.ghi_tag)
self.train_df['day_type'] = classification
if False:
if include_preprocess:
classification = cluster.classify_weather_day_MHopwood(self.cut_results, self.Y_tag, self.xs, kmeans_num_clusters = 4)
self.train_df['day_type'] = classification
# cuts train_df into daily DF's
# also returns a filtered train_df which cuts out a day if its length is too small
index_list, day_hour_list, self.cut_results, self.train_df = utilities.find_and_break_days_or_hours(self.train_df, True, min_count_per_day = min_count_per_day, frequency = 'days', print_info = self.print_info)
middles_dates, hours_kpi = utilities.get_weighted_middle_of_day_and_calculate_float_since_noon(self.cut_results, self.Y_tag)
### For each day, compile frequencies
### For each day, output the # times each class is triggered
self.ordered_pair_list, freq_df = cluster.create_conglomerated_vectors_for_clustering_algorithm(self.cut_results, hours_kpi, day_hour_list, self.Y_tag, self.xs)
self.test_df = preprocess.data_preprocessing(self.test_df, self.xs, self.Y_tag, self.I_tag, self.cs_tag, Y_high_filter, self.print_info, include_preprocess)
if len(self.cs_tag) != 0 or len(self.ghi_tag) != 0:
test_classification, test_k, test_MF = preprocess.classify_weather_day_GM_Tina(self.test_df, self.cs_tag, self.ghi_tag)
self.test_df['day_type'] = test_classification
test_index_list, test_day_hour_list, self.test_cut_results, self.test_df = utilities.find_and_break_days_or_hours(self.test_df, True, min_count_per_day = min_count_per_day, frequency = 'days', print_info = self.print_info)
test_middles_dates, test_hours_kpi = utilities.get_weighted_middle_of_day_and_calculate_float_since_noon(self.test_cut_results, self.Y_tag)
print("TEST CUT RESULTS CREATE CONGLOMERATED")
self.test_ordered_pair_list, test_freq_df = cluster.create_conglomerated_vectors_for_clustering_algorithm(self.test_cut_results, test_hours_kpi, test_day_hour_list, self.Y_tag, self.xs)
def run(self, num_clusters = 6, num_iterations = 1, degrees = list(range(1,10)), kernel_type = 'polynomial'):
'''
Iterates through Degrees
For each Degree, iterates n times
Returns best model for each input day
Parameters:
num_clusters: int, default 6
number of clusters used in clustering algorithm, synonymous with number of 'types of days'
num_iterations: int, default 1
number of times algorithm loops, indicates volatility of algorithm (usually very small, so default = 1)
degrees: list of ints
range of degrees that polynomial kernel iterates through
kernel_type: str
type of regression kernel to be used
OPTIONS: polynomial - a(AB)+
'''
self.num_clusters = num_clusters
self.num_iterations = num_iterations
self.degrees = degrees
self.kernel_type = kernel_type
self.all_best_dfs = []
self.model_day_counts = []
for degree in self.degrees:
P_se_list = []
combined_P_list = []
combined_day_counts = []
combined_test_km_labels = []
# 1. Run the code an n number of times
for i in range(self.num_iterations):
# clusters and adds 'model_num' column to cut_results & test_cut_results
train_kmeans_dfs, test_kmeans_dfs, self.test_km_labels, self.cut_results, self.test_cut_results, train_model_day_count, test_model_day_count = cluster.cluster_ordered_pairs_and_return_df_of_days_in_cluster(self.cut_results, self.test_cut_results, self.ordered_pair_list, self.test_ordered_pair_list, kmeans_num_clusters = self.num_clusters, print_info = self.print_info)
saved_models = cluster.save_model_for_each_cluster(train_kmeans_dfs, degree, self.Y_tag, self.xs, self.kernel_type)
self.kmeans_Y_lists = kernel.process_test_data_through_models(test_kmeans_dfs, saved_models, self.test_km_labels, self.xs)
# 2. For each iteration, save the modelled P and colors (based on model used)
combined_P_list.append(self.kmeans_Y_lists)
self.combined_test_cut_results.append(self.test_cut_results)
combined_test_km_labels.append(self.test_km_labels)
combined_day_counts.append([train_model_day_count, test_model_day_count])
P_se_km = kernel.EvaluateModel(array(self.test_df[self.Y_tag].tolist()),array(self.kmeans_Y_lists)).rmse()
P_se_list.append(P_se_km)
# 3. Gather the minimum and maximum for each index, save in two lists
mins = []
maxs = []
for i in range(len(self.test_df.index)):
min = 9999
max = -9999
for j in range(len(combined_P_list)):
if (combined_P_list[j][i] < min):
min = combined_P_list[j][i]
if (combined_P_list[j][i] > max):
max = combined_P_list[j][i]
mins.append(min)
maxs.append(max)
best_index = np.argmin(P_se_list)
best_model = combined_P_list[best_index]
best_df = pd.DataFrame()
best_df['Y'] = best_model
best_df['mins'] = mins
best_df['maxs'] = maxs
best_df.index = self.test_df.index
_,_,dfg,_ = utilities.find_and_break_days_or_hours(best_df, False, min_count_per_day = 0, frequency = 'days', print_info = self.print_info)
self.all_best_dfs.append(dfg)
self.model_day_counts.append(combined_day_counts[best_index])
return combined_test_km_labels[best_index]
def evaluate(self, print_info = True):
'''
Determine rmse for each day for each degree
and return index of best model for each day
'''
# iterate by day
all_rmse = []
self.ultimate_days = []
for i in range(len(self.all_best_dfs[0])):
min = 9999
ind = 0
# iterate by degree
for j in range(len(self.all_best_dfs)):
iterating_rmse = kernel.EvaluateModel(array(self.test_cut_results[i][self.Y_tag].tolist()),array(self.all_best_dfs[j][i]['Y'].tolist())).rmse()
print("Degree ", j, " has error: ", iterating_rmse)
if abs(iterating_rmse) < abs(min):
min = iterating_rmse
ind = j
if print_info:
print("{} index: {}, degrees len: {}".format(len(self.all_best_dfs), ind, len(self.degrees)))
print("Day {} chooses degree {} with {}".format(i, self.degrees[ind], min))
all_rmse.append(min)
self.ultimate_days.append([i, ind])
self.avg_rmse = np.array(all_rmse).mean()
return self.all_best_dfs, self.ultimate_days, self.avg_rmse, np.array(all_rmse).std()
def plot(self, graph_type = 'regression', print_info = True, plot_graph = False):
iter_rmses = []
if graph_type == 'regression':
colors = ['red', 'blue', 'green', 'orange', 'purple', 'brown', 'gold', 'pink', 'gray', 'cyan', 'darkgreen', 'cadetblue', 'lawngreen', 'cornflowerblue', 'navy', 'olive', 'orangered', 'orchid', 'plum',
'khaki', 'ivory', 'magenta', 'maroon', 'plum', 'cyan', 'crimson', 'coral', 'yellowgreen', 'wheat', 'sienna', 'salmon']*5
model_outputs = []
meases = []
df_index = []
uncer_vals = []
df_meases = []
for i in range(len(self.all_best_dfs[0])):
model_number = self.test_km_labels[i]
color = colors[model_number]
ind = self.ultimate_days[i][1]
Y_output_daily = self.all_best_dfs[ind][i]['Y'].tolist()
model_outputs.append(Y_output_daily)
day_index = self.all_best_dfs[ind][i].index.tolist()
day_maxes = self.all_best_dfs[ind][i]['maxs'].tolist()
day_mins = self.all_best_dfs[ind][i]['mins'].tolist()
day_meas = array(self.test_cut_results[i][self.Y_tag].tolist())
meases.append(day_meas)
dt_index = pd.to_datetime(day_index)
if plot_graph:
plt.plot(dt_index, day_meas, 'k')
plt.plot(dt_index, Y_output_daily, color)
plt.fill_between(dt_index, day_maxes, day_mins, facecolor = color)
plt.xlabel("time")
plt.ylabel("Watts")
plt.xticks(rotation=60)
plt.title('Modelled Multiple Day Types (by color)')
uncer = (array(Y_output_daily)-day_meas)#/(day_meas))
calc_rmse = sqrt(mean_squared_error(day_meas, array(Y_output_daily)))
iter_rmses.append(calc_rmse)
df_index.append(dt_index)
uncer_vals.append(uncer)
df_meases.append(day_meas)
if print_info:
print("[{}]:".format(datetime.strptime(day_index[0], '%m/%d/%Y %H:%M:%S %p').strftime('%Y-%m-%d')))
print("\trmse: {:.4f}, error: {:.4f}".format(calc_rmse, uncer.mean()))
if plot_graph:
plt.show()
plt.close()
uncer_values = [item for sublist in uncer_vals for item in sublist]
df_indices = [item for sublist in df_index for item in sublist]
        df = pd.DataFrame(index=df_indices)
import requests
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from fake_useragent import UserAgent
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import pandas as pd
import numpy as np
import re
import os
import pickle as pk
from collections import deque
import string
import time
import psycopg2 as pg
from pymongo import MongoClient
import leafly.data_preprocess as dp
import leafly.scrape_leafly as sl
ua = UserAgent()
MAIN_URL = 'http://analytical360.com/testresults'
def setup_driver():
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (ua.random)
driver = webdriver.PhantomJS(desired_capabilities=dcap)
driver.set_window_size(1920, 1080)
return driver
def get_headers_cookies(driver):
agent = ua.random # select a random user agent
headers = {
"Connection": "close", # another way to cover tracks
"User-Agent": agent
}
cookies = driver.get_cookies()
cooks = {}
for c in cookies:
cooks[c['name']] = c['value'] # map it to be usable for requests
return headers, cooks
def check_rows(res):
'''
args: takes a requests object
returns: all table rows in doc
checks to make sure the request was successful and the data we want is
there in the format we expect. Otherwise throws an error
'''
soup = bs(res.content, 'lxml')
rows = soup.findAll('tr', {'title': 'Click To View Detail Test Results'})
if len(rows) == 0 or not res.ok:
raise Exception('response returned:', res)
return rows
def get_links(rows):
'''
take requests response object
first checks to make sure there are any links in the row, if not, spits an
error
returns a list of unique links from all the rows
'''
links = set()
for i, r in enumerate(rows):
row_links = r.findAll('a')
if len(row_links) == 0:
raise Exception('no links in row', i)
links = set(row_links) | links
links = list(set([l.get('href') for l in links]))
return links
def get_flower_links(rows):
links = get_links(rows)
flower_links = [l for l in links if re.search('.*flowers.*', l)]
flower_rows = [r for r in rows if re.search('.*flowers.*', r.findAll('a')[0].get('href'))]
return flower_links, flower_rows
def check_groups(links):
'''
args: takes in list of links from analytical360
returns: unique product groups in links (i.e. edibles, flowers, etc)
checks to make sure number of groups in still 6
{'concentrates', 'edibles', 'flowers', 'liquids', 'listing', 'topicals'}
'''
groups = [l.split('/')[-2] for l in links]
groups = list(set(groups))
if len(groups) != 6:
raise Exception('number of product groups has changed!')
return groups
def make_links_dataframe(links):
'''
args: list of links
returns: dataframe of links with product group, link
'''
df = pd.DataFrame({'link': links, 'product': [
l.split('/')[-2] for l in links]})
return df
def extract_info(link):
'''
args: link to product page
returns: dict or df of properties of product
'''
res = requests.get(link)
s = bs(res.content)
h3s = s.findAll('h3')
if len(h3s) < 2:
raise Exception('can\'t find title in:', link)
    name = h3s[1]
    # minimal completion (assumption added here): the original snippet ends abruptly,
    # so return at least the product name promised in the docstring
    return {'name': name.get_text()}
def get_links_selenium(driver):
# right now only gets one link
return driver.find_element_by_xpath('//*[@id="flowers"]/tbody/tr[1049]/td[1]/a')
def download_image(src, filename, headers, cooks):
r = requests.get(src, headers=headers, cookies=cooks)
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
def downloaded_strain_images():
pass
def get_flower_df(rows):
flower_links, flower_rows = get_flower_links(rows)
# get strain name and if there are cannabinoid percentages
flow_links = []
flow_names = []
flow_thc = []
flow_cbd = []
flow_active = []
nothc = []
nothcStrs = []
for r in flower_rows:
links = r.findAll('a')
strain = links[0].text
thc = links[1].text
cbd = links[2].text
activated = links[3].text
if thc == 'N/A' or re.search('.*\%.*', thc) is None:
nothc.append(r)
continue
flow_links.append(links[0].get('href'))
flow_names.append(strain)
flow_thc.append(thc)
flow_cbd.append(cbd)
flow_active.append(activated)
flow_df = pd.DataFrame({'name':flow_names, 'link':flow_links, 'thc':flow_thc, 'cbd':flow_cbd, 'activated':flow_active})
flow_df = flow_df.drop_duplicates()
return flow_df
def scrape_site(df, base_im_path='analytical360/new_images/', delay=None, sql=None, mongo=None):
'''
goes through analytical360 site and scrapes images and data
sql can be set to the name of a sql database to save info to, so the process can be stopped part way through
'''
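    # e.g. (illustrative call; database and collection names are placeholders):
    #   scrape_site(flow_df, delay=1, mongo=['analytical360', 'flower_tests'])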
if sql is not None:
# didn't get this working...couldn't figure out lists in sql
dbname=sql
conn = psycopg2.connect("dbname=" + sql + " host='localhost'")
if mongo is not None:
client = MongoClient()
db = client[mongo[0]]
coll = db[mongo[1]]
driver = setup_driver()
driver.get(MAIN_URL)
headers, cooks = get_headers_cookies(driver)
if not os.path.exists(base_im_path):
os.mkdir(base_im_path)
# pages that aren't really flowers, but concentrates
    # others that have broken image links
black_list = set(['http://analytical360.com/m/flowers/604216', 'http://analytical360.com/m/flowers/550371'])
name_black_list = set(['Raw Pulp CJ',
'Batch 35 Spent Trim',
'B21 Spent Trim (CBD)',
'B21 CBD',
'B22 Spent Trim (THC)',
'ACDC x Bubster #14 Male',
'ACDC x Bubster #47 Male',
'Blue Dog #19 Male',
'Blue Dog #31 Male',
'Canna-Tsu #16 Male',
'Canna-Tsu #19 Male',
'Foo Dog #3 Male',
'Foo Dog #11 Male',
'Foo Dog #12 Male',
'Harle-Tsu #2 Male',
'Harle-Tsu #7 Male',
'Miami Blues #24',
'Swiss Gold #6 Male',
'Swiss Gold #18 Male',
'Swiss Gold #26 Male',
'Under Foo #8 Male',
'Under Foo #11 Male',
'Under Foo #27 Male',
'Under Foo #35 Male',
'Harle-Tsu #7Male'])
# broke here first time thru
# startrow = flow_df[flow_df['name'] == 'Mango Haze'].index[0]
# df_remain = flow_df.iloc[startrow:, :]
cannabinoids = []
terpenes = []
im_sources = []
no_imgs = []
names = []
clean_names = []
for i, r in df.iterrows():
if delay is not None:
time.sleep(delay)
link = r['link']
id = link.split('/')[-1]
if link in black_list or r['name'] in name_black_list or re.search('.*male.*', r['name'], re.IGNORECASE) is not None or re.search('.*raw\s*pulp.*', r['name'], re.IGNORECASE) is not None or re.search('.*spent\s+trim.*', r['name'], re.IGNORECASE) is not None:
continue
clean_name = re.sub('/', '-', r['name'])
clean_name = re.sub('[ + ' + string.punctuation + '\s]+', '', clean_name).lower()
clean_names.append(clean_name)
save_path = base_im_path + clean_name + id + '.jpg'
if mongo is not None and coll.find({'link':link}).count() != 0:
print('already processed', r['name'])
continue
print(r['name'])
names.append(r['name'])
driver.get(link)
print(link)
        isedible = False  # default; refined below once the first results table is parsed
        try:
img = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[5]/div/div[1]/img[1]')
src = img.get_attribute('src')
im_sources.append(src)
print(src)
if os.path.exists(save_path):
print(r['name'], 'already saved image')
else:
print(save_path)
if not isedible:
try:
download_image(src, save_path, headers, cooks)
except:
no_imgs.append(r)
im_sources.pop()
src = ''
except:
no_imgs.append(r)
src = ''
try:
table1 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[7]/div/div[1]/ul')
except:
cannabinoids.append([])
terpenes.append([])
continue
table1soup = bs(table1.get_attribute('innerHTML'), 'lxml')
table1rows = [l.get_text() for l in table1soup.findAll('li')]
isedible = False
if re.search('serving\s*size', table1rows[0], re.IGNORECASE) is not None:
isedible = True
cannabinoids.append(table1rows)
try:
table2 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[8]/div/div/ul')
except:
try:
table2 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[9]/div/div/ul')
except:
terpenes.append([])
continue
table2soup = bs(table2.get_attribute('innerHTML'), 'lxml')
table2rows = [l.get_text() for l in table2soup.findAll('li')]
terpenes.append(table2rows)
coll.insert_one(
{'cannabinoids': table1rows,
'terpenes': table2rows,
'clean_name': clean_name,
'link':link,
'im_source':src,
'isedible':isedible,
'save_path':save_path,
'name': r['name']})
client.close()
return cannabinoids, terpenes, im_sources, no_imgs, names, clean_names
def save_raw_scrape(cannabinoids, terpenes, no_imgs, im_sources, names, clean_names, prefix=None):
if prefix is None:
pk.dump(cannabinoids, open('analytical360/cannabinoids.pk', 'w'), 2)
pk.dump(terpenes, open('analytical360/terpenes.pk', 'w'), 2)
pk.dump(no_imgs, open('analytical360/no_imgs.pk', 'w'), 2)
pk.dump(im_sources, open('analytical360/im_sources.pk', 'w'), 2)
pk.dump(names, open('analytical360/names.pk', 'w'), 2)
pk.dump(clean_names, open('analytical360/clean_names.pk', 'w'), 2)
else:
pk.dump(cannabinoids, open('analytical360/' + prefix + 'cannabinoids.pk', 'w'), 2)
pk.dump(terpenes, open('analytical360/' + prefix + 'terpenes.pk', 'w'), 2)
pk.dump(no_imgs, open('analytical360/' + prefix + 'no_imgs.pk', 'w'), 2)
pk.dump(im_sources, open('analytical360/' + prefix + 'im_sources.pk', 'w'), 2)
pk.dump(names, open('analytical360/' + prefix + 'names.pk', 'w'), 2)
pk.dump(clean_names, open('analytical360/' + prefix + 'clean_names.pk', 'w'), 2)
def load_raw_scrape(prefix=None):
if prefix is None:
cannabinoids = pk.load(open('analytical360/cannabinoids.pk'))
terpenes = pk.load(open('analytical360/terpenes.pk'))
no_imgs = pk.load(open('analytical360/no_imgs.pk'))
im_sources = pk.load(open('analytical360/im_sources.pk'))
names = pk.load(open('analytical360/names.pk'))
clean_names = pk.load(open('analytical360/clean_names.pk'))
else:
cannabinoids = pk.load(open('analytical360/' + prefix + 'cannabinoids.pk'))
terpenes = pk.load(open('analytical360/' + prefix + 'terpenes.pk'))
no_imgs = pk.load(open('analytical360/' + prefix + 'no_imgs.pk'))
im_sources = pk.load(open('analytical360/' + prefix + 'im_sources.pk'))
names = pk.load(open('analytical360/' + prefix + 'names.pk'))
clean_names = pk.load(open('analytical360/' + prefix + 'clean_names.pk'))
return cannabinoids, terpenes, no_imgs, im_sources, names, clean_names
def parse_raw_scrape(cannabinoids, terpenes, names):
'''
    Parses raw scrape data for cannabinoids and terpenes. Returns a dataframe
    with one row per sample and one column per parsed cannabinoid/terpene.
'''
trail = deque([0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1])
cannabinoid_strs = deque(['thc-a', 'thc', 'cbn', 'thc total', 'thc-total', 'cbd-a', 'cbd', 'cbd-total', 'cbd total', 'cbg', 'cbc', 'activated total', 'activated', 'active'])
c_dict_keys = ['thca', 'thc', 'cbn', 'thc_total', 'cbda', 'cbd', 'cbd_total', 'cbg', 'cbc', 'activated_total']
conversion_dict = {'thc-a':'thca',
'thc total':'thc_total',
'thc-total':'thc_total',
'cbd-a':'cbda',
'cbd-total':'cbd_total',
'cbd total':'cbd_total',
'activated total':'activated_total',
'activated':'activated_total',
                       'active':'activated_total'} # converts similar strings to the dict key for the cannabinoid dict
cannabinoid_dict = {}
screen_tups = list(zip(list(range(len(trail))), trail, cannabinoid_strs))
for i, cann in enumerate(cannabinoids):
print(i)
temp_cann = c_dict_keys[:]
#cannabinoid_dict.setdefault('name', []).append(names[i])
for ca in cann:
for j, t, c in screen_tups:
has_str, num = find_string(ca, c, t)
if has_str:
# idx = list(cannabinoid_strs).index(c)
# cannabinoid_strs.rotate(-idx) # move that entry to the beginning of the list
# trail.rotate(-idx)
# screen_tups = zip(range(len(trail)), trail, cannabinoid_strs)
print('found', c, ca)
if c in conversion_dict:
cannabinoid_dict.setdefault(conversion_dict[c], []).append(num)
temp_cann.remove(conversion_dict[c])
else:
cannabinoid_dict.setdefault(c, []).append(num)
temp_cann.remove(c)
break
if len(temp_cann) > 0:
print('didn\'t scrape:', temp_cann)
for t in temp_cann:
cannabinoid_dict.setdefault(t, []).append('')
terp_strs = deque(['beta-Pinene',
'Humulene',
'Limonene',
'alpha-Pinene',
'Caryophyllene',
'Beta Pinene',
'Linalool',
'Caryophyllene oxide',
'Myrcene',
'TERPENE-TOTAL',
'Terpinolene',
'Ocimene',
'Alpha Pinene'])
t_dict_keys = ['beta_pinene',
'alpha_pinene',
'caryophyllene_oxide',
'Humulene',
'Limonene',
'Caryophyllene',
'Linalool',
'Myrcene',
'Terpinolene',
'Ocimene',
'total_terpenes']
# converts similar strings to the dict key for terp dict
terp_conv_dict = {'beta-Pinene':'beta_pinene',
'Beta Pinene':'beta_pinene',
'alpha-Pinene':'alpha_pinene',
'Alpha Pinene':'alpha_pinene',
'Caryophyllene oxide':'caryophyllene_oxide',
'TERPENE-TOTAL':'total_terpenes'}
terp_dict = {}
for i, terp in enumerate(terpenes):
print(i)
temp_cann = t_dict_keys[:]
#terp_dict.setdefault('name', []).append(names[i])
for ta in terp:
for c in terp_strs:
has_str, num = find_string(ta, c)
if has_str:
idx = list(terp_strs).index(c)
print('found', c, ta)
if c in terp_conv_dict:
terp_dict.setdefault(terp_conv_dict[c], []).append(num)
temp_cann.remove(terp_conv_dict[c])
else:
terp_dict.setdefault(c, []).append(num)
temp_cann.remove(c)
break
if len(temp_cann) > 0:
print('didn\'t scrape:', temp_cann)
for t in temp_cann:
terp_dict.setdefault(t, []).append('')
cannabinoid_dict['name'] = names
for k in cannabinoid_dict:
print(k, len(cannabinoid_dict[k]))
for k in terp_dict:
print(k, len(terp_dict[k]))
cdf = pd.DataFrame(cannabinoid_dict)
tdf = pd.DataFrame(terp_dict)
total_df = cdf.merge(tdf, left_index=True, right_index=True)
return total_df
def find_string(search_str, str_to_find='THC-A', trail=False):
if search_str.find('8-THC') != -1:
return 0, 0
# if search_str.find('< 0.01 TERPENE-TOTAL') != -1:
# return 1, 0
if trail:
find_str = '.*' + str_to_find + '.*'
else:
find_str = '.*' + str_to_find + '$'
has_str = 0
res = re.search(find_str, search_str, re.IGNORECASE)
if res:
num = re.search('[\d\.]*', search_str).group(0)
if search_str.find('<\s*0.01') != -1:
return 1, 0
return 1, num
return 0, 0
def check_for_string(cannabinoids, str_to_find='THC-A', trail=True):
if trail:
find_str = '.*' + str_to_find + '.*'
else:
find_str = '.*' + str_to_find
#c = [' '.join(r) for r in cannabinoids]
has_str = []
for c in cannabinoids:
has_str_val = 0
for j in c:
res = re.search(find_str, j, re.IGNORECASE)
if res:
has_str_val = 1
break
has_str.append(has_str_val)
return has_str
def check_if_fields_present():
trail = [0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1]
cannabinoid_strs = ['thc-a', 'thc', 'cbn', 'thc total', 'thc-total', 'cbd-a', 'cbd', 'cbd-total', 'cbd total', 'cbg', 'cbc', 'activated total', 'activated', 'active']
for t, c in zip(trail, cannabinoid_strs):
has_str = check_for_string(cannabinoids, c, t)
print(c, np.mean(has_str))
def stuff():
    testdf = pd.DataFrame({'name':names})
import os
import pickle
import numpy as np
import pandas as pd
import gzip
import fcsparser
# Load Kuzushiji Japanese Handwritten dataset
def load_kmnist(path, dtype="kmnist", kind='train'):
images_path = os.path.join(path, f'{dtype}-{kind}-imgs.npz')
labels_path = os.path.join(path, f'{dtype}-{kind}-labels.npz')
images = np.load(images_path)
images = images.f.arr_0
images = images.reshape(images.shape[0], -1)
labels = np.load(labels_path)
labels = labels.f.arr_0
labels = labels.reshape(-1)
return images, labels
# FASHION MNIST (60000+10000, 784), 26MB
def load_mnist(path, kind="train"): # train, t10k
"""Load MNIST data from `path`"""
labels_path = os.path.join(path, "%s-labels-idx1-ubyte.gz" % kind)
images_path = os.path.join(path, "%s-images-idx3-ubyte.gz" % kind)
with gzip.open(labels_path, "rb") as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
with gzip.open(images_path, "rb") as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(
len(labels), 784
)
return images, labels
# CIFAR 10 (50000+10000, 3072), 163MB
def load_pickle(f):
return pickle.load(f, encoding="latin1")
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, "rb") as f:
datadict = load_pickle(f)
X = datadict["data"]
Y = datadict["labels"]
X = X.reshape(10000, 3072)
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1, 6):
f = os.path.join(ROOT, "data_batch_%d" % (b,))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, "test_batch"))
return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(cifar10_dir):
# Load the raw CIFAR-10 data
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
x_train = X_train.astype("float32")
x_test = X_test.astype("float32")
x_train /= 255
x_test /= 255
return x_train, y_train, x_test, y_test
def get_flow_data(ROOT):
fcs_data = fcsparser.parse(os.path.join(ROOT, "pbmc_luca.fcs"))
raw_data = fcs_data[1]
selected_columns = [col for col in raw_data.columns if col.endswith("-A")] + ['Time']
x = np.arcsinh(raw_data[selected_columns].values / 150.0).astype(np.float32, order='C')
return x
def get_data(dname, n_samples=None):
if dname == "spheres":
path = os.path.join(os.getcwd(), "data", "spheres")
df = pd.read_csv(os.path.join(path, 'spheres.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "allen":
path = os.path.join(os.getcwd(), "data", "allen")
df = pd.read_csv(os.path.join(path, 'allen.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "spheres_small":
path = os.path.join(os.getcwd(), "data", "spheres")
df = pd.read_csv(os.path.join(path, 'spheres_small.csv')) # load data
x = df.drop(columns=['label']).to_numpy()
label = df['label'].to_numpy()
return x, label
elif dname == "mnist":
path = os.path.join(os.getcwd(), "data", "MNIST", "raw")
return load_mnist(path=path, kind="train") # kind="t10k"
elif dname == "fmnist":
path = os.path.join(os.getcwd(), "data", "FashionMNIST", "raw")
return load_mnist(path=path, kind="train") # kind="t10k"
elif dname == "kmnist":
path = os.path.join(os.getcwd(), "data", "KuzushijiMNIST", "raw")
return load_kmnist(path=path, kind="train") # kind="t10k"
elif dname == "cifar10":
path = os.path.join(os.getcwd(), "data", "cifar-10-batches-py")
x, label, _, _ = get_CIFAR10_data(path)
return x, label
elif dname == "flow":
path = os.path.join(os.getcwd(), "data", "flow", "raw")
x = get_flow_data(path)
return x, np.arange(x.shape[0])
elif dname == "swissroll":
from sklearn import datasets
x, label = datasets.make_swiss_roll(n_samples=n_samples)
return x, label
elif dname == "scurve":
from sklearn import datasets
x, label = datasets.make_s_curve(n_samples=n_samples)
return x, label
elif dname == "single-cell":
path = os.path.join(os.getcwd(), "data", "single-cell")
data_path = os.path.join(path, "sc_10x.count.csv")
label_path = os.path.join(path, "sc_10x.metadata.csv")
x = pd.read_csv(data_path)
x = np.asarray(x)
x = np.swapaxes(x, 0, 1)
labels = pd.read_csv(label_path)
labels = labels['cell_line_demuxlet']
labels = np.asarray(labels)
label_uniq = list(set(labels))
label_uniq.sort()
for i, label in enumerate(labels):
if label == label_uniq[0]:
labels[i] = 0
elif label == label_uniq[1]:
labels[i] = 1
else:
labels[i] = 2
return x, labels
elif dname == "single-cell2":
path = os.path.join(os.getcwd(), "data", "single-cell")
data_path = os.path.join(path, "sc_10x_5cl.count.csv")
label_path = os.path.join(path, "sc_10x_5cl.metadata.csv")
x = pd.read_csv(data_path)
x = np.asarray(x)
x = np.swapaxes(x, 0, 1)
labels = pd.read_csv(label_path)
labels = labels['cell_line_demuxlet']
labels = np.asarray(labels)
label_uniq = list(set(labels))
label_uniq.sort()
print(label_uniq)
for i, label in enumerate(labels):
if label == label_uniq[0]:
labels[i] = 0
elif label == label_uniq[1]:
labels[i] = 1
elif label == label_uniq[2]:
labels[i] = 2
elif label == label_uniq[3]:
labels[i] = 3
else:
labels[i] = 4
return x, labels
elif dname == "single-cell3":
path = os.path.join(os.getcwd(), "data", "single-cell")
data_path = os.path.join(path, "sc_celseq2_5cl_p1.count.csv")
label_path = os.path.join(path, "sc_celseq2_5cl_p1.metadata.csv")
x = pd.read_csv(data_path)
x = np.asarray(x)
x = np.swapaxes(x, 0, 1)
labels = | pd.read_csv(label_path) | pandas.read_csv |
from contextlib import nullcontext
import copy
import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
from pandas.core.dtypes.common import is_float
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"arr, idx",
[
([1, 2, 3, 4], [0, 2, 1, 3]),
([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
(
[1, np.nan, 3, np.nan],
MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
),
],
)
def test_equals(arr, idx):
s1 = Series(arr, index=idx)
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 9
assert not s1.equals(s2)
@pytest.mark.parametrize(
"val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
)
def test_equals_list_array(val):
# GH20676 Verify equals operator for list of Numpy arrays
arr = np.array([1, 2])
s1 = Series([arr, arr])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = val
cm = (
tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
if isinstance(val, str)
else nullcontext()
)
with cm:
assert not s1.equals(s2)
def test_equals_false_negative():
# GH8437 Verify false negative behavior of equals function for dtype object
arr = [False, np.nan]
s1 = Series(arr)
s2 = s1.copy()
s3 = Series(index=range(2), dtype=object)
s4 = s3.copy()
s5 = s3.copy()
s6 = s3.copy()
s3[:-1] = s4[:-1] = s5[0] = s6[0] = False
assert s1.equals(s1)
assert s1.equals(s2)
assert s1.equals(s3)
assert s1.equals(s4)
assert s1.equals(s5)
assert s5.equals(s6)
def test_equals_matching_nas():
# matching but not identical NAs
left = Series([np.datetime64("NaT")], dtype=object)
right = Series([np.datetime64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.timedelta64("NaT")], dtype=object)
right = Series([np.timedelta64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.float64("NaN")], dtype=object)
right = Series([np.float64("NaN")], dtype=object)
assert left.equals(right)
assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
assert left.array.equals(right.array)
def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2):
# GH#39650
left = nulls_fixture
right = nulls_fixture2
if hasattr(right, "copy"):
right = right.copy()
else:
right = copy.copy(right)
ser = Series([left], dtype=object)
ser2 = Series([right], dtype=object)
if is_matching_na(left, right):
assert ser.equals(ser2)
elif (left is None and is_float(right)) or (right is None and is_float(left)):
assert ser.equals(ser2)
else:
assert not ser.equals(ser2)
def test_equals_none_vs_nan():
# GH#39650
ser = Series([1, None], dtype=object)
ser2 = Series([1, np.nan], dtype=object)
assert ser.equals(ser2)
    assert Index(ser, dtype=ser.dtype).equals(Index(ser2, dtype=ser2.dtype))
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import pandas as pd
import warnings
from sklearn.linear_model import LinearRegression
import scipy.cluster.hierarchy as sch
import datetime
import random
class backtest_model:
"""
Given a user-defined portfolio construction strategy (a function that takes in stock-related data and returns portfolio weights) and
the data that the user wish the strategy to be tested on, calculate several evaluation metrics of the portfolio, including
net_returns, sharpe ratio, certainty equivalent returns, turnover, etc.
Various inputs can be modified to suit the needs of strategy and backtesting scenarios, such as price-impact models,
transaction costs, etc.
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy need extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param trace_back: indicate whether the strategy need to trace back to past portfolios to function. Note: please handle the boundary situation where past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
:param missing_val : indicate whether user strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot
:type missing_val: bool
"""
def __init__(self, strategy, involved_data_type, need_extra_data=False, trace_back=False, name='Unnamed', missing_val=False):
"""
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
        :param involved_data_type: a list of strings indicating the types of data {'price','return','ex_return'} used in the strategy; the order of the strings is the order in which the data are passed to the strategy
        :type involved_data_type: list
        :param need_extra_data: indicates whether the strategy needs extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
        :type need_extra_data: bool
        :param trace_back: indicates whether the strategy needs to trace back to past portfolios to function. Note: please handle the boundary case where the list of past portfolios is empty in the strategy function
        :type trace_back: bool
        :param name: name of the strategy to be tested
        :type name: str
        :param missing_val: indicates whether the user strategy function can handle missing values in the data on its own. True means the function can deal with missing values; False means it cannot. Otherwise a wrapper is applied to the strategy function to deal with missing data: it only passes in columns with complete data and assigns weight 0 to the other assets, keeping relative positions the same. Warning: 1. the wrapper will slow the running speed significantly. 2. the wrapper does not cover missing data in "extra_data".
:type missing_val: bool
"""
def wrapper(function, list_df, extra_data=pd.DataFrame(), historical_portfolios=pd.DataFrame()):
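            # Identify the assets (columns) without missing values in the involved dataframe that has the
            # fewest complete columns, call the user strategy on that subset only, and leave the remaining
            # assets at weight 0.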
length = list_df[0].shape[1]
for frame in list_df:
if length >= len(frame.columns[frame.isna().any() == False]):
length = len(frame.columns[frame.isna().any() == False])
position_nan = frame.isna().any().values
w = np.zeros(list_df[0].shape[1])
if need_extra_data:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data, historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data)
else:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df])
return w
if not missing_val:
if name not in ['naive allocation portfolio',
'inverse variance allocation portfolio',
'min. variance allocation portfolio',
'basic mean-variance allocation portfolio',
'Fama-French 3-factor model portfolio',
'hierarchical-risk-parity portfolio',
'Bayes_Stein_shrinkage portfolio']:
warnings.warn('The library will deal with missing data. Running speed will be significantly reduced!')
if need_extra_data:
if trace_back:
self.__strategy = lambda x,y,z: wrapper(strategy, x,extra_data=y,historical_portfolios=z)
else:
self.__strategy = lambda x,y: wrapper(strategy, x,extra_data=y)
else:
if trace_back:
self.__strategy = lambda x,z: wrapper(strategy, x,historical_portfolios=z)
else:
self.__strategy = lambda x: wrapper(strategy, x)
else:
self.__strategy = strategy
if type(involved_data_type) != list:
raise Exception('"involved_data_type" must be given in a list')
else:
self.__involved_data_type = involved_data_type
if type(need_extra_data) != bool:
raise Exception('"need_extra_data" must be a bool variable')
else:
self.__need_extra_data = need_extra_data
if type(trace_back) != bool:
raise Exception('"trace_back" must be a bool variable')
else:
self.__trace_back = trace_back
if type(name) != str:
raise Exception('"name" must be a string variable')
else:
self.name = name
self.__last_test_frequency = None
self.__last_test_portfolios = None
self.__price_impact = False
self.__sharpe = None
self.__ceq = None
self.__average_turnover = None
self.__total_turnover = None
self.__net_returns = None
self.__net_excess_returns = None
# function to prepare data, including change of frequency, convert between price, return and ex_return
def __prepare_data(self, data, freq_data, data_type, rf, interval, window, freq_strategy,
volume=pd.DataFrame(), price_impact=False):
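        # Returns (normal_return_df, excess_return_df, risk_free_df, price_df); when price_impact=True the
        # aligned volume is also returned (inserted before risk_free_df). price_df is empty unless
        # data_type == 'price'.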
if not isinstance(data, pd.DataFrame):
raise Exception('Please provide correct format of test data!')
try:
data.index = pd.to_datetime(data.index)
except:
print(
'Invalid index provided in your test data, please make sure that index is in compatible datetime format')
volume.index = pd.to_datetime(volume.index)
data = data.copy()
if data_type == 'return':
if freq_data != freq_strategy:
warnings.warn(
                    'data_type==return with interval>1 or a change of frequency; expect a large amount of computational error')
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:,:-1]
risk_free_df=data.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
normal_return_df = data
excess_return_df = normal_return_df.sub(rf.values, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'ex_return':
if freq_data != freq_strategy:
warnings.warn(
                    'data_type==ex_return with interval>1 or a change of frequency; expect a large amount of computational error')
data = data.add(rf, axis=0)
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:, :-1]
risk_free_df = data.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
excess_return_df = data
normal_return_df = excess_return_df.add(rf, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'price':
#data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
rf_df=np.cumprod(1+rf)
if freq_data != freq_strategy:
data = data.resample(freq_strategy).ffill().fillna(method='ffill')
rf_df=rf_df.resample(freq_strategy).ffill().fillna(method='ffill')
if price_impact:
volume = volume.resample(freq_strategy).mean()
normal_return_df = data.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_df=rf_df.pct_change(fill_method=None).dropna(axis=0,how='all').loc[normal_return_df.index]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0)
if price_impact:
return (normal_return_df, excess_return_df, volume.loc[normal_return_df.index],
risk_free_df,
data.loc[normal_return_df.index])
else:
return (normal_return_df, excess_return_df, risk_free_df,
data.loc[normal_return_df.index])
# rebalance function to be applied to each rolling window of length (window)
def __rebalance(self, ex_return_df, normal_return_df, price_df, window, extra_data=None):
historical_portfolios = []
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
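        # Iterate over rolling windows of length `window`; for each full window, call the user strategy on
        # the involved data types restricted to that window's dates and collect the resulting weights.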
if self.__need_extra_data:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index]))
else:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type]))
return historical_portfolios
def __test_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, volume, c, initial_wealth, extra_data, price_impact_model='default',power=0.6):
# prepare data
normal_return_df, excess_return_df, volume, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy,
volume,
price_impact=True)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 2 here can change later
raise Exception(
                'Too few samples to test on will result in poor performance: reduce window, decrease interval, or '
                'increase the length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
                'Please make sure your strategy builds portfolios whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df=normal_return_df.join(risk_free_rate)
df=(1+df.iloc[window-1:]).apply(lambda x:np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
normal_return_df=df.iloc[:,:-1]
risk_free_rate=df.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df=price_df.pct_change(fill_method=None).dropna(axis=0,how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df=normal_return_df.sub(risk_free_rate.values, axis=0)
price_df=price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
# pre_balance portfolios that serves as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
        # pre_balance portfolios that serve as numerators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# factor in the initial_wealth for all 'diff','portfolios'
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transform volume to average volume
volume = volume.rolling(window).mean().dropna(axis=0, how='all').fillna(method='ffill').loc[normal_return_df.index]
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
# Money account value after each period, before rebalancing
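        # Default price-impact model: the execution price of each rebalancing trade is scaled by
        # 1 +/- c * (traded dollar amount / (average volume * price)) ** power, so buys become more expensive
        # and sells realize less as trade size grows relative to market depth.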
pi_models = {'default': {'buy': 1 + c * (diff[diff > 0].div((volume * price_df).values)) ** power,
'sell': 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** power}}
pi_buy, pi_sell = pi_models[price_impact_model]['buy'], pi_models[price_impact_model]['sell']
# sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * (
# 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** 0.6)).sum(axis=1)
# buy = ((diff[diff >= 0].mul(1 + ptc_buy)) * (
# 1 + c * (diff[diff >= 0].div((volume * price_df).values)) ** 0.6)).sum(axis=1)
sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * pi_sell).sum(axis=1)
buy = ((diff[diff > 0].mul(1 + ptc_buy)) * pi_buy).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
# net_returns
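        # computed as (pre_balance_portfolios_2 total + money account) divided by the previous period's
        # (pre_balance_portfolios total + money account), minus 1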
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def __test_no_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, initial_wealth, extra_data):
# prepare data
normal_return_df, excess_return_df, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
        if window >= T - 2:  # 2 here can change later
raise Exception(
                'Too few samples to test on will result in poor performance: reduce window, decrease interval, or '
                'increase the length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
                'Please make sure your strategy builds portfolios whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df = normal_return_df.join(risk_free_rate)
df = (1 + df.iloc[window - 1:]).apply(lambda x: np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(
axis=0, how='all')
normal_return_df = df.iloc[:, :-1]
risk_free_rate = df.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df = price_df.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
# pre_balance portfolios that serves as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
        # pre_balance portfolios that serve as numerators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# if ftc != 0:
# # factor in the initial_wealth for all 'diff','portfolios'
# portfolios *= initial_wealth
# pre_balance_portfolios *= initial_wealth
# pre_balance_portfolios_2 *= initial_wealth
# diff *= initial_wealth
#
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# fixed = diff[diff != 0].count(axis=1).mul(ftc)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy - fixed
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
# else:
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transaction cost impacts
sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def backtest(self, data, freq_data, volume=pd.DataFrame(), data_type='price', rf=pd.Series(dtype='float'),
interval=1, window=60,
freq_strategy='D',
price_impact=False, ptc_buy=0, ptc_sell=0, ftc=0, c=1, initial_wealth=1E6,
extra_data=pd.DataFrame(), price_impact_model='default',power=0.6):
"""
Start the backtesting process with the built model. The function itself will not return anything. To get the results,
please call respective functions.
:param data: historical data that the strategy to be tested on. Index must be datetime format compatible
:type data: pd.DataFrame
:param freq_data: The frequency of the data provided, choose between {'D','W','M'}, where 'D' for day,'W' for week and 'M' for month. 'data' must be taken in the smallest unit of respective frequency, e.g. the frequency 'M' means the data is taken at each month
:type freq_data: str
:param volume: trading volume of each asset during each period (array of size T*N), or average trading volume for each asset over all periods (N-d array). If passing in as pd.DataFrame, then its index must match that of the data.
:type volume: pd.DataFrame or list or np.ndarray or pd.Series
:param data_type: choose from {'price','return','ex_return'} where 'price' stands for price data of assets at each timestamp, 'return' stands for normal percentage return of each asset in each period, 'ex_return' stands for percentage return net of risk-free rate
:type data_type: str
:param rf: data for risk-free rate in each period. Note: if 'rf' is passed in as a dataframe or series, the index of 'rf' must match that of 'data'
:type rf: pd.Series or pd.DataFrame or int or float
:param interval: number of periods that users want their portfolios to be rebalanced, the unit is based on 'freq_strategy'. e.g. If 'freq_data' is 'D', while 'freq_strategy' is 'M', and 'interval' is 2, then the portfolio will be rebalanced every 2 months using the user-defined portfolio-construction strategy
:type interval: int
:param window: length of rolling windows of 'data' wanted to feed into 'strategy' function. e.g. 'window'=60 means each time during rebalancing, past 60 periods of 'data' will be passed into user-defined strategy function
:type window: int
:param freq_strategy: The frequency on which the user want to use 'strategy' to rebalance the portfolio, choose between {'D','W','M'}. If "freq_strategy" is different from "freq_data", the library will resample data on "freq_strategy". Note: 'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'
:type freq_strategy: str
:param price_impact: indicate whether to use price-impact model or not
:type price_impact: bool
        :param ptc_buy: proportional transaction cost of buying each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single variable that stands for a universal transaction cost. Note: cannot be a list, and must not contain labels
        :type ptc_buy: pd.Series or np.ndarray or int or float
        :param ptc_sell: proportional transaction cost of selling each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single variable that stands for a universal transaction cost. Note: cannot be a list, and must not contain labels
:type ptc_sell: pd.Series or np.ndarray or int or float
:param ftc: dollar value of fixed transaction cost of each transaction, measured in one unit of any currency.
:type ftc: int or float
:param c: market depth indicators. Can be a Series or array that provide one market depth for each asset, or a single variable that stands for universal market depth. Note: Do NOT provide labels
:type c: pd.Series or int or np.ndarray or float
:param initial_wealth: dollar value of initial wealth of testing when 'price-impact' is true or 'ftc'!=0
:type initial_wealth: int or float
:param extra_data: extra_data to be passed into 'strategy' only when 'need_extra_data'==True. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type extra_data: pd.DataFrame
:param price_impact_model: choose the price impact model you want to use from {'default'} (testing feature, to be built on)
:type price_impact_model: str
:return: None
"""
random.seed(1)
if price_impact_model not in {'default'}:
raise Exception('Unknown type of "price_impact_model"!')
if type(initial_wealth) != int and type(initial_wealth) != float:
raise Exception('Wrong type of "initial_wealth" given!')
        if type(c) != float and type(c) != int and not isinstance(c, pd.Series) and not isinstance(c, np.ndarray):
raise Exception("Wrong type of 'c' given!")
if type(ftc) != int and type(ftc) != float:
raise Exception("Wrong type of 'ftc' given!")
if type(ptc_buy) != int and type(ptc_buy) != float and not isinstance(ptc_buy, pd.Series) and not isinstance(
ptc_buy,
np.ndarray):
raise Exception("Wrong type of 'ptc_buy' provided!")
else:
ptc_buy /= 10000
if type(ptc_sell) != int and type(ptc_sell) != float and not isinstance(ptc_sell, pd.Series) and not isinstance(
ptc_sell,
np.ndarray):
raise Exception("Wrong type of 'ptc_sell' provided!")
else:
ptc_sell /= 10000
if type(price_impact) != bool:
raise Exception("'price_impact' must be a boolean variable")
if freq_data not in {'D', 'W', 'M'}:
raise Exception("'freq_data' must be chosen from {'D','W','M'}")
if freq_strategy not in {'D', 'W', 'M'}:
raise Exception("'freq_strategy' must be chosen from {'D','W','M'}")
if freq_data == 'W' and freq_strategy == 'D':
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if freq_data == 'M' and freq_strategy in {'D', 'W'}:
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if type(window) != int:
raise Exception("'window' must be an 'int' variable")
if type(interval) != int:
raise Exception("'interval' must be an 'int' variable")
if initial_wealth == 1E6:
if price_impact == True or ftc != 0:
warnings.warn('Using default initial_wealth value @1E6!')
if self.__need_extra_data == True:
if isinstance(extra_data, pd.DataFrame) or isinstance(extra_data, pd.Series):
if extra_data.empty:
                    raise Exception('Please provide a non-empty "extra_data" DataFrame!')
try:
extra_data.index = pd.to_datetime(extra_data.index)
except:
print(
'Invalid index provided in your "extra_data", please make sure that index is in compatible datetime format')
else:
raise Exception(
'"extra_data" need to be a Series or DataFrame with datetime index corresponding to test data provided')
# if user-defined strategy need extra_data to operate, the library will NOT provide change of frequency functionality
if freq_strategy != freq_data:
raise Exception(
'If "extra_data" needed for your strategy, please make sure "freq_strategy" matches "freq_data"!')
if not extra_data.index.equals(data.index):
raise IndexError('Index of extra_data and index of data do not match!')
if (data_type == 'return' or data_type == 'ex_return') and ('price' in self.__involved_data_type):
raise Exception('"price" data type is involved in your strategy, please provide data with type "price"')
if isinstance(rf, pd.Series) or isinstance(rf, pd.DataFrame):
# if rf.empty and (('ex_return' in self.__involved_data_type) or ('return' in self.__involved_data_type)):
if rf.empty:
raise Exception(
'Please provide risk-free rate! (Set it to 0 if you do not want to consider it. Note that in this case, net_returns and net_excess_returns will be the same)')
if not rf.index.equals(data.index):
raise IndexError('Index of "rf" and index of "data" do not match!')
elif type(rf) == int or type(rf) == float:
rf = pd.Series([rf] * data.shape[0], index=data.index)
else:
raise Exception('Wrong format of "rf" is given.')
# if ftc != 0:
# if data_type != 'price':
# raise Exception('data_type must be "price" when using fixed transaction cost (ftc!=0)')
# divide into price_impact model and no_price_impact model
self.__price_impact = price_impact
frequency_map = {'D': 'Day', 'W': 'Week', 'M': 'Month'}
if price_impact == False:
self.__last_test_frequency = f'{interval} {frequency_map[freq_strategy]}'
self.__test_no_price_impact(data, freq_data, data_type, rf, interval, window, freq_strategy,
ptc_buy, ptc_sell, ftc, initial_wealth, extra_data)
else:
if isinstance(volume, pd.DataFrame):
if not volume.index.equals(data.index):
raise Exception('Index of "volume" and "index" of data do not match!')
elif isinstance(volume, pd.Series) or isinstance(volume, np.ndarray):
try:
volume = pd.DataFrame(volume.reshape(1, -1), columns=data.columns)
except:
print('Check your volume data!')
volume = pd.concat([volume] * data.shape[0]).set_index(data.index)
elif isinstance(volume, list):
try:
volume = pd.DataFrame([volume], columns=data.columns)
except:
print('Check your volume data!')
volume = | pd.concat([volume] * data.shape[0]) | pandas.concat |
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from difflib import SequenceMatcher
import seaborn as sns
from statistics import mean
from ast import literal_eval
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from pygam import LinearGAM, s, l, f
from matplotlib import lines
import six
def extract_boar_teloFISH_as_list(path):
"""
    Pulls Kelly's telomere FISH data for 40 boars into a list, to be made into a DataFrame and joined with the
    main DataFrame if possible.
    These excel files take forever to load; the objective here is to synthesize all the excel files for
    telomere FISH data into one dataframe, then save that dataframe to a csv file to be retrieved later.
    Loading one whole csv file containing all the data will be much faster than loading the parts of the whole.
    Along the way, we'll normalize the teloFISH data using controls internal to each excel file.
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, df):
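    # Harmonize sample sizes: randomly down-sample when there are more than n_cells * telosPercell telomere
    # measurements, and impute by random resampling (with replacement when at most half the expected
    # measurements are present) when there are fewer.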
max_telos = n_cells * telosPercell
half_telos = (n_cells * telosPercell) / 2
if df.size > max_telos:
df_sampled = df.sample(max_telos)
return df_sampled
if df.size > 25 and df.size <= half_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
if df.size > 25 and df.size < max_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
else:
return df
def clean_individ_telos(telo_data):
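    # Drop the fixed metadata rows interspersed in the column, coerce the values to numeric, drop NaNs,
    # and discard individual telomere measurements more than 3 standard deviations from the mean.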
labels=[6, 172, 338, 504, 670, 836, 1002, 1168, 1334, 1500, 1666, 1832,
1998, 2164, 2330, 2496, 2662, 2828, 2994, 3160, 3326, 3492, 3658, 3824,
3990, 4156, 4322, 4488, 4654, 4820]
labels_offset_by6 = [(x-6) for x in labels]
telo_data = telo_data.drop(labels_offset_by6)
telo_data = pd.to_numeric(telo_data.iloc[:,0], errors='coerce')
telo_data = telo_data.dropna(axis=0, how='any')
telo_data = telo_data.to_frame(name=None)
telo_data = telo_data[(np.abs(stats.zscore(telo_data)) < 3).all(axis=1)]
telo_data = | pd.Series(telo_data.iloc[:,0]) | pandas.Series |
import os
import pandas as pd
import sys
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import random
import statistics
import itertools
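# JtokWh: conversion factor from Joules to kWh (1 kWh = 3.6e6 J, so 1 J ~ 2.7778e-7 kWh)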
JtokWh = 2.7778e-7
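# weight_factor: per-building-type weights (ordered as idf_names) applied when summing demands across building types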
weight_factor = [1.50558832,0.35786005,1.0]
path_test = os.path.join(sys.path[0])
representative_days_path= os.path.join(path_test,'ScenarioReduction')
sizing_path = os.path.join(path_test, 'Design_results')
operation_path = os.path.join(path_test, 'Operation_results')
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
editable_data_sizing_path =os.path.join(path_test, 'editable_values_design.csv')
editable_data_sizing = pd.read_csv(editable_data_sizing_path, header=None, index_col=0, squeeze=True).to_dict()[1]
num_scenarios = int(editable_data['num_scenarios'])
num_clusters= int(editable_data['Cluster numbers'])+2
population_size = int(editable_data['population_size'])
population_size_sizing = int(editable_data_sizing['population_size'])
idf_names = ['ASHRAE901_OfficeMedium_STD2019_Denver','ASHRAE901_Hospital_STD2019_Denver','ASHRAE901_RetailStandalone_STD2019_Denver']
thermal_eff_dict = {idf_names[0]:0.8,idf_names[1]:0.8125,idf_names[2]:0.82}
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
weather_data = []
weather_data_names =[]
weather_data_bar_names =[]
for year in range(start_year,end_year+1):
weather_data.append(city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year))
weather_data_names.append('AMY '+str(year))
weather_data_bar_names.append('AMY \n'+str(year))
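# Weather file groups: AMYs (actual meteorological years), TMYs (typical meteorological years)
# and FMYs (future meteorological years for 2050 and 2080)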
dict_EPWs= {}
dict_EPWs['AMYs']=weather_data
dict_EPWs['TMYs']=['USA_UT_Salt.Lake.City.Intl.AP.725720_TMY','USA_UT_Salt.Lake.City.725720_TMY2','USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3']
#dict_EPWs['TMYs']=['USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3']
dict_EPWs['FMYs']=['USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2050),'USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2080)]
dict_EPWs_names= {}
dict_EPWs_names['AMYs']=weather_data_names
dict_EPWs_names['TMYs']=['TMY','TMY2','TMY3']
#dict_EPWs_names['TMYs']=['TMY3']
dict_EPWs_names['FMYs']=['FMY '+str(2050),'FMY '+str(2080)]
dict_EPWs_bar_names= {}
dict_EPWs_bar_names['AMYs']=weather_data_bar_names
dict_EPWs_bar_names['TMYs']=['TMY \n','TMY2 \n','TMY3 \n']
dict_EPWs_bar_names['FMYs']=['FMY \n'+str(2050),'FMY \n'+str(2080)]
main_weather_epw = {}
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
results_compare = os.path.join(path_test, 'Results')
if not os.path.exists(results_compare):
os.makedirs(results_compare)
years=list(range(1998,2020))
years= ['AMY \n'+str(i) for i in years]
years.append('TMY')
years.append('TMY2')
years.append('TMY3')
years.append('FMY \n'+str(2050))
years.append('FMY \n'+str(2080))
### Representative Days ###
def representative_day_function():
global representative_days,weight_representative_day_main,weight_representative_day_scenario
representative_days = defaultdict(list)
weight_representative_day_scenario = defaultdict(list)
weight_representative_day_main = defaultdict(list)
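    # Each representative day's weight is its 'Percent %' share of the 365-day year.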
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
for representative_day in range(num_clusters):
rep_day= pd.read_csv(os.path.join(representative_days_path,output_prefix + 'Represent_days_modified_'+str(representative_day)+ '.csv'))
representative_days[output_prefix].append(rep_day)
weight_representative_day_main[output_prefix].append(rep_day['Percent %']/100*365)
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
for representative_day in range(num_clusters):
rep_day= pd.read_csv(os.path.join(representative_days_path,output_prefix + 'Represent_days_modified_'+str(representative_day)+ '.csv'))
representative_days[output_prefix].append(rep_day)
weight_representative_day_scenario[output_prefix].append(rep_day['Percent %']/100*365)
### Energy Demands ###
def energy_demands():
global elect_buildings,gas_buildings,cool_buildings,elect_annual,gas_annual,cool_annual,total_elect_buildings,total_gas_buildings,total_cool_buildings,total_elect_annual,total_gas_annual,total_cool_annual
elect_buildings = defaultdict(list)
gas_buildings = defaultdict(list)
cool_buildings = defaultdict(list)
elect_annual= defaultdict(list)
gas_annual = defaultdict(list)
cool_annual = defaultdict(list)
total_elect_buildings= []
total_gas_buildings = []
total_cool_buildings = []
total_elect_annual= []
total_gas_annual = []
total_cool_annual = []
for scenario in range(len(epw_names)):
sum_electricity_buildings = []
sum_heating_buildings = []
sum_cooling_buildings = []
sum_electricity_annual = []
sum_heating_annual = []
sum_cooling_annual = []
for building_type in idf_names:
output_prefix = building_type+'_'+epw_names[scenario]+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
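            # Split the metered energy into electricity (excluding heating and cooling), hot-water/heating and
            # cooling demands in kWh; gas consumption is converted to useful heat via the building's boiler
            # thermal efficiency and electric heating is added to the heating demand.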
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'] - data['Cooling:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
cool_data = (data['Cooling:Electricity [J](Hourly)'])*JtokWh
elect_buildings[building_type].append(elect_data)
gas_buildings[building_type].append(heat_data)
cool_buildings[building_type].append(cool_data)
elect_annual[building_type].append(sum(elect_data))
gas_annual[building_type].append(sum(heat_data))
cool_annual[building_type].append(sum(cool_data))
sum_electricity_buildings.append(elect_data*weight_factor[idf_names.index(building_type)])
sum_heating_buildings.append(heat_data*weight_factor[idf_names.index(building_type)])
sum_cooling_buildings.append(cool_data*weight_factor[idf_names.index(building_type)])
sum_electricity_annual.append(sum(elect_data*weight_factor[idf_names.index(building_type)]))
sum_heating_annual.append(sum(heat_data*weight_factor[idf_names.index(building_type)]))
sum_cooling_annual.append(sum(cool_data*weight_factor[idf_names.index(building_type)]))
total_elect_buildings.append(sum(sum_electricity_buildings))
total_gas_buildings.append(sum(sum_heating_buildings))
total_cool_buildings.append(sum(sum_cooling_buildings))
total_elect_annual.append(sum(sum_electricity_annual))
total_gas_annual.append(sum(sum_heating_annual))
total_cool_annual.append(sum(sum_cooling_annual))
global elect_buildings_main,gas_buildings_main,cool_buildings_main,elect_annual_main,gas_annual_main,cool_annual_main,total_elect_buildings_main,total_gas_buildings_main,total_cool_buildings_main,total_elect_annual_main,total_gas_annual_main,total_cool_annual_main
elect_buildings_main = defaultdict(list)
gas_buildings_main = defaultdict(list)
cool_buildings_main = defaultdict(list)
elect_annual_main = defaultdict(list)
gas_annual_main = defaultdict(list)
cool_annual_main = defaultdict(list)
total_elect_annual_main = []
total_gas_annual_main = []
total_cool_annual_main = []
total_elect_buildings_main = []
total_gas_buildings_main = []
total_cool_buildings_main = []
global output_prefix_short
output_prefix_short ={}
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
output_prefix_short[output_prefix] = dict_EPWs_names[key][dict_EPWs[key].index(epw_file_name)]
sum_electricity_buildings_main = []
sum_heating_buildings_main = []
sum_cooling_buildings_main = []
sum_electricity_annual_main = []
sum_heating_annual_main = []
sum_cooling_annual_main = []
for building_type in idf_names:
output_prefix = building_type+'_'+epw_file_name+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'] - data['Cooling:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
cool_data = (data['Cooling:Electricity [J](Hourly)'])*JtokWh
elect_buildings_main[building_type].append(elect_data)
gas_buildings_main[building_type].append(heat_data)
cool_buildings_main[building_type].append(cool_data)
elect_annual_main[building_type].append(sum(elect_data))
gas_annual_main[building_type].append(sum(heat_data))
cool_annual_main[building_type].append(sum(cool_data))
sum_electricity_buildings_main.append(elect_data*weight_factor[idf_names.index(building_type)])
sum_heating_buildings_main.append(heat_data*weight_factor[idf_names.index(building_type)])
sum_cooling_buildings_main.append(cool_data*weight_factor[idf_names.index(building_type)])
sum_electricity_annual_main.append(sum(elect_data*weight_factor[idf_names.index(building_type)]))
sum_heating_annual_main.append(sum(heat_data*weight_factor[idf_names.index(building_type)]))
sum_cooling_annual_main.append(sum(cool_data*weight_factor[idf_names.index(building_type)]))
total_elect_buildings_main.append(sum(sum_electricity_buildings_main))
total_gas_buildings_main.append(sum(sum_heating_buildings_main))
total_cool_buildings_main.append(sum(sum_cooling_buildings_main))
total_elect_annual_main.append(sum(sum_electricity_annual_main))
total_gas_annual_main.append(sum(sum_heating_annual_main))
total_cool_annual_main.append(sum(sum_cooling_annual_main))
j = 0
def generate_combo_plots(mode):
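    # mode: 'seperate' produces one scatter of annual cooling vs. hot-water demand per building type; 'total' uses the demands aggregated across building types.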
SMALL_SIZE = 30
MEDIUM_SIZE = 32
BIGGER_SIZE = 38
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,20)
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
for i in range(27)]
if mode=='seperate':
marker = itertools.cycle(('v','+','s','^','o','x','*'))
for building_type in idf_names:
plt.figure()
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
label = output_prefix_short[output_prefix]
if key=='AMYs':
year_selected_number=int(label.replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='TMYs':
if label=='TMY3':
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='FMYs':
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,building_type+'_annual_main_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
for building_type in idf_names:
plt.figure()
label_dict = {}
for scenario in range(len(epw_names)):
key_label = round(cool_annual[building_type][scenario]/1000,0)
#if key_label not in label_dict.keys():
#label_dict[key_label] = epw_names[scenario]
label_short= epw_names[scenario].replace('_','')
marker = marker_list[int(label_short[1])]
color = color_list[int(label_short[3])]
if (int(label_short[1])==0 or int(label_short[1])==5 or int(label_short[1])==9) and (int(label_short[3])==0 or int(label_short[3])==5 or int(label_short[3])==9):
if int(label_short[1])==0:
label_T='Tmin'
elif int(label_short[1])==5:
label_T='Tmed'
elif int(label_short[1])==9:
label_T='Tmax'
if int(label_short[3])==0:
label_S='Smin'
elif int(label_short[3])==5:
label_S='Smed'
elif int(label_short[3])==9:
label_S='Smax'
label = label_T + label_S
if building_type==idf_names[1]:
weight_factor_pareto_front =0.9955
else:
weight_factor_pareto_front = 1
plt.scatter(cool_annual[building_type][scenario]/1000,gas_annual[building_type][scenario]/1000,color=color,marker=marker,s=300, cmap=cmap, label=label_short)
plt.annotate(label,xy=(cool_annual[building_type][scenario]/1000,gas_annual[building_type][scenario]/1000),xytext=(cool_annual[building_type][scenario]/1000*1.005*weight_factor_pareto_front,gas_annual[building_type][scenario]/1000),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,building_type+'_annual_scenario_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
elif mode =='total':
marker = itertools.cycle(('v','+','s','^','o','x','*'))
plt.figure()
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
label = output_prefix_short[output_prefix]
if key=='AMYs':
year_selected_number=int(label.replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='TMYs':
if label=='TMY3':
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='FMYs':
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,'total_annual_main_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
label_dict = {}
for scenario in range(len(epw_names)):
key_label = round(total_cool_annual[scenario]/1000,0)
#if key_label not in label_dict.keys():
# label_dict[key_label] = epw_names[scenario]
label_short= epw_names[scenario].replace('_','')
marker = marker_list[int(label_short[1])]
color = color_list[int(label_short[3])]
if (int(label_short[1])==0 or int(label_short[1])==5 or int(label_short[1])==9) and (int(label_short[3])==0 or int(label_short[3])==5 or int(label_short[3])==9):
if int(label_short[1])==0:
label_T='Tmin'
elif int(label_short[1])==5:
label_T='Tmed'
elif int(label_short[1])==9:
label_T='Tmax'
if int(label_short[3])==0:
label_S='Smin'
elif int(label_short[3])==5:
label_S='Smed'
elif int(label_short[3])==9:
label_S='Smax'
label = label_T + label_S
plt.scatter(total_cool_annual[scenario]/1000,total_gas_annual[scenario]/1000,s=300,c=color,marker=marker, cmap=cmap, label=label_short)
plt.annotate(label,xy=(total_cool_annual[scenario]/1000,total_gas_annual[scenario]/1000),xytext=(total_cool_annual[scenario]/1000*1.001,total_gas_annual[scenario]/1000*1.001),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,'total_annual_scenario_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
def stats_energy_demands():
cols_revised = ['Office Medium','Hospital','Retail stand-alone', 'Total']
weight_factor_dict = {idf_names[0]:weight_factor[0],idf_names[1]:weight_factor[1],idf_names[2]:weight_factor[2]}
stats_table_seperate = defaultdict(list)
k=0
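    # For each building type: weighted mean and standard deviation of annual demand (MWh) across the main
    # weather files, and the coefficient of variation CV% = 100 * std / mean.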
for building_type in idf_names:
#stats_table_seperate[k].append(round(np.mean(elect_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual_main[building_type])*100/np.mean(elect_annual_main[building_type]),2))
stats_table_seperate[k].append(round(np.mean(gas_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual_main[building_type])*100/np.mean(gas_annual_main[building_type]),2))
stats_table_seperate[k].append(round(np.mean(cool_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual_main[building_type])*100/np.mean(cool_annual_main[building_type]),2))
k = k+1
stats_table_total = []
#stats_table_total.append(round(np.mean(total_elect_annual_main)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual_main)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual_main)*100/np.mean(total_elect_annual_main),2))
stats_table_total.append(round(np.mean(total_gas_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual_main)*100/np.mean(total_gas_annual_main),2))
stats_table_total.append(round(np.mean(total_cool_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual_main)*100/np.mean(total_cool_annual_main),2))
statistics_table = {#'Elect Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
#'Elect STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
#'CV \% Elect': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
                        'Heat Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
                        'Heat STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
                        'CV \% Heat': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
                        'Cool Mean': [stats_table_seperate[0][3],stats_table_seperate[1][3],stats_table_seperate[2][3],stats_table_total[3]],
                        'Cool STD': [stats_table_seperate[0][4],stats_table_seperate[1][4],stats_table_seperate[2][4],stats_table_total[4]],
                        'CV \% Cool': [stats_table_seperate[0][5],stats_table_seperate[1][5],stats_table_seperate[2][5],stats_table_total[5]]}
df_statistics_table= pd.DataFrame(statistics_table)
df_statistics_table.insert(0, "", cols_revised, True)
for i in range(1,len(df_statistics_table.columns)*2):
if i%2!=0:
df_statistics_table.insert(i, "&", ["&"]*len(df_statistics_table), True)
df_statistics_table.insert(i, "\\\\ \hline", ["\\\\ \hline"]*len(df_statistics_table), True)
df_statistics_table.to_csv(os.path.join(results_compare,'stats_main_seperate_demand_WC_table.csv'))
stats_table_seperate = defaultdict(list)
weight_factor_dict = {idf_names[0]:weight_factor[0],idf_names[1]:weight_factor[1],idf_names[2]:weight_factor[2]}
k=0
for building_type in idf_names:
#print(
#building_type,np.std(elect_annual[building_type])*weight_factor_dict[building_type]/1000)
#stats_table_seperate[k].append(round(np.mean(elect_annual[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual[building_type])*100/np.mean(elect_annual[building_type]),2))
stats_table_seperate[k].append(round(np.mean(gas_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual[building_type])*100/np.mean(gas_annual[building_type]),2))
stats_table_seperate[k].append(round(np.mean(cool_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual[building_type])*100/np.mean(cool_annual[building_type]),2))
k = k+1
stats_table_total = []
#stats_table_total.append(round(np.mean(total_elect_annual)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual)*100/np.mean(total_elect_annual),2))
stats_table_total.append(round(np.mean(total_gas_annual)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual)*100/np.mean(total_gas_annual),2))
stats_table_total.append(round(np.mean(total_cool_annual)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual)*100/np.mean(total_cool_annual),2))
statistics_table = {#'Elect Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
#'Elect STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
#'CV \% Elect': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
                        'Heat Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
                        'Heat STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
                        'CV \% Heat': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
                        'Cool Mean': [stats_table_seperate[0][3],stats_table_seperate[1][3],stats_table_seperate[2][3],stats_table_total[3]],
                        'Cool STD': [stats_table_seperate[0][4],stats_table_seperate[1][4],stats_table_seperate[2][4],stats_table_total[4]],
                        'CV \% Cool': [stats_table_seperate[0][5],stats_table_seperate[1][5],stats_table_seperate[2][5],stats_table_total[5]]}
df_statistics_table= pd.DataFrame(statistics_table)
df_statistics_table.insert(0, "", cols_revised, True)
for i in range(1,len(df_statistics_table.columns)*2):
if i%2!=0:
df_statistics_table.insert(i, "&", ["&"]*len(df_statistics_table), True)
df_statistics_table.insert(i, "\\\\ \hline", ["\\\\ \hline"]*len(df_statistics_table), True)
df_statistics_table.to_csv(os.path.join(results_compare,'stats_scenario_seperate_WC_demand_table.csv'))
def bar_energy_demands(mode):
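    # mode: 'seperate' plots per-building annual hot-water and cooling demands; 'total' plots the aggregated demands.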
SMALL_SIZE = 30
MEDIUM_SIZE = 32
BIGGER_SIZE = 38
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (45,15)
if mode=='seperate':
for building_type in idf_names:
print(building_type, 'highest elect', years[elect_annual_main[building_type].index(np.max(elect_annual_main[building_type]))],
' heat', years[gas_annual_main[building_type].index(np.max(gas_annual_main[building_type]))],
' cool', years[cool_annual_main[building_type].index(np.max(cool_annual_main[building_type]))],
)
print(building_type, 'lowest elect', years[elect_annual_main[building_type].index(np.min(elect_annual_main[building_type]))],
' heat', years[gas_annual_main[building_type].index(np.min(gas_annual_main[building_type]))],
' cool', years[cool_annual_main[building_type].index(np.min(cool_annual_main[building_type]))],
)
r = np.arange(n)
width = 0.25
plt.figure()
#plt.bar(r,[number/1000 for number in elect_annual_main[building_type]],width = width,color='darkorange', edgecolor = 'black',label='Annual Electricity')
plt.bar(r,[number/1000 for number in gas_annual_main[building_type]],width = width,color='darkred', edgecolor = 'black',label = 'Annual Hot Water')
plt.bar(r+width,[number/1000 for number in cool_annual_main[building_type]],width = width,color='darkblue', edgecolor = 'black',label = 'Annual Cooling')
plt.xlabel('Weather Files')
plt.ylabel('Energy Demands (MWh)')
plt.xticks(r + width/2,years)
#plt.yticks
plt.legend(loc='center left')
plt.ticklabel_format(style='plain', axis='y')
#plt.title('annual energy demands of' + building_type)
plt.savefig(os.path.join(results_compare,building_type+'_bar_annual_main_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
elif mode =='total':
        print('total', 'highest', #'elect', years[total_elect_annual_main.index(np.max(total_elect_annual_main))],
' heat', years[total_gas_annual_main.index(np.max(total_gas_annual_main))],
' cool', years[total_cool_annual_main.index(np.max(total_cool_annual_main))],
)
        print('total', 'lowest', #'elect', years[total_elect_annual_main.index(np.min(total_elect_annual_main))],
' heat', years[total_gas_annual_main.index(np.min(total_gas_annual_main))],
' cool', years[total_cool_annual_main.index(np.min(total_cool_annual_main))],
)
        print('total range', #'elect', (np.max(total_elect_annual_main)-np.min(total_elect_annual_main))/1000,
'heat',(np.max(total_gas_annual_main)-np.min(total_gas_annual_main))/1000,
'cool', (np.max(total_cool_annual_main)-np.min(total_cool_annual_main))/1000)
n=len(total_elect_annual_main)
r = np.arange(n)
width = 0.25
plt.figure()
#plt.bar(r,[number/1000 for number in total_elect_annual_main],width = width,color='darkorange', edgecolor = 'black',label='Annual Electricity')
plt.bar(r,[number/1000 for number in total_gas_annual_main],width = width,color='darkred', edgecolor = 'black',label = 'Annual Hot Water')
plt.bar(r+width,[number/1000 for number in total_cool_annual_main],width = width,color='darkblue', edgecolor = 'black',label = 'Annual Cooling')
plt.xlabel('Weather Files')
plt.ylabel('Energy Demands (MWh)')
plt.xticks(r + width/2,years)
#plt.yticks(fontsize=BIGGER_SIZE)
plt.legend(loc='center left')
plt.ticklabel_format(style='plain', axis='y')
#plt.title('Total annual energy demands')
plt.savefig(os.path.join(results_compare,'total_annual_main_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
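# Histograms of annual demand across the weather scenarios, with bin heights
# expressed as the percentage of scenarios, again per building type
# ('seperate') or for the aggregated total ('total').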
def hist_scenarios(mode):
SMALL_SIZE = 20
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (10,8)
if mode=='seperate':
for building_type in idf_names:
#plt.ylabel('Percentage %')
#plt.hist([number/1000 for number in elect_annual[building_type]],color='darkorange',bins=10,weights=np.ones(len([number/1000 for number in elect_annual[building_type]]))*100 / len([number/1000 for number in elect_annual[building_type]]))
#plt.xlabel('Total Electricity Demand (MWh)')
#plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_electricity_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.ylabel('Percentage %')
plt.hist([number/1000 for number in gas_annual[building_type]],color='darkred',bins=10,weights=np.ones(len([number/1000 for number in gas_annual[building_type]]))*100 / len([number/1000 for number in gas_annual[building_type]]))
plt.xlabel('Total Heating Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_heating_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.ylabel('Percentage %')
plt.hist([number/1000 for number in cool_annual[building_type]],color='darkblue',bins=10,weights=np.ones(len([number/1000 for number in cool_annual[building_type]]))*100 / len([number/1000 for number in cool_annual[building_type]]))
plt.xlabel('Total Cooling Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_cooling_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
elif mode =='total':
#plt.ylabel('Percentage %')
#plt.hist([number/1000 for number in total_elect_annual],color='darkorange',bins=10,weights=np.ones(len([number/1000 for number in total_elect_annual]))*100 / len([number/1000 for number in total_elect_annual]))
#plt.xlabel('Total Electricity Demand (MWh)')
#plt.savefig(os.path.join(results_compare,'hist_total_annual_main_electricity_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.hist([number/1000 for number in total_gas_annual],color='darkred',bins=10,weights=np.ones(len([number/1000 for number in total_gas_annual]))*100 / len([number/1000 for number in total_gas_annual]))
plt.ylabel('Percentage %')
plt.xlabel('Total Heating Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_total_annual_main_heating_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
        plt.hist([number/1000 for number in total_cool_annual],color='darkblue',bins=10,weights=np.ones(len([number/1000 for number in total_cool_annual]))*100 / len([number/1000 for number in total_cool_annual]))
plt.ylabel('Percentage %')
plt.xlabel('Total Cooling Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_total_annual_main_cooling_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
energy_demands()
generate_combo_plots('seperate')
generate_combo_plots('total')
#bar_energy_demands('seperate')
#bar_energy_demands('total')
#hist_scenarios('total')
#hist_scenarios('seperate')
#stats_energy_demands()
### Sizing of DES ###
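# Load the two-objective sizing results (cost vs. emissions) for every main EPW
# file and every generated weather scenario: 'objectives.csv' is used for the
# Pareto fronts and 'sizing_all.csv' for the selected component capacities.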
def sizing():
global annual_df_object_sizing_main,annual_df_operation_sizing_main
annual_df_object_sizing_main= {}
annual_df_operation_sizing_main = {}
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
file_name = output_prefix+city+'_Discrete_EF_'+str(float(editable_data_sizing['renewable percentage']) )+'_design_'+str(int(editable_data_sizing['num_iterations']))+'_'+str(editable_data_sizing['population_size'])+'_'+str(editable_data_sizing['num_processors'])+'_processors'
annual_df_object_sizing_main[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'objectives.csv'))
annual_df_operation_sizing_main[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'sizing_all.csv'))
global annual_df_object_sizing_scenario, annual_df_operation_sizing_scenario
annual_df_object_sizing_scenario= {}
annual_df_operation_sizing_scenario = {}
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
file_name = output_prefix+city+'_Discrete_EF_'+str(float(editable_data_sizing['renewable percentage']) )+'_design_'+str(int(editable_data_sizing['num_iterations']))+'_'+str(editable_data_sizing['population_size'])+'_'+str(editable_data_sizing['num_processors'])+'_processors'
annual_df_object_sizing_scenario[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name , 'objectives.csv'))
annual_df_operation_sizing_scenario[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'sizing_all.csv'))
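# Scatter plot of the cost-emissions Pareto fronts for the main weather files,
# with the fronts sorted by their cheapest design so that annotation positions
# and colors are assigned in order of increasing minimum cost.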
def main_paretofront_sizing():
global sorted_annual_df_object_sizing_main,output_prefix_short
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,15)
plt.figure()
j=0
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
for i in range(27)]
sorted_cost = []
output_prefix_short ={}
marker = itertools.cycle(('v','+','s','^','o','x','*'))
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
output_prefix_short[output_prefix] = dict_EPWs_names[key][dict_EPWs[key].index(epw_file_name)]
annual_df_object_sizing_main[output_prefix]=annual_df_object_sizing_main[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_main[output_prefix]=annual_df_object_sizing_main[output_prefix].reset_index()
            if key == 'AMYs':
year_selected_number=int(output_prefix_short[output_prefix].replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
            elif key == 'TMYs':
if epw_file_name=='USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3':
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
else:
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost = sorted(sorted_cost)
sorted_annual_df_object_sizing_main = {}
for i in sorted_cost:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
if annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_object_sizing_main[output_prefix] =annual_df_object_sizing_main[output_prefix]
sorted_cost_scenario = []
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].reset_index()
sorted_cost_scenario.append(annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost_scenario = sorted(sorted_cost_scenario)
sorted_annual_df_object_sizing_scenario = {}
for i in sorted_cost_scenario:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
if annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_object_sizing_scenario[output_prefix] =annual_df_object_sizing_scenario[output_prefix]
j=0
#fig, ax = plt.subplots()
for key in sorted_annual_df_object_sizing_main:
output_prefix = key
cost = [i/10**6 for i in sorted_annual_df_object_sizing_main[output_prefix]['Cost ($)']]
emissions = [j/10**6 for j in sorted_annual_df_object_sizing_main[output_prefix]['Emission (kg CO2)']]
label = output_prefix_short[output_prefix]
#plt.scatter(cost,emissions,c=color[j], s=100, cmap=cmap,marker=next(marker))
#plt.title('Cost and emissions trade-off')
if j==0:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:blue'
elif j==1:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.9, emissions[0]*1.15),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:orange'
elif j==2:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]*0.8),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:green'
elif j==3:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:purple'
elif j==4:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]*0.8),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'black'
elif j==5:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:red'
elif j==6:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*0.9, emissions[-1]*1.4),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:cyan'
elif j==7:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.2),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:olive'
plt.scatter(cost,emissions,c=color, s=100, cmap=cmap,marker=next(marker))
j = j+1
j=0
#plt.legend()
plt.xlabel("Cost (million $)")
plt.ylabel("Emissions (million kg $CO_2$)")
plt.savefig(os.path.join(results_compare ,'ParetoFront_sizing.png'),dpi=100,facecolor='w',bbox_inches='tight')
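# Same Pareto-front comparison for the generated scenarios, but only the nine
# combinations of the min/median/max temperature and solar indices (0, 5, 9)
# are plotted, annotated as Tmin/Tmed/Tmax and Smin/Smed/Smax.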
def scenario_paretofront_sizing():
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,15)
plt.figure()
j=0
sorted_cost = []
output_prefix_short ={}
sorted_cost_scenario = []
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].reset_index()
sorted_cost_scenario.append(annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost_scenario = sorted(sorted_cost_scenario)
sorted_annual_df_object_sizing_scenario = {}
for i in sorted_cost_scenario:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
label = output_prefix.replace('_','').replace('total','')
if annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i and (int(label[1])==0 or int(label[1])==5 or int(label[1])==9) and (int(label[3])==0 or int(label[3])==5 or int(label[3])==9):
sorted_annual_df_object_sizing_scenario[output_prefix] =annual_df_object_sizing_scenario[output_prefix]
j=0
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
for key in sorted_annual_df_object_sizing_scenario:
output_prefix = key
cost = [i/10**6 for i in sorted_annual_df_object_sizing_scenario[output_prefix]['Cost ($)']]
emissions = [j/10**6 for j in sorted_annual_df_object_sizing_scenario[output_prefix]['Emission (kg CO2)']]
label = key.replace('_','').replace('total','')
int_marker= int(label[1])
int_color = int(label[3])
#print(int_marker, type(int_marker), marker[int_marker],len( marker))
marker = marker_list[int_marker]
color = color_list[int_color]
plt.scatter(cost,emissions,c=color, s=300, cmap=cmap, label = label,marker=marker)
#plt.title('Cost and emissions trade-off')
plt.xlabel("Cost (million $)")
plt.ylabel("Emissions (million kg $CO_2$)")
if int(label[1])==0:
label_T='Tmin'
elif int(label[1])==5:
label_T='Tmed'
elif int(label[1])==9:
label_T='Tmax'
if int(label[3])==0:
label_S='Smin'
elif int(label[3])==5:
label_S='Smed'
elif int(label[3])==9:
label_S='Smax'
label = label_T + label_S
if j == 0:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.90, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
else:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.88, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
j=j+1
#plt.legend()
plt.savefig(os.path.join(results_compare ,'scenario_ParetoFront_sizing.png'),dpi=100,facecolor='w',bbox_inches='tight')
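# Mean, standard deviation and coefficient of variation of the sized components
# (plus cost and emissions) across all weather scenarios, evaluated at three
# Pareto points (indices 0, 24 and 49) and written out as a LaTeX-ready CSV.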
def stats_scenario_sizing():
global sorted_annual_df_operation_sizing_scenario
statistics_table = {}
mean_table = defaultdict(list)
std_table = defaultdict(list)
CV_table = defaultdict(list)
cost_points= defaultdict(list)
emissions_points=defaultdict(list)
label_points=defaultdict(lambda: defaultdict(list))
sorted_cost = []
output_prefix_short ={}
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_operation_sizing_scenario[output_prefix]=annual_df_operation_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_operation_sizing_scenario[output_prefix]=annual_df_operation_sizing_scenario[output_prefix].reset_index()
sorted_cost.append(annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost = sorted(sorted_cost)
sorted_annual_df_operation_sizing_scenario = {}
for i in sorted_cost:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
if annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_operation_sizing_scenario[output_prefix] =annual_df_operation_sizing_scenario[output_prefix]
cols = ['Boilers Capacity (kW)', 'CHP Electricty Capacity (kW)', 'Battery Capacity (kW)','Solar Area (m^2)','Swept Area (m^2)','Emission (kg CO2)','Cost ($)']
cols_revised = ['Boilers (kW)', 'CHP (kW)', 'Battery (kW)','Solar (m^2)','Wind (m^2)','Emissions (million ton)','Cost (million \$)']
for point in range(population_size_sizing):
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
cost_points[point].append(sorted_annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][point])
emissions_points[point].append(sorted_annual_df_operation_sizing_scenario[output_prefix]['Emission (kg CO2)'][point])
for component in cols:
label_points[point][component].append(sorted_annual_df_operation_sizing_scenario[output_prefix][component][point])
for point in range(population_size_sizing):
for component in cols:
if len(label_points[point][component])!=0:
if component=='Emission (kg CO2)' or component=='Cost ($)':
std_table[point].append(round(statistics.stdev(label_points[point][component])/10**6,2))
mean_table[point].append(round(np.mean(label_points[point][component])/10**6,2))
else:
std_table[point].append(round(statistics.stdev(label_points[point][component]),2))
mean_table[point].append(round(np.mean(label_points[point][component]),2))
if np.mean(label_points[point][component])!=0:
CV_table[point].append(round(statistics.stdev(label_points[point][component])*100/np.mean(label_points[point][component]),2))
else:
CV_table[point].append(0)
statistics_table = {'Mean PP1': mean_table[0], 'STD PP1': std_table[0], 'CV \% PP1': CV_table[0],
'Mean medium cost': mean_table[24], 'STD medium cost': std_table[24], 'CV \% PP5': CV_table[24],
'Mean max cost': mean_table[49], 'STD max cost': std_table[49], 'CV \% PP9': CV_table[49]
}
df_statistics_table= pd.DataFrame(statistics_table)
df_statistics_table.insert(0, "", cols_revised, True)
for i in range(1,len(df_statistics_table.columns)*2):
if i%2!=0:
df_statistics_table.insert(i, "&", ["&"]*len(df_statistics_table), True)
df_statistics_table.insert(i, "\\\\ \hline", ["\\\\ \hline"]*len(df_statistics_table), True)
df_statistics_table.to_csv(os.path.join(results_compare,'stats_scenario_sizing_table.csv'))
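# The same component statistics across the main EPW files, plus grouped bar
# charts of each component at the min-, median- and max-cost Pareto points.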
def stats_main_sizing():
global sorted_annual_df_operation_sizing_main
statistics_table = {}
mean_table = defaultdict(list)
std_table = defaultdict(list)
CV_table = defaultdict(list)
cost_points= defaultdict(list)
emissions_points=defaultdict(list)
label_points=defaultdict(lambda: defaultdict(list))
sorted_cost = []
output_prefix_short =[]
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
output_prefix_short.append(dict_EPWs_bar_names[key][dict_EPWs[key].index(epw_file_name)])
annual_df_operation_sizing_main[output_prefix]=annual_df_operation_sizing_main[output_prefix].sort_values('Cost ($)')
annual_df_operation_sizing_main[output_prefix]=annual_df_operation_sizing_main[output_prefix].reset_index()
sorted_cost.append(annual_df_operation_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost = sorted(sorted_cost)
sorted_annual_df_operation_sizing_main = {}
for i in sorted_cost:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
if annual_df_operation_sizing_main[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_operation_sizing_main[output_prefix] =annual_df_operation_sizing_main[output_prefix]
cols = ['Boilers Capacity (kW)', 'CHP Electricty Capacity (kW)', 'Battery Capacity (kW)','Solar Area (m^2)','Swept Area (m^2)','Emission (kg CO2)','Cost ($)']
cols_revised = ['Boilers (kW)', 'CHP (kW)', 'Battery (kW)','Solar (m^2)','Wind (m^2)','Emissions (million ton)','Cost (million \$)']
for point in range(population_size_sizing):
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
cost_points[point].append(sorted_annual_df_operation_sizing_main[output_prefix]['Cost ($)'][point])
emissions_points[point].append(sorted_annual_df_operation_sizing_main[output_prefix]['Emission (kg CO2)'][point])
for component in cols:
label_points[point][component].append(sorted_annual_df_operation_sizing_main[output_prefix][component][point])
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (35,15)
for component in cols:
data_1 = []
data_2 = []
data_3 = []
if component=='Emission (kg CO2)' or component=='Cost ($)':
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
data_1.append(sorted_annual_df_operation_sizing_main[output_prefix][component][0]/10**6)
data_2.append(sorted_annual_df_operation_sizing_main[output_prefix][component][24]/10**6)
data_3.append(sorted_annual_df_operation_sizing_main[output_prefix][component][49]/10**6)
if component=='Emission (kg CO2)':
component='Emissions (million ton)'
elif component=='Cost ($)':
component='Cost (million \$)'
else:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
data_1.append(sorted_annual_df_operation_sizing_main[output_prefix][component][0])
data_2.append(sorted_annual_df_operation_sizing_main[output_prefix][component][24])
data_3.append(sorted_annual_df_operation_sizing_main[output_prefix][component][49])
data = [data_1,data_2,data_3]
index = output_prefix_short
df = pd.DataFrame({'min cost': data[0],
'med cost': data[1],
'max cost':data[2] }, index=index)
ax = df.plot.bar(rot=0)
ax.set_xlabel('Weather Files')
ax.set_ylabel(component)
ax.figure.savefig(os.path.join(results_compare,component.replace('\\$','')+'_bar_main.png'),bbox_inches='tight')
for point in range(population_size_sizing):
for component in cols:
if len(label_points[point][component])!=0:
if component=='Emission (kg CO2)' or component=='Cost ($)':
#print(point,round(np.mean(label_points[point]['Cost ($)'])/10**6,2))
std_table[point].append(round(statistics.stdev(label_points[point][component])/10**6,2))
mean_table[point].append(round(np.mean(label_points[point][component])/10**6,2))
else:
std_table[point].append(round(statistics.stdev(label_points[point][component]),2))
mean_table[point].append(round(np.mean(label_points[point][component]),2))
if np.mean(label_points[point][component])!=0:
CV_table[point].append(round(statistics.stdev(label_points[point][component])*100/np.mean(label_points[point][component]),2))
else:
CV_table[point].append(0)
statistics_table = {'Mean PP1': mean_table[0], 'STD PP1': std_table[0], 'CV \% PP1': CV_table[0],
'Mean medium cost': mean_table[24], 'STD medium cost': std_table[24], 'CV \% PP5': CV_table[24],
'Mean max cost': mean_table[49], 'STD max cost': std_table[49], 'CV \% PP9': CV_table[49]}
df_statistics_table= | pd.DataFrame(statistics_table) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
from datetime import datetime, timedelta
def get_data():
# Load json data
with open('../data/json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched...")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
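    # T is the time to the next visit (1e10 as a sentinel when there is none);
    # C below is the censoring time in hours between the visit end and the study
    # cut-off (2016-01-15); delta = 1 when the next visit is observed before censoring.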
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
start_dates = pd.to_datetime([visit.get('start_date') for visit in visits])
C = pd.to_datetime('2016-01-15 00:00:00') - end_dates
days, seconds = C.days, C.seconds
C = days * 24 + seconds // 3600 # in hours (discrete)
delta = (T <= C).astype(int)
Y = T
Y[delta == 0] = C[delta == 0]
labels = | pd.DataFrame({'Y': Y, 'delta': delta}, index=encounter_nums) | pandas.DataFrame |
from keras.layers import Bidirectional, Input, LSTM, Dense, Activation, Conv1D, Flatten, Embedding, MaxPooling1D, Dropout
#from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras import optimizers
from keras.models import Sequential, Model
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from sklearn.utils import shuffle
import pickle
from sklearn.model_selection import train_test_split
import re
from sklearn.utils import shuffle
import keras
import joblib
import tokenizer_util as tu
import os.path
from keras.callbacks import Callback
from keras import backend as K
TRAIN_FILE_PATH = 'train.csv'#'/data/train.csv'
TEST_FILE = 'test.csv'#'/data/test.csv'
TIME_STEPS = 300
BATCH_SIZE = 256
LEARNING_RATE = 0.01
DECAY = 0.25
EPOCH_SIZE = 10
TOKENIZER_FILE = 'tokenizer'
EMBEDDING_FILE = 'embedding'
TENSORFLOW_LOGDIR = 'logs'#'/output/tensorboard_logs'
MODEL_SAVE_PATH = 'models/best_model_new.h5' #'/output/best_model.h5'
OUTPUT_FILENAME = 'first_submission.csv'
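# Training configuration: data locations, padding length and optimizer settings.
# TIME_STEPS is presumably the length to which token sequences are padded before
# being fed to the embedding and recurrent/convolutional layers imported above.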
def main():
df = | pd.read_csv(TRAIN_FILE_PATH) | pandas.read_csv |
"""
Perform a simple random sample of your words and run optimus on the sample.
Then use a knn to put it all back together at the end.
"""
#-- Imports ---------------------------------------------------------------------
# third party
import fastText as ft
import pandas as pd
from optimus import Optimus
from sklearn.neighbors import KNeighborsClassifier
#-- Functions -------------------------------------------------------------------
def main():
# load in the data, sample from it and run optimus on the sample
words = pd.read_csv('data/words.csv', header=None, names=('description',))['description']
sample = words.sample(1000)
O = Optimus(config_path='config.json')
result = O(sample)
df_sample = pd.DataFrame({'original': sample, 'label': result})
# use a knn on vectors + assigned labels to apply these labels to the whole dataset
model = ft.load_model('models/wiki.en.bin')
embedSample = [model.get_word_vector(word) for word in sample.tolist()]
# train the model on the result and the embedded sample vectors
classifier = KNeighborsClassifier()
trained = classifier.fit(embedSample, result)
nonsampled = [word for word in words.tolist() if word not in sample.tolist()]
outvectors = [model.get_word_vector(word) for word in nonsampled]
predictions = trained.predict(outvectors)
df_unsampled = pd.DataFrame({'original': nonsampled, 'label': predictions})
df = | pd.concat([df_sample, df_unsampled]) | pandas.concat |
import sys
sys.path.append("../")
from settings import *
import re
import pandas as pd
import numpy as np
import os
stopwords = {
'max>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax>\'ax': 1,
'edu': 1,
'subject': 1,
'com': 1,
'r<g': 1,
'_?w': 1,
'isc': 1,
'cx^': 1,
'usr': 1,
'uga': 1,
'sam': 1,
'mhz': 1,
'b8f': 1,
'34u': 1,
'pl+': 1,
'1993apr20': 1,
'1993apr15': 1,
'xterm': 1,
'utexas': 1,
'x11r5': 1,
'o+r': 1,
'iastate': 1,
'udel': 1,
'uchicago': 1,
'1993apr21': 1,
'uxa': 1,
'argic': 1,
'optilink': 1,
'imho': 1,
'umich': 1,
'openwindows': 1,
'1993apr19': 1,
'1993apr22': 1,
'unk': 1
}
word_len_threshold = 2
np.random.seed(6)
def load_stopwords(lanague='EN'):
# url: https://github.com/igorbrigadir/stopwords/blob/master/en/gensim.txt
if lanague == 'EN':
stopwords_file = EN_STOP_WORDS
else:
stopwords_file = EN_STOP_WORDS
with open(stopwords_file, mode='r', encoding='utf-8') as reader:
for line in reader:
word = line.strip()
stopwords[word] = 1
def is_num(w):
try:
float(w)
return True
except:
        return False
# load_stopwords(stopwords)
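# Lower-case each token, strip surrounding punctuation, split on common
# separators and drop stopwords, very short segments and pure numbers.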
def clean_words(words):
new_words = []
for word in words:
word = word.lower()
if word in stopwords:
continue
word = word.strip('_\[\]\'\".,()*! #@~`\\%^&;:/-+=“”‘’<>{}|?$^&'). \
replace('isn\'t', '').replace('\'s', '').replace('\'re', ''). \
replace('\'t', '').replace('\'ll', '').replace('\'m', ''). \
replace('\'am', '').replace('\'ve', '').replace('\'d', '')
segs = re.split('[()@.\-/#\\\\"`\[\]=:&<>%\']', word)
new_word = []
for s in segs:
seg = s
# seg = ps.stem(seg)
if seg not in stopwords and seg and len(seg) > word_len_threshold and not is_num(seg):
new_word.append(seg)
# word = ' '.join(new_word)
# if word and len(word) > word_len_threshold:
# if word not in stopwords:
# new_words.append(word)
new_words.extend(new_word)
return new_words
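# Split a raw line into sentence-like segments on . ? , ! ; and keep only the
# non-trivial ones (longer than 5 characters).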
def parse_sent(line):
delim = '[.|?|,|!|;]'
sents = []
segs = re.split(delim, line)
for seg in segs:
if seg not in delim and len(seg) > 5:
sents.append(seg)
return sents
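# Load one of the supported corpora (BNC, News20, TMN, Reuters), attach
# train/validation/test flags (train = 1 / -1 / 0), clean every document with
# clean_words() and build the word-frequency vocabulary.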
def process_dataset(dataset='AG', STOPWORD=False):
if STOPWORD:
load_stopwords()
if dataset == 'BNC':
path = BNC_ADDR + '/raw/'
data1 = open(path + 'train.txt').readlines()
data1 = pd.DataFrame(data1)
data1['train'] = 1
data2 = open(path + 'valid.txt').readlines()
data2 = pd.DataFrame(data2)
data2['train'] = -1
data3 = open(path + 'test.txt').readlines()
data3 = pd.DataFrame(data3)
data3['train'] = 0
data = data1.append(data2)
data = data.append(data3)
data.columns = ['content', 'train']
data['label'] = 1
data = data[['label', 'content', 'train']]
elif dataset == 'News20':
from sklearn.datasets import fetch_20newsgroups
newsgroups_train = fetch_20newsgroups(subset='train',
remove=('headers', 'quotes','footers'))
# remove=('headers', 'footers', 'quotes'))
train_data = [[a, b] for b, a in zip(newsgroups_train.data, newsgroups_train.target)]
data1 = pd.DataFrame(train_data, columns=['label', 'content'])
data1['train'] = 1
newsgroups_test = fetch_20newsgroups(subset='test',
remove=('headers', 'quotes', 'footers'))
# remove=('headers', 'footers', 'quotes'))
test_data = [[a, b] for b, a in zip(newsgroups_test.data, newsgroups_test.target)]
data2 = pd.DataFrame(test_data, columns=['label', 'content'])
data2['train'] = 0
N = len(data1)
ids = np.random.choice(range(N), size=3766, replace=False, p=None)
data1.iloc[ids, -1] = -1
data = data1.append(data2)
data = data.dropna()
data.to_csv(NEWS20_ADDR + '/raw/data.csv', quoting=1, header=True, index=False)
elif dataset == 'TMN':
path = TMN_ADDR + '/raw/'
data1 = open(path + 'tagmynews.txt').readlines()
N = int(len(data1)/8)
ldct={}
values = []
for i in range(N):
content = data1[8*i]
label = data1[8*i+6]
if label not in ldct:
id = len(ldct)
ldct[label]=id
l= ldct[label]
values.append([l, content])
data = pd.DataFrame(values,columns=[ 'label', 'content'])
N = len(data)
SN=9000
ids = np.random.choice(range(N), size=SN, replace=False, p=None)
N2 = len(ids)
ids2 = np.random.choice(range(N2), size=int(SN/3), replace=False, p=None)
ids2 = np.array(ids)[ids2]
data['train'] = 1
data.iloc[ids, -1] = 0
data.iloc[ids2, -1] = -1
elif dataset in ['Reuters']:
path = Reuters_ADDR+'/raw'
train_path = path+'/training/'
trains = os.listdir(train_path)
data1=[]
for t in trains:
f = train_path+t
text = ' '.join([str(l).strip() for l in open(f, 'rb').readlines()])
data1.append(text)
data1 = pd.DataFrame(data1, columns=[ 'content'])
data1['train'] = 1
N = len(data1)
ids = np.random.choice(range(N), size=int(N / 8), replace=False, p=None)
data1.iloc[ids, -1] = -1
test_path = path + '/test/'
tests = os.listdir(test_path)
data2 = []
for t in tests:
f = test_path + t
text = ' '.join([str(l).strip() for l in open(f, 'rb').readlines()])
data2.append(text)
data2 = pd.DataFrame(data2, columns=['content'])
data2['train'] = 0
data = data1.append(data2)
data['label']=1
data=data[['label', 'content', 'train']]
data = data.reset_index()
data['idx'] = data.index
print(data['content'].values[0])
vocab = {}
labels = []
contents = []
# data = data.iloc[:10, :]
for i, row in enumerate(data[['label','content']].values):
if i % 1000 == 0:
print(i)
label = row[0]
content = row[1]
sents = parse_sent(content)
new_sents = []
for sen in sents:
words = sen.strip().split()
words = clean_words(words)
for w in words:
try:
vocab[w.strip()] += 1
except:
vocab[w.strip()] = 1
new_sents.append(' '.join(words))
new_sents = ' '.join(new_sents)
labels.append(int(label))
contents.append(new_sents)
data['content'] = contents
data['label'] = labels
data = data[data['content'].apply(lambda x: len(x) > 5)]
return data, vocab
def clean_vocab(path, STOPWORD=False, freq_threshold=5):
vocab = {}
if STOPWORD:
data = | pd.read_csv(path + 'overall_stop.csv', header=0, dtype={'label': int}) | pandas.read_csv |
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
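# HPAT datetime tests: each case jit-compiles test_impl and compares its output
# against plain pandas, mostly on a small DataFrame of date strings produced by
# _gen_str_date_df().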
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_datetime_index_set(self):
def test_impl(df):
df['hpat'] = pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
hpat_func(df)
df['std'] = pd.DatetimeIndex(df['str_date'])
allequal = (df['std'].equals(df['hpat']))
self.assertTrue(allequal)
def test_timestamp(self):
def test_impl():
dt = datetime(2017, 4, 26)
ts = pd.Timestamp(dt)
return ts.day + ts.hour + ts.microsecond + ts.month + ts.nanosecond + ts.second + ts.year
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_extract(self):
def test_impl(s):
return s.month
hpat_func = hpat.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
month = hpat_func(ts)
self.assertEqual(month, 4)
def test_timestamp_date(self):
def test_impl(s):
return s.date()
hpat_func = hpat.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
self.assertEqual(hpat_func(ts), test_impl(ts))
def test_datetimeindex_str_comp(self):
def test_impl(df):
return (df.A >= '2011-10-23').values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetimeindex_str_comp2(self):
def test_impl(df):
return ('2011-10-23' <= df.A).values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_df(self):
def test_impl(df):
df = pd.DataFrame({'A': pd.DatetimeIndex(df['str_date'])})
return df.A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_date(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).date
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_max(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).max()
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_min(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).min()
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
self.assertEqual(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_days(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.days
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_seconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.seconds
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_microseconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.microseconds
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_timedelta_nanoseconds(self):
def test_impl(df):
s = pd.DatetimeIndex(df['str_date'])
t = s - s.min()
return t.nanoseconds
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_ret(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date'])
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
pd.testing.assert_index_equal(hpat_func(df), test_impl(df),
check_names=False)
def test_datetime_index_year(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).year
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_month(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).month
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_day(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).day
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_hour(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).hour
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_minute(self):
def test_impl(df):
return | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
#!/Users/amos/anaconda3/bin/python
# Pythono3 code to extract multiple space delimited txt files into pandas and then manipulate it into a single excel file
# importing pandas and os module
import os
import pandas as pd
#set working directory to where text files are stored
os.chdir("/Volumes/DANIEL/dti_freesurf_MCIP/diffusion_recons")
#create a master pandas data frame that stats as empty
df_master = pd.DataFrame()
#For loop that runs through subject+condition and uses it to index into each subject's folder for txt file extraction
#Reads each space delimited txt file into a pandas frame and adds column heading names
#Drops the data frame rows that we aren't interested in and adds a column named 'subj' that includes the subject name
#Concatenates the individual data frame to the originally empty master data frame so that all text file data for each subject is in the master dataframe
for subj in os.listdir("/Volumes/DANIEL/dti_freesurf_MCIP/diffusion_recons/"):
os.chdir("/Volumes/DANIEL/dti_freesurf_MCIP/diffusion_recons/{0}/mri/".format(subj))
df= | pd.read_table('lh.hippoSfVolumes-T1.long.v21.txt', delim_whitespace=True,names=['loacation','volume']) | pandas.read_table |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
scalar_td ** td1
with tm.assert_raises_regex(TypeError, pattern):
td1 ** scalar_td
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
pd.Int64Index(np.arange(5, dtype='int64')),
pd.UInt64Index(np.arange(5, dtype='uint64'))],
ids=lambda x: type(x).__name__)
def idx(request):
return request.param
zeros = [box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]]
zeros.extend([np.array(0, dtype=dtype)
for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([0, 0.0, long(0)])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
class TestDivisionByZero(object):
def test_div_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx / zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_floordiv_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx // zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_mod_zero(self, zero, idx):
expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
dtype=np.float64)
result = idx % zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_divmod_zero(self, zero, idx):
exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
dtype=np.float64)
result = divmod(idx, zero)
tm.assert_index_equal(result[0], exleft)
tm.assert_index_equal(result[1], exright)
# ------------------------------------------------------------------
@pytest.mark.parametrize('dtype2', [
np.int64, np.int32, np.int16, np.int8,
np.float64, np.float32, np.float16,
np.uint64, np.uint32, np.uint16, np.uint8])
@pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64])
def test_ser_div_ser(self, dtype1, dtype2):
# no longer do integer div for any ops, but deal with the 0's
first = Series([3, 4, 5, 8], name='first').astype(dtype1)
second = Series([0, 0, 0, 3], name='second').astype(dtype2)
with np.errstate(all='ignore'):
expected = Series(first.values.astype(np.float64) / second.values,
dtype='float64', name=None)
expected.iloc[0:3] = np.inf
result = first / second
tm.assert_series_equal(result, expected)
assert not result.equals(second / first)
def test_rdiv_zero_compat(self):
# GH#8674
zero_array = np.array([0] * 5)
data = np.random.randn(5)
expected = Series([0.] * 5)
result = zero_array / Series(data)
tm.assert_series_equal(result, expected)
result = Series(zero_array) / data
tm.assert_series_equal(result, expected)
result = Series(zero_array) / Series(data)
tm.assert_series_equal(result, expected)
def test_div_zero_inf_signs(self):
# GH#9144, inf signing
ser = Series([-1, 0, 1], name='first')
expected = Series([-np.inf, np.nan, np.inf], name='first')
result = ser / 0
tm.assert_series_equal(result, expected)
def test_rdiv_zero(self):
# GH#9144
ser = Series([-1, 0, 1], name='first')
expected = Series([0.0, np.nan, 0.0], name='first')
result = 0 / ser
tm.assert_series_equal(result, expected)
def test_floordiv_div(self):
# GH#9144
ser = Series([-1, 0, 1], name='first')
result = ser // 0
expected = Series([-np.inf, np.nan, np.inf], name='first')
tm.assert_series_equal(result, expected)
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
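# Illustrative sketch, separate from the pandas test suite above: the
# TestDivisionByZero cases encode the behaviour that dividing a numeric Series
# by zero yields signed infinities and NaN rather than raising an error.
import pandas as pd
ser = pd.Series([-1, 0, 1])
print(ser / 0)   # -inf, nan, inf  (cf. test_div_zero_inf_signs)
print(0 / ser)   # -0.0, nan, 0.0  (cf. test_rdiv_zero; -0.0 compares equal to 0.0)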
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from fatetrack_connections import buildFeatureFrame, buildOffsetFrame, generateCandidates, generateLinks, DivSimScore, DivSetupScore, DivisionCanditates, UpdateConnectionsDiv, TranslationTable, SolveMinCostTable, ReviewCostTable
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable,path="./"):
frame0 = buildFeatureFrame(filename_t0,timepoint,pathtoimage=path);
frame1 = buildFeatureFrame(filename_t1,timepoint+1,pathtoimage=path);
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = | pd.DataFrame() | pandas.DataFrame |
# Name: ZStandardizeFields.py
# Purpose: Will add selected fields as standardized Z-scores by extending a numpy array to the feature class.
# Author: <NAME>
# Last Modified: 4/16/2021
# Copyright: <NAME>
# Python Version: 2.7-3.1
# ArcGIS Version: 10.4 (Pro)
# --------------------------------
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import os, arcpy
import pandas as pd
import SharedArcNumericalLib as san
# Function Definitions
def add_standarized_fields(in_fc, input_Fields, ignore_nulls=True):
""" This function will take in an feature class, and use pandas/numpy to calculate Z-scores and then
join them back to the feature class using arcpy.
Parameters
-----------------
in_fc- input feature class to add percentile fields
input_fields - table fields to add Z Scores too
ignore_nulls - ignore null values in percentile calculations"""
try:
arcpy.env.overwriteOutput = True
desc = arcpy.Describe(in_fc)
OIDFieldName = desc.OIDFieldName
workspace = os.path.dirname(desc.catalogPath)
input_Fields_List = input_Fields
finalColumnList = []
scored_df = None
for column in input_Fields_List:
try:
field_series = san.arcgis_table_to_dataframe(in_fc, [column], skip_nulls=ignore_nulls, null_values=0)
san.arc_print("Creating standarized column for field {0}.".format(str(column)), True)
col_Standarized = arcpy.ValidateFieldName("Zscore_" + column, workspace)
field_series[col_Standarized] = (field_series[column] - field_series[column].mean()) / field_series[
column].std(ddof=0)
finalColumnList.append(col_Standarized)
if col_Standarized != column:
del field_series[column]
if scored_df is None:
san.arc_print("Test")
scored_df = field_series
else:
scored_df = | pd.merge(scored_df, field_series, how="outer", left_index=True, right_index=True) | pandas.merge |
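# Illustrative sketch, separate from ZStandardizeFields.py above: the docstring
# describes the core computation, (value - mean) / population std. A pandas-only
# version without the arcpy table handling ("field" is a placeholder column name):
import pandas as pd
def zscore_column(df, field):
    # ddof=0 matches the population standard deviation used by the script above.
    return (df[field] - df[field].mean()) / df[field].std(ddof=0)
example = pd.DataFrame({"field": [1.0, 2.0, 3.0, 4.0]})
example["Zscore_field"] = zscore_column(example, "field")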
#!/usr/bin/env python3
# This script assumes that the non-numerical column headers
# in train and predi files are identical.
# Thus the sm header(s) in the train file must be numeric (day/month/year).
import sys
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA #TruncatedSVD as SVD
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def mask(df, f):
return df[f(df)]
def is_int(val):
try:
int(val)
return True
except:
return False
def remove_sparse_rows(data, error=-99999.0):
data_matrix = data.as_matrix()
    data_matrix = [row for row in data_matrix if error not in row]  # drop rows containing the sentinel error value
return pd.DataFrame(data_matrix, columns=data.columns)
def fit_data(train_data, num_comps="mle"):
# Build pipeline and fit it to training data.
scaler = StandardScaler()
# https://github.com/scikit-learn/scikit-learn/issues/9884
pca = PCA(n_components=num_comps, svd_solver="full")
pipeline = Pipeline([("scaler", scaler), ("pca", pca)])
pipeline.fit(train_data)
return pipeline
# Select the target number of components.
# Uses the Average Eigenvalue technique from:
# http://pubs.acs.org/doi/pdf/10.1021/ie990110i
def choose_num_comps(train_data, bound=1):
model = fit_data(train_data)
eigenvals = model.named_steps['pca'].explained_variance_
#print(f"eigenvals:\n{eigenvals}\n")
return len([ev for ev in eigenvals if (ev >= bound)])
# Assumes the first two columns are x/y-coordinates
# and integer-headed columns are sm data, not covariates.
def get_params(data):
columns = list(data.columns)[2:]
return [col for col in columns if not is_int(col)]
# Apply the PCA transformation {model} to {df},
# mapping {params}-headed data to {num_comps} new columns.
def apply_model(df, model, params, num_comps):
pre_model = df[params]
post_model = model.transform(pre_model)
#print(f"one row of post_model:\n{post_model[0]}")
new_cols = [f"Component{i}" for i in range(num_comps)]
post_model = pd.DataFrame(post_model, columns=new_cols)
#print(f"one row of post_model:\n{post_model.iloc[0]}")
pre_base = df.drop(params, axis=1)
#print(f"one row of pre_base:\n{pre_base.iloc[0]}")
post_model.reset_index(drop=True, inplace=True)
pre_base.reset_index(drop=True, inplace=True)
post_full = | pd.concat([pre_base, post_model], axis=1) | pandas.concat |
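# Illustrative sketch, separate from the PCA script above: fit_data() chains a
# StandardScaler and a PCA, and choose_num_comps() keeps the components whose
# explained_variance_ is at least 1 (the average-eigenvalue rule cited above).
# The data shape below is an arbitrary assumption for demonstration only.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
X = np.random.rand(100, 6)
pipe = Pipeline([("scaler", StandardScaler()), ("pca", PCA(svd_solver="full"))])
pipe.fit(X)
eigenvals = pipe.named_steps["pca"].explained_variance_
n_keep = len([ev for ev in eigenvals if ev >= 1])  # components above the average eigenvalue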
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import seaborn as sns
import tensorflow as tf
import re
import json
from functools import partial
from itertools import filterfalse
from wordcloud import WordCloud
from tensorflow import keras
from tensorflow.keras import layers
df = pd.read_csv('data.csv')
columns = ['speaker','headline','description','event','duration','date_published','views_as_of_06162017','tags','transcript']
df = df[columns]
df['duration'] = pd.to_timedelta(df['duration']).dt.total_seconds()
df['date_published'] = pd.to_datetime(df['date_published'])
df = df.rename(columns={'views_as_of_06162017':'views'})
df = df.dropna()
wc = WordCloud()
def transcript_to_tokens(s):
s = list(map(lambda s: s.strip(), filter(len,s.split('\r'))))
s = ' '.join(filterfalse(partial(re.match,'[0-9]+\:[0-9]+'),s))
s = s.replace('.','').replace(',','').replace('!','').replace('?','').replace(':','').replace(';','').replace('"','').lower()
emotes = re.findall('\(([^)]+)\)',s)
speech = ' '.join(re.split('\(([^)]+)\)',s)).split()
emotes = emotes + list(filter(lambda s: s in ['applause','laughter'],speech)) # Inconsistent annotation in transcript
speech = filter(lambda s: not s in ['applause','laughter'],speech)
speech = list(filter(lambda s: s not in wc.stopwords, speech))
return (emotes,speech)
def word_count(s):
return len(pd.value_counts(s))
def translate_df(df):
emotes, words = zip(*df['transcript'].apply(transcript_to_tokens).to_list())
df.loc[:,'emotes'] = list(emotes)
df.loc[:,'words'] = list(words)
df['unique_words'] = df['words'].apply(word_count)
df['year_published'] = df['date_published'].dt.year
df['month_published'] = df['date_published'].dt.month
return df
df = translate_df(df)
all_words = [ x for xs in df['words'].to_list() for x in xs ]
word_counts = | pd.value_counts(all_words) | pandas.value_counts |
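# Illustrative sketch, separate from the script above: transcript_to_tokens()
# pulls parenthesised annotations such as "(Laughter)" out of the text with a
# capturing regex and later filters those words out of the speech tokens.
# The sample sentence below is made up.
import re
sample = "so we tried it (Laughter) and it worked (Applause)"
emotes = re.findall(r'\(([^)]+)\)', sample)                  # ['Laughter', 'Applause']
speech = ' '.join(re.split(r'\(([^)]+)\)', sample)).split()  # words plus the captured annotations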
import pandas as pd
import numpy as np
from datetime import datetime
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdmn
try:
from trade import Trade
except:
pass
try:
from backtest.trade import Trade
except:
pass
import chart_studio.plotly as py
import plotly.graph_objs as go
from plotly import subplots
import plotly.express as px
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
pd.options.display.float_format = '{:.5f}'.format
import random
class Backtest:
def __init__(self, strategy, data, from_date, to_date, balance=10000, leverage=0, max_units=10000000, verbose=True, ipynb=False, direct=True, test=False, ddw=0,
commission=0.0, rfr=0.02):
# initial variables
self.strategy = strategy # trading strategy
self.Leverage = leverage # leverage
self.FromDate = str(from_date).split(' ')[0] # starting date
self.ToDate = str(to_date).split(' ')[0] # ending date
self.Data = self.section(data, self.FromDate, self.ToDate) # slice from the dataset
self.Data['MC'] = ((self.Data['AC'] + self.Data['BC']) / 2) # middle close price
self.Data['MO'] = ((self.Data['AO'] + self.Data['BO']) / 2) # middle open price
        self.Datasets = [] # all datasets and instruments
        self.Verbose = verbose # verbose checker
self.ipynb = ipynb # only for Jupyter notebook
self.Direct = direct # calculating instrument units directly or indirectly
self.Test = test # run as a test only, with no balance calculation
self.DDW = ddw # drawdown value
self.RfR = rfr # risk-free rate
# variables for the simulation
        self.Commission = commission # commission per trade (percentage)
self.OpenPositions = [] # list of the opened trades
        self.CurrentProfit = 0 # unrealized profit/loss
self.GrossLoss = 0 # total loss
self.GrossProfit = 0 # total profit
self.TotalPL = 0 # total profit/loss
self.InitBalance = balance # initial balance
self.Balance = balance # account balance with closed trades
self.MarginLeft = balance # margin left with unrealized profit
self.Unrealized = 0 # unrealized profit/loss
        self.MaxUnits = max_units # maximum trading amount
        self.History = [] # list to store previous prices for the user
self.IndicatorList = [] # list to store indicators
columns=['Type', 'Open Time', 'Close Time', 'Units', 'Margin Used', 'Open Price', 'Close Price', 'Spread', 'Profit', 'Balance', 'AutoClose', 'TP', 'SL']
self.Results = pd.DataFrame(columns = ['Ratio', 'Value']) # dataframe for result analysis
self.TradeLog = pd.DataFrame(columns = columns) # pandas dataframe to log activity
self.AutoCloseCount = 0 # counts how many times were trades closed automatically
snp_benchmark = None # loading S&P as benchmark
dji_benchmark = None # loading DJI as benchmark
dax_benchmark = None # loading DAX as benchmark
try:
snp_benchmark = pd.read_csv('data/datasets/spx500usd/spx500usd_hour.csv')
except:
snp_benchmark = pd.read_csv('../data/datasets/spx500usd/spx500usd_hour.csv')
try:
dji_benchmark = pd.read_csv('data/datasets/djiusd/djiusd_hour.csv')
except:
dji_benchmark = pd.read_csv('../data/datasets/djiusd/djiusd_hour.csv')
try:
dax_benchmark = pd.read_csv('data/datasets/de30eur/de30eur_hour.csv')
except:
dax_benchmark = pd.read_csv('../data/datasets/de30eur/de30eur_hour.csv')
self.DJI_Benchmark = self.section(dji_benchmark, self.FromDate, self.ToDate)
self.SNP_Benchmark = self.section(snp_benchmark, self.FromDate, self.ToDate)
self.DAX_Benchmark = self.section(dax_benchmark, self.FromDate, self.ToDate)
def add_ma(self, n):
name = 'MA' + str(n)
self.IndicatorList.append(name)
self.Data[name] = self.Data['MC'].rolling(n).mean()
def add_wma(self, n):
name = 'WMA' + str(n)
self.IndicatorList.append(name)
weights = np.arange(1,n+1)
self.Data[name] = self.Data['MC'].rolling(n).apply(lambda prices: np.dot(prices, weights) / weights.sum(), raw=True)
def add_ema(self, n):
name = 'EMA' + str(n)
self.IndicatorList.append(name)
sma = self.Data['MC'].rolling(n).mean()
mod_price = self.Data['MC'].copy()
mod_price.iloc[0:10] = sma[0:10]
self.Data[name] = mod_price.ewm(span=n, adjust=False).mean()
def add_dema(self, n):
name = 'DEMA' + str(n)
self.IndicatorList.append(name)
# calculating EMA
sma = self.Data['MC'].rolling(n).mean()
mod_price = self.Data['MC'].copy()
mod_price.iloc[0:10] = sma[0:10]
ema = mod_price.ewm(span=n, adjust=False).mean()
# calculatung EMA of EMA
sma_ema = ema.rolling(n).mean()
mod_price_of_ema = ema.copy()
mod_price_of_ema.iloc[0:10] = sma_ema[0:10]
ema_of_ema = mod_price_of_ema.ewm(span=n, adjust=False).mean()
self.Data[name] = 2 * ema - ema_of_ema
def add_tema(self, n):
name = 'TEMA' + str(n)
self.IndicatorList.append(name)
# calculating EMA
sma = self.Data['MC'].rolling(n).mean()
mod_price = self.Data['MC'].copy()
mod_price.iloc[0:10] = sma[0:10]
ema1 = mod_price.ewm(span=n, adjust=False).mean()
# calculatung EMA of EMA1
sma_ema1 = ema1.rolling(n).mean()
mod_price_of_ema1 = ema1.copy()
mod_price_of_ema1.iloc[0:10] = sma_ema1[0:10]
ema2 = mod_price_of_ema1.ewm(span=n, adjust=False).mean()
# calculatung EMA of EMA
sma_ema2 = ema2.rolling(n).mean()
mod_price_of_ema2 = ema2.copy()
mod_price_of_ema2.iloc[0:10] = sma_ema2[0:10]
ema3 = mod_price_of_ema2.ewm(span=n, adjust=False).mean()
self.Data[name] = (3 * ema1) - (3 * ema2) + ema3
def add_heikin_ashi(self):
self.IndicatorList.append('HAC')
self.IndicatorList.append('HAO')
        # Heikin-Ashi OHLC derived from the bid candles (HAO uses the previous bar's open/close)
        self.Data['HAH'] = self.Data[['BH', 'BO', 'BC']].max(axis=1)
        self.Data['HAL'] = self.Data[['BL', 'BO', 'BC']].min(axis=1)
        self.Data['HAC'] = 0.25 * (self.Data['BO'] + self.Data['BH'] + self.Data['BL'] + self.Data['BC'])
        self.Data['HAO'] = 0.5 * (self.Data['BO'].shift(1) + self.Data['BC'].shift(1))
def section(self, dt, from_date, to_date):
start = dt.index[dt['Date'] == from_date].tolist()[0]
end = dt.index[dt['Date'] == to_date].tolist()
end = end[len(end) - 1]
return dt[start:end].reset_index()
def buy(self, row, instrument, trade_ammount, stop_loss=0, take_profit=0, units=0):
if not self.Test:
units = trade_ammount * self.Balance * self.Leverage
units = units - units * self.Commission
else:
units = trade_ammount * units * self.Leverage
if not self.Direct:
units /= row['AC']
if units > self.MaxUnits:
units = self.MaxUnits
self.OpenPositions.append(Trade(instrument[:6], 'BUY', units, row, stop_loss, take_profit, self.Direct))
return True
def sell(self, row, instrument, trade_ammount, stop_loss=0, take_profit=0, units=0):
if not self.Test:
units = trade_ammount * self.Balance * self.Leverage
units = units - units * self.Commission
else:
units = trade_ammount * units * self.Leverage
if not self.Direct:
units /= row['BC']
if units > self.MaxUnits:
units = self.MaxUnits
self.OpenPositions.append(Trade(instrument[:6], 'SELL', units, row, stop_loss, take_profit, self.Direct))
return True
def close(self, row, idx):
if len(self.OpenPositions) == 0:
return
trade = self.OpenPositions.pop(idx)
trade.close(row)
if trade.Profit > 0:
self.GrossProfit += trade.Profit
else:
self.GrossLoss += trade.Profit
self.TotalPL += trade.Profit
self.Balance += trade.Profit
if not self.Direct:
self.TradeLog.loc[len(self.TradeLog)] = [trade.Type, trade.OT, trade.CT, trade.Units, trade.Units / self.Leverage,
trade.OP, trade.CP, trade.CP - trade.OP, trade.Profit, self.Balance, trade.AutoClose, trade.TP, trade.SL]
else:
self.TradeLog.loc[len(self.TradeLog)] = [trade.Type, trade.OT, trade.CT, trade.Units, trade.Units / self.Leverage,
trade.OP, trade.CP, trade.CP - trade.OP, trade.Profit, self.Balance, trade.AutoClose, trade.TP, trade.SL]
def close_all(self, row):
j = len(self.OpenPositions)
while j != 0:
self.close(row, 0)
j -= 1
def max_dd(self, data_slice):
max2here = data_slice.expanding().max()
dd2here = data_slice - max2here
return dd2here.min()
def run(self):
simulation = None
if self.Verbose:
if not self.ipynb:
simulation = tqdm(range(len(self.Data)))
else:
simulation = tqdmn(range(len(self.Data)))
else:
simulation = range(len(self.Data))
for i in simulation:
if self.Verbose:
simulation.set_description('Balance: {:.2f}'.format(self.Balance))
row = self.Data.loc[i]
self.Unrealized = 0
for trade in self.OpenPositions:
if not trade.Closed and (trade.update(row)):
self.AutoCloseCount += 1
else:
self.Unrealized += trade.Profit
j = 0
while j < len(self.OpenPositions):
if self.OpenPositions[j].Closed:
self.close(row, j)
j += 1
if not self.Test:
if self.Unrealized < -self.Balance:
self.close_all(row)
if self.Verbose:
                        print('[INFO] Test stopped, insufficient funds.')
break
self.strategy(self, row, i)
self.close_all(row)
# analysis
if len(self.TradeLog) > 0:
if self.DDW != 0:
self.TradeLog['Drawdown'] = self.TradeLog['Balance'].rolling(self.DDW).apply(self.max_dd)
else:
                dd_length = int(len(self.Data) / len(self.TradeLog))
                self.TradeLog['Drawdown'] = self.TradeLog['Balance'].rolling(dd_length).apply(self.max_dd)
columns = ['Nr. of Trades', 'Profit / Loss', 'Profit Factor', 'Win Ratio', 'Average P/L', 'Drawdown', 'DDW (%)', 'Buy & Hold', 'Sharpe Ratio', 'Balance', 'Max. Balance',
'Min. Balance', 'Gross Profit', 'Gross Loss', 'Winning Trades', 'Losing Trades', 'Average Profit', 'Average Loss', 'Profit Std.', 'Loss Std.', 'SL/TP Activated']
if self.GrossLoss == 0:
self.GrossLoss = 1
buy = self.TradeLog[self.TradeLog['Type'] == 'BUY']
buy_values = [len(buy), buy['Profit'].sum(), buy[buy['Profit'] > 0]['Profit'].sum() / abs(buy[buy['Profit'] < 0]['Profit'].sum()),
len(buy[buy['Profit'] > 0]) / len(buy), buy['Profit'].sum() / len(buy), None, None, None, None, None, None, None,
buy[buy['Profit'] > 0]['Profit'].sum(), buy[buy['Profit'] < 0]['Profit'].sum(),
len(buy[buy['Profit'] > 0]), len(buy[buy['Profit'] < 0]),
buy.loc[buy['Profit'] > 0]['Profit'].mean(), buy.loc[buy['Profit'] < 0]['Profit'].mean(),
buy.loc[buy['Profit'] > 0]['Profit'].std(), buy.loc[buy['Profit'] < 0]['Profit'].std(),
buy['AutoClose'].sum()]
sell = self.TradeLog[self.TradeLog['Type'] == 'SELL']
sell_values = [len(sell), sell['Profit'].sum(), sell[sell['Profit'] > 0]['Profit'].sum() / abs(sell[sell['Profit'] < 0]['Profit'].sum()),
len(sell[sell['Profit'] > 0]) / len(sell), sell['Profit'].sum() / len(sell), None, None, None, None, None, None, None,
sell[sell['Profit'] > 0]['Profit'].sum(), abs(sell[sell['Profit'] < 0]['Profit'].sum()),
len(sell[sell['Profit'] > 0]), len(sell[sell['Profit'] < 0]),
sell.loc[sell['Profit'] > 0]['Profit'].mean(), sell.loc[sell['Profit'] < 0]['Profit'].mean(),
sell.loc[sell['Profit'] > 0]['Profit'].std(), sell.loc[sell['Profit'] < 0]['Profit'].std(),
sell['AutoClose'].sum()]
BnH = (self.Data['BC'][len(self.Data)-1] - self.Data['AC'][0]) * (1 / self.Data['BC'][len(self.Data)-1]) * 10000 * self.Leverage
if not self.Direct:
BnH = (self.Data['BC'][len(self.Data)-1] - self.Data['AC'][0]) * 10000 * self.Leverage / self.Data['AC'][0]
sharpe_ratio = (self.Balance / self.InitBalance - 1 - self.RfR) / (self.TradeLog['Balance'] / self.InitBalance).std()
all_values = [len(self.TradeLog), self.TotalPL, self.GrossProfit / abs(self.GrossLoss), len(self.TradeLog[self.TradeLog['Profit'] > 0]) / len(self.TradeLog),
self.TradeLog['Profit'].sum() / len(self.TradeLog), self.TradeLog['Drawdown'].min(), abs(self.TradeLog['Drawdown'].min()) / self.TradeLog['Balance'].max(),
BnH, sharpe_ratio, self.Balance, self.TradeLog['Balance'].max(), self.TradeLog['Balance'].min(), self.GrossProfit, self.GrossLoss,
len(self.TradeLog[self.TradeLog['Profit'] > 0]), len(self.TradeLog[self.TradeLog['Profit'] < 0]),
self.TradeLog.loc[self.TradeLog['Profit'] > 0]['Profit'].mean(), self.TradeLog.loc[self.TradeLog['Profit'] < 0]['Profit'].mean(),
self.TradeLog.loc[self.TradeLog['Profit'] > 0]['Profit'].std(), self.TradeLog.loc[self.TradeLog['Profit'] < 0]['Profit'].std(),
self.AutoCloseCount]
self.Results['Ratio'] = columns
self.Results['All'] = all_values
self.Results['Long'] = buy_values
self.Results['Short'] = sell_values
def plot_results(self, name='backtest_result.html'):
if (len(self.TradeLog) > 0):
fig = subplots.make_subplots(rows=3, cols=3, column_widths=[0.55, 0.27, 0.18],
specs=[[{}, {}, {"rowspan": 2, "type": "table"}],
[{}, {}, None],
[{}, {"type": "table", "colspan": 2}, None]],
shared_xaxes=True,
subplot_titles=("Balance", "Benchmarks", "Performance Analysis", "Profit and Loss", "Monte Carlo Simulation", "Entries and Exits", "List of Trades"),
vertical_spacing=0.06, horizontal_spacing=0.02)
buysell_color = []
entry_shape = []
profit_color = []
for _, trade in self.TradeLog.iterrows():
if trade['Type'] == 'BUY':
buysell_color.append('#83ccdb')
entry_shape.append('triangle-up')
else:
buysell_color.append('#ff0050')
entry_shape.append('triangle-down')
if trade['Profit'] > 0:
profit_color.append('#cdeaf0')
else:
profit_color.append('#ffb1cc')
buysell_marker = dict(color=buysell_color, size=self.TradeLog['Profit'].abs() / self.TradeLog['Profit'].abs().max() * 40)
balance_plot = go.Scatter(x=pd.concat([pd.Series([self.TradeLog['Open Time'][0]]), self.TradeLog['Close Time']]),
y=pd.concat([ | pd.Series([self.InitBalance]) | pandas.Series |
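# Illustrative sketch, separate from the Backtest class above: max_dd() measures
# drawdown as the balance minus its running (expanding) maximum, and run() divides
# the excess return by the std of the balance curve for the Sharpe ratio.
# The toy balance numbers below are made up.
import pandas as pd
balance = pd.Series([10000, 10400, 10100, 10700, 10200])
running_max = balance.expanding().max()
drawdown = balance - running_max        # 0, 0, -300, 0, -500
max_drawdown = drawdown.min()           # -500, the worst peak-to-trough drop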
from time import time
import pandas as pd
from numpy import arange
results_df = pd.read_csv('../data/botbrnlys-rand.csv')
def extract_best_vals_index(results_df, df, classifier, hp):
final_df = | pd.DataFrame() | pandas.DataFrame |
import argparse
from umap import UMAP
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description='Visualize DAE compressed output using UMAP algorithm.')
parser.add_argument('csv_output', type=str, help='Output CSV file generated from DAE.py')
args = parser.parse_args()
df = | pd.read_csv(args.csv_output, header=None) | pandas.read_csv |
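# Illustrative sketch, separate from the script above: once the DAE output is
# loaded, the essential step is projecting the numeric matrix to 2-D with
# UMAP.fit_transform. The random matrix here stands in for the real CSV contents.
import numpy as np
from umap import UMAP
X = np.random.rand(200, 16)                        # placeholder for the DAE-compressed rows
embedding = UMAP(n_components=2).fit_transform(X)  # array of shape (200, 2) for plotting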
# encoding: utf-8
from opendatatools.common import RestAgent
from bs4 import BeautifulSoup
from progressbar import ProgressBar
import pandas as pd
import re
lianjia_city_map = {
'北京' : 'bj',
'上海' : 'sh',
'成都' : 'cd',
'杭州' : 'hz',
'广州' : 'gz',
'深圳' : 'sz',
'厦门' : 'xm',
'苏州' : 'su',
'重庆' : 'cq',
'长沙' : 'cs',
'大连' : 'dl',
'海口' : 'hk',
'合肥' : 'hf',
'济南' : 'jn',
'青岛' : 'qd',
'南京' : 'nj',
'石家庄': 'sjz',
'沈阳' : 'sy',
'天津' : 'tj',
'武汉' : 'wh',
'无锡' : 'wx',
'西安' : 'xa',
'烟台' : 'yt',
'中山' : 'zs',
'珠海' : 'zh',
'郑州' : 'zz',
}
class LianjiaAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_city_list(self):
return lianjia_city_map.keys()
def get_district_by_city(self, city):
city_code = lianjia_city_map[city]
url = 'https://%s.lianjia.com/ershoufang/' % city_code
response = self.do_request(url)
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data_map = {}
for div in divs:
if div.has_attr('data-role') and 'ershoufang' in div['data-role']:
sub_divs = div.find_all('div')
sub_div = sub_divs[0]
links = sub_div.find_all('a')
for link in links:
#link_addr = link
#data_list.append(data)
key = link.text
value = link['href'].replace('/ershoufang/', '').replace('/', '')
data_map[key] = value
return data_map
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def get_esf_list(self, city, max_page_no):
if max_page_no>100:
max_page_no = 100
if city in lianjia_city_map:
city_code = lianjia_city_map[city]
return self._get_esf_list(city_code, max_page_no)
def get_esf_list_by_district(self, city, district, max_page_no):
if max_page_no>100:
max_page_no = 100
if city in lianjia_city_map:
city_code = lianjia_city_map[city]
district_map = self.get_district_by_city(city)
if district in district_map:
district_code = district_map[district]
return self._get_esf_list_by_district(city_code, district_code, max_page_no)
def _get_esf_list(self, city_code, max_page_no):
page_no = 1
result_list = []
process_bar = ProgressBar().start(max_value=max_page_no)
while page_no <= max_page_no:
process_bar.update(page_no)
#print('getting data from lianjia.com for page %d' % page_no)
data_list = self._get_erf_list_url('https://%s.lianjia.com/ershoufang/pg%d/' % (city_code, page_no))
page_no = page_no + 1
if (len(data_list) == 0):
break
result_list.extend(data_list)
return pd.DataFrame(result_list)
def _get_esf_list_by_district(self, city_code, district_code, max_page_no):
page_no = 1
result_list = []
process_bar = ProgressBar().start(max_value=max_page_no)
while page_no <= max_page_no:
process_bar.update(page_no)
#print('getting data from lianjia.com for page %d' % page_no)
data_list = self._get_erf_list_url('https://%s.lianjia.com/ershoufang/%s/pg%d/' % (city_code, district_code, page_no))
page_no = page_no + 1
if (len(data_list) == 0):
break
result_list.extend(data_list)
return | pd.DataFrame(result_list) | pandas.DataFrame |
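# Illustrative sketch, separate from the Lianjia agent above: _get_esf_list()
# follows a loop-extend-wrap pattern -- iterate page numbers, stop on an empty
# page, accumulate per-listing dicts, and build one DataFrame at the end.
# fetch_page below is a made-up placeholder for the HTTP and HTML parsing steps.
import pandas as pd
def collect_pages(fetch_page, max_page_no):
    rows = []
    for page_no in range(1, max_page_no + 1):
        page_rows = fetch_page(page_no)   # list of dicts for one result page
        if not page_rows:
            break
        rows.extend(page_rows)
    return pd.DataFrame(rows)
demo = collect_pages(lambda p: [{"page": p, "price": 100 * p}] if p <= 2 else [], 5)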
#!/usr/bin/env python3
#-*- coding: utf8 -*-
"""Scrape products from Woolworths
Returns:
(dict): Prices, product name, datetime
References:
[1] https://github.com/nguyenhailong253/grosaleries-web-scrapers
"""
import argparse
import re
import subprocess
import sys
import traceback
import warnings
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from time import sleep
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as soup
PROJECT_ROOT = Path(subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8'))
DATA = PROJECT_ROOT / "data"
sys.path.append(str(PROJECT_ROOT))
from src.base import SupermarketNames, str_supermarketnames_map
from src.api.web import HTTPResponseError, WebAPI
class Supermarket(ABC, WebAPI):
quote = {
"Availability": []
,"Brand": []
,"Category": []
,"Datetime": []
,"Name": []
,"Pic": []
,"Product Price": []
,"Product Quantity": []
,"Unit Price": []
,"Unit Quantity": []
}
@abstractmethod
def product_info_container(self):
pass
@abstractmethod
def url(self):
pass
@abstractmethod
def get_products_list(self, container_soup, search_item: str):
pass
@abstractmethod
def scrape_products(self):
pass
class Woolworths(Supermarket):
def __init__(self) -> None:
super().__init__()
self.name = SupermarketNames.woolworths
def product_info_container(self):
return ('div', {'class': 'shelfProductTile-information'})
def url(self, search_item: str, page_number: int=1):
return f"https://www.woolworths.com.au/shop/search/products?searchTerm={search_item}&pageNumber={page_number}"
def get_products_list(self, container_soup, search_item: str):
return self.scrape_products(container_soup, search_item)
def scrape_products(self, container_soup, category):
for container in container_soup:
# get the product name
product_name = container.find("span", {"class": "sr-only"}).text.strip()
            # assume the product is available until shown otherwise
            availability = True
            # record the scraping timestamp
            date_now = datetime.now()
# check price and availability of each item
price_dollar = container.find('span',{'class':'price-dollars'})
price_cent = container.find('span', {'class': 'price-cents'})
price = np.nan
try:
price = float(price_dollar.text + '.' + price_cent.text)
except AttributeError:
availability = False
unit_price = np.nan
unit_quantity = np.nan
try:
price_per_unit = container.find('div', {'class': 'shelfProductTile-cupPrice'}).text.strip().replace(" ", "").split("/")
unit_price = float(price_per_unit[0].strip("$"))
unit_quantity = price_per_unit[1]
except AttributeError:
pass
self.quote["Availability"].append(availability)
self.quote["Brand"].append(None)
self.quote["Category"].append(category)
self.quote["Datetime"].append(date_now)
self.quote["Name"].append(product_name)
self.quote["Pic"].append(None)
self.quote["Product Price"].append(price)
self.quote["Product Quantity"].append(np.nan)
self.quote["Unit Price"].append(unit_price)
self.quote["Unit Quantity"].append(unit_quantity)
return pd.DataFrame.from_dict(self.quote)
class Coles(Supermarket):
def __init__(self) -> None:
super().__init__()
self.name = SupermarketNames.coles
def product_info_container(self):
return ('header', {'class': 'product-header'})
def url(self, search_item: str, page_number: int=1):
search_item = search_item.replace(" ", "%20")
return f"https://shop.coles.com.au/a/national/everything/search/{search_item}?pageNumber={page_number}"
def get_products_list(self, container_soup, search_item: str):
return self.scrape_products(container_soup, search_item)
def scrape_products(self, container_soup, category):
for container in container_soup:
# get the product name
product_name = container.find("span", {"class": "product-name"}).text.strip()
product_brand = container.find("span", {"class": "product-brand"}).text.strip()
package_sizes = container.find_all("span", {"class": "accessibility-inline"})
pattern = re.compile(r"\d+\W{0,1}\w+", re.IGNORECASE)
valid_sizes = [re.search(pattern, i.text.rstrip()) for i in package_sizes]
product_quantity = [x for x in valid_sizes if x is not None]
try:
product_quantity = product_quantity[0].group(0)
except IndexError:
product_quantity = None
            # assume the product is available until shown otherwise
            availability = True
            # record the scraping timestamp
            date_now = datetime.now()
# check price and availability of each item
if (container.find('span', {'class': 'dollar-value'})) :
price = container.find('span', {'class': 'dollar-value'}).text.strip() + container.find('span', {'class': 'cent-value'}).text.strip()
else:
price = np.nan
availability = False
package_price = container.find('span', {'class': 'package-price'}).text.strip()
if (package_price == '') | (not package_price):
unit_price = np.nan
unit_quantity = np.nan
else:
text = package_price.strip().split("per")
unit_price = float(text[0].strip("$"))
unit_quantity = text[1].strip()
self.quote["Availability"].append(availability)
self.quote["Brand"].append(product_brand)
self.quote["Category"].append(category)
self.quote["Datetime"].append(date_now)
self.quote["Name"].append(product_name)
self.quote["Pic"].append(None)
self.quote["Product Price"].append(price)
self.quote["Product Quantity"].append(product_quantity)
self.quote["Unit Price"].append(unit_price)
self.quote["Unit Quantity"].append(unit_quantity)
return pd.DataFrame.from_dict(self.quote)
class HarrisFarm(Supermarket):
def __init__(self) -> None:
super().__init__()
        self.name = SupermarketNames.coles  # NOTE: reuses the Coles enum member; Harris Farm presumably needs its own SupermarketNames entry
def product_info_container(self):
return ('div', {'class': 'product-item columns large-2'})
def url(self, search_item: str, page_number: int=1):
search_item = search_item.replace(" ", "%20")
return f"https://www.harrisfarm.com.au/search?q={search_item}&hPP=24&idx=shopify_products&p={page_number}&is_v=1"
def get_products_list(self, container_soup, search_item: str):
return self.scrape_products(container_soup, search_item)
def scrape_products(self, container_soup, category):
for container in container_soup:
# get the product name
product_name = container.find("p", {"class": "title"}).text.strip()
product_brand = None
package_sizes = container.find_all("span", {"class": "accessibility-inline"})
pattern = re.compile(r"\d+\W{0,1}\w+", re.IGNORECASE)
valid_sizes = [re.search(pattern, i.text.rstrip()) for i in package_sizes]
product_quantity = [x for x in valid_sizes if x is not None]
try:
product_quantity = product_quantity[0].group(0)
except IndexError:
product_quantity = None
            # assume the product is available until shown otherwise
            availability = True
            # record the scraping timestamp
            date_now = datetime.now()
# check price and availability of each item
price = container.find('span', {'class': 'from_price'})
if (price == '') | (not price):
price = np.nan
availability = False
else:
try:
price = float(container.find('span', {'class': 'from_price'}).text.strip().strip("$"))
except ValueError:
price = np.nan
package_price = container.find('span', {'class': 'compare_at_price unit_price'}).text.strip()
if (package_price == '') | (not package_price):
unit_price = np.nan
unit_quantity = np.nan
else:
text = package_price.strip()
                unit_price = re.findall(r"\d+.*\d*", text)[0]
                unit_quantity = re.findall(r"\s\w+\Z", text)[0].strip()
self.quote["Availability"].append(availability)
self.quote["Brand"].append(product_brand)
self.quote["Category"].append(category)
self.quote["Datetime"].append(date_now)
self.quote["Name"].append(product_name)
self.quote["Pic"].append(None)
self.quote["Product Price"].append(price)
self.quote["Product Quantity"].append(product_quantity)
self.quote["Unit Price"].append(unit_price)
self.quote["Unit Quantity"].append(unit_quantity)
return | pd.DataFrame.from_dict(self.quote) | pandas.DataFrame.from_dict |
"""
Authors:
ITryagain <<EMAIL>>
Reference:
https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en
https://www.kaggle.com/uberkinder/efficient-metric
https://www.kaggle.com/artgor
Introduction:
    This file contains training utilities for models such as LightGBM, XGBoost and CatBoost.
"""
import time
import gc
import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn import metrics
from numba import jit
from handyML.preprocessing.Encoding import BetaEncoder
from tqdm import tqdm
# Computes AUC faster than sklearn.
# However, when there are ties in the predictions, the computed value may differ from sklearn's.
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en
@jit
def fast_auc(y_true, y_prob):
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
nfalse = 0
auc = 0
n = len(y_true)
for i in range(n):
y_i = y_true[i]
nfalse += (1 - y_i)
auc += y_i * nfalse
auc /= (nfalse * (n - nfalse))
return auc
# define metric by ourself
def eval_auc(preds, dtrain):
return 'auc', fast_auc(dtrain, preds), True
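# Quick sanity check for fast_auc (toy arrays, for illustration only; values chosen so the
# result can be verified by hand against sklearn.metrics.roc_auc_score):
#   y_true = np.array([0, 0, 1, 1])
#   y_prob = np.array([0.1, 0.4, 0.35, 0.8])
#   fast_auc(y_true, y_prob)  # -> 0.75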
def group_mean_log_mae(y_true, y_pred, group, floor=1e-9):
"""
Fast metric computation for this competition: https://www.kaggle.com/c/champs-scalar-coupling
Code is from this kernel: https://www.kaggle.com/uberkinder/efficient-metric
"""
maes = (y_true - y_pred).abs().groupby(group).mean()
return np.log(maes.map(lambda x: max(x, floor))).mean()
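# Example (toy pandas objects, for illustration only): with
#   y_true = pd.Series([1.0, 2.0, 3.0, 4.0]), y_pred = pd.Series([1.5, 2.0, 2.0, 5.0])
#   group  = pd.Series(['a', 'a', 'b', 'b'])
# the per-group MAEs are 0.25 and 1.0, so the score is (log(0.25) + log(1.0)) / 2 ≈ -0.693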
def train_model_regression(X, X_test, target_col, params, folds, model_type='lgb', eval_metric='mae', columns=None,
model=None, verbose=1000, early_stopping_rounds=200, n_estimators=50000, metrics_dict=None,
beta_encoding=False, cat_col=None, encode_col=None, N_min=1, encoding_nan=False, encoding_type='mean',
feature_importance=True):
"""
A function to train a variety of regression models.
Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances.
:param: X - training data, can be pd.DataFrame (after normalizing)
:param: X_test - test data, can be pd.DataFrame (after normalizing)
:param: target_col - target col name
:param: folds - folds to split data
:param: model_type - type of model to use
:param: eval_metric - metric to use
:param: columns - columns to use. If None - use all columns
    :param: n_estimators - maximum number of boosting rounds (used together with early stopping)
:param: model - sklearn model, works only for "sklearn" model type
:param: beta_encoding - do beta_encoding in k-folds
:param: feature_importance - return feature importance
:param: encode_col - the columns used for encoding
"""
if beta_encoding:
if (not isinstance(encode_col, list)) and (not isinstance(encode_col, np.ndarray)):
            raise TypeError('encode_col should be a list or np.ndarray')
if columns is None:
columns = [col for col in X.columns if col != target_col]
if feature_importance:
if model_type == 'sklearn':
feature_importance = False
if metrics_dict is None:
metrics_dict = {
'mae': {
'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'xgb_metric_name': 'mae',
'sklearn_scoring_function': metrics.mean_absolute_error
},
'group_mae': {
'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'xgb_metric_name': 'mae',
'scoring_function': group_mean_log_mae
},
'mse': {
'lgb_metric_name': 'mse',
'catboost_metric_name': 'MSE',
'xgb_metric_name': 'rmse',
'sklearn_scoring_function': metrics.mean_squared_error
}
}
result_dict = {}
    scores = []  # list of scores on folds
    oof = np.zeros(len(X))  # out-of-fold predictions on train data
    prediction = np.zeros(len(X_test))  # averaged predictions on test data
feature_importance_df = pd.DataFrame()
for fold_n, (train_index, valid_index) in enumerate(folds.split(X[columns], X[target_col])):
        print('Fold {} started at {}'.format(fold_n + 1, time.ctime()))
X_train, y_train, X_valid, y_valid = X[columns].iloc[train_index], X[target_col].iloc[train_index], X[columns].iloc[valid_index], X[target_col].iloc[valid_index]
if beta_encoding:
# encode variables
feature_col = []
for var_name in tqdm(encode_col):
# fit encoder
be = BetaEncoder(var_name, encoding_nan)
be.fit(X_train[[var_name, target_col]], target_col)
feature_name = var_name + encoding_type
X_train[feature_name] = be.transform(X_train[[var_name]], encoding_type, N_min)
X_valid[feature_name] = be.transform(X_valid[[var_name]], encoding_type, N_min)
X_test[feature_name] = be.transform(X_test[[var_name]], encoding_type, N_min)
feature_col.append(feature_name)
gc.collect()
columns += feature_col
if model_type == 'lgb':
model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],
categorical_feature=cat_col,
verbose=verbose,
early_stopping_rounds=early_stopping_rounds)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test[columns], num_iteration=model.best_iteration_)
elif model_type == 'xgb':
model = xgb.XGBRegressor(**params, n_estimators=n_estimators, n_jobs=-1)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
eval_metric=metrics_dict[eval_metric]['xgb_metric_name'],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds,
)
y_pred_valid = model.predict(X_valid, ntree_limit=model.best_ntree_limit)
y_pred = model.predict(X_test[columns], ntree_limit=model.best_ntree_limit)
elif model_type == 'cat':
model = CatBoostRegressor(iterations=n_estimators,
eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],
loss_function=metrics_dict[eval_metric]['catboost_metric_name'],
verbose=verbose,
cat_features=cat_col,
**params)
model.fit(X_train, y_train, eval_set=(X_valid, y_valid))
gc.collect()
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test[columns])
elif model_type == 'sklearn':
model = model
model.fit(X_train, y_train)
y_pred_valid = model.predict(X_valid)
score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)
print("Fold {}. {}: {}.".format({fold_n}, {eval_metric}, {score}))
print('')
y_pred = model.predict(X_test[columns])
oof[valid_index] = y_pred_valid.reshape(-1,)
if eval_metric != 'group_mae':
scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
else:
scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid,
X_valid['group']))
prediction += y_pred.reshape(-1,)
if feature_importance:
fold_importance_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 21:53:10 2018
改自selectSubjID_inScale_V2
根据给定的条件筛选大表的item和subjects' folder
inputs:
file_all:大表
column_basic1=[0,11,19,20,21,22,23,27,28,29,30]:基本信息列
column_basic2=['学历(年)','中国人利手量表']:基本信息名
column_hamd17=np.arange(104,126,1),
column_hama=np.arange(126,141,1),
column_yars=np.arange(141,153,1),
column_bprs=np.arange(153,177,1)
column_diagnosis='诊断':诊断的列名
column_quality='Resting_quality'
column_note1='诊断备注'
column_note2='备注'
note1_keyword='复扫':重复备注文字
outputs:
folder:筛选出来的ID
basic:筛选出来的基本信息
hamd17,hamm,yars,bprs:筛选出来的量表
logicIndex_scale:量表的逻辑index
logicIndex_repeat:重复量表的index
...
to fetch other output,please check results_dict
@author: <NAME>
new feature:任意条件筛选
"""
# ===============================================
import sys
# sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\workstation')
import pandas as pd
import re
import os
import numpy as np
class select_SubjID():
# initial parameters
def __init__(self,
file_all=r"D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\8.30大表.xlsx",
                 # Basic information and scale columns; row-level screening conditions are not supported for these yet
column_basic1=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
column_basic2=['学历(年)', '中国人利手量表', '诊断备注', '备注'],
column_hamd17=np.arange(104, 126, 1),
column_hama=np.arange(126, 141, 1),
column_yars=np.arange(141, 153, 1),
column_bprs=np.arange(153, 177, 1),
                 # Columns (items) that can be screened row-wise by conditions, given as a dict: key = column name, value = conditions
                 # condition_name: {condition: [include_or_exclude, match_method]}
                 # Note: for all conditions on a given column, only one screening method is supported at a time -- either all 'include' or all 'exclude'
                 # In practice, 'include' and 'exclude' should not be mixed on the same column anyway
screening_dict={
'诊断': {1: ['include', 'exact'], 2: ['include', 'exact'], 3: ['include', 'exact'], 4: ['include', 'exact']},
'Resting_quality': {'Y': ['include', 'exact']},
'诊断备注': {'复扫': ['exclude', 'fuzzy'], '糖尿病': ['exclude', 'fuzzy'], '不能入组': ['exclude', 'fuzzy']},
'备注': {'复扫': ['exclude', 'fuzzy']}
}
# screening_dict={
# '诊断':{1:['include','exact'],2:['include','exact'],3:['include','exact'],4:['include','exact']},
# 'Resting_quality':{'Y':['include','exact']},
# '诊断备注':{'复扫':['exclude','fuzzy']}
# }
):
# ====================================================
self.file_all = file_all
self.column_basic1 = column_basic1
self.column_basic2 = column_basic2
self.column_hamd17 = column_hamd17
self.column_hama = column_hama
self.column_yars = column_yars
self.column_bprs = column_bprs
self.screening_dict = screening_dict
print('Initialized!\n')
# ====================================================
def loadExcel(self):
# load all clinical data in excel
self.allClinicalData = pd.read_excel(self.file_all)
return self
def extract_one_series(self, column_var):
        # Select items; item columns can be given either as column-name strings or as integer positions
        if isinstance(column_var[0], str):
            data = self.allClinicalData.loc[:, column_var]
        elif isinstance(column_var[0], (int, np.integer)):
            data = self.allClinicalData.iloc[:, column_var]
        else:
            print('Invalid input for column_var!\n')
return data
# ====================================================
def select_item(self):
        # Select items; item columns can be integer positions or column-name strings (note: row-level screening is not supported for these items yet)
basic1 = self.extract_one_series(self.column_basic1)
basic2 = self.extract_one_series(self.column_basic2)
self.basic = pd.concat([basic1, basic2], axis=1)
self.hamd17 = self.extract_one_series(self.column_hamd17)
self.hama = self.extract_one_series(self.column_hama)
self.yars = self.extract_one_series(self.column_yars)
self.bprs = self.extract_one_series(self.column_bprs)
return self
# ====================================================
    # Conditional screening
def screen_data_according_conditions_in_dict_one(
self, series_for_screening, condition_in_dict):
        # Screen by the conditions given in the dict and return the index. A condition may be a string or a number.
        # Note: this function handles only one column.
        # str.contains cannot handle null values, so replace nulls with '未知' (unknown) first
series_for_screening = series_for_screening.mask(
series_for_screening.isnull(), '未知')
        # Create an empty pd.DataFrame whose index is that of series_for_screening, used for the later joins
screened_ind_all = pd.DataFrame([])
for condition_name in condition_in_dict:
screened_ind = pd.DataFrame([], index=series_for_screening.index)
            # After screening for each key, take the union via pd.DataFrame.join
# print(condition_name)
# print(condition_in_dict[condition_name])
# print(condition_in_dict[condition_name][-1])
            # Apply the screening conditions
            # Exact matching; numeric conditions generally use exact matching
if condition_in_dict[condition_name][-1] == 'exact':
if condition_in_dict[condition_name][0] == 'exclude':
screened_ind = screened_ind.loc[series_for_screening.index[series_for_screening != condition_name]]
elif condition_in_dict[condition_name][0] == 'include':
screened_ind = screened_ind.loc[series_for_screening.index[series_for_screening == condition_name]]
            # Fuzzy matching
elif condition_in_dict[condition_name][-1] == 'fuzzy':
if condition_in_dict[condition_name][0] == 'exclude':
screened_ind_tmp = series_for_screening.mask(
series_for_screening.str.contains(condition_name), None).dropna()
screened_ind = screened_ind.loc[screened_ind_tmp.dropna(
).index]
elif condition_in_dict[condition_name][0] == 'include':
screened_ind_tmp = series_for_screening.where(
series_for_screening.str.contains(condition_name), None)
screened_ind = screened_ind.loc[screened_ind_tmp.dropna(
).index]
            # Matching method not specified
            else:
                print(
                    '__init__ is wrong!\n### maybe the word "exact" or "fuzzy" is misspelled ###\n')
            # Take the union (or intersection) with pd.DataFrame.join
            # Note: mixing 'exclude' and 'include' here can give wrong screening results, so it is best to use only one method per column
if screened_ind_all.index.empty:
screened_ind_all = screened_ind_all.join(
| pd.DataFrame(screened_ind) | pandas.DataFrame |
import json
import requests
import streamlit as st
from pandas import DataFrame
from web3 import Web3
from opensea_api_client import Client
# page init
client = Client()
def render_asset(asset):
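    """Render one OpenSea asset in the Streamlit page: the name (or collection + token id),
    the description, and the image/video/SVG preview."""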
if asset['name'] is not None:
st.subheader(asset['name'])
else:
st.subheader(f"{asset['collection']['name']} #{asset['token_id']}")
if asset['description'] is not None:
st.write(asset['description'])
else:
st.write(asset['collection']['description'])
if asset['image_url'].endswith('mp4') or asset['image_url'].endswith('mov'):
st.video(asset['image_url'])
elif asset['image_url'].endswith('svg'):
svg = requests.get(asset['image_url']).content.decode()
st.image(svg)
elif asset['image_url']:
st.image(asset['image_url'])
st.sidebar.header("Endpoints")
endpoint_choices = ['Assets', 'Events', 'Rarity']
endpoint = st.sidebar.selectbox("Choose an Endpoint", endpoint_choices)
st.title(f"OpenSea API Explorer - {endpoint}")
if endpoint == 'Assets':
st.sidebar.header('Filters')
owner = st.sidebar.text_input("Owner")
collection = st.sidebar.text_input("Collection", 'nft-worlds')
assets_resp = client.get_assets(collection=collection, owner=owner, limit=5)
for asset in assets_resp['assets']:
render_asset(asset)
if endpoint == 'Events':
collection = st.sidebar.text_input("Collection")
asset_contract_address = st.sidebar.text_input("Contract Address")
token_id = st.sidebar.text_input("Token ID")
event_type = st.sidebar.selectbox("Event Type", ['offer_entered', 'cancelled', 'bid_withdrawn', 'transfer', 'approve'])
events_resp = client.get_events()
event_list = []
for event in events_resp['asset_events']:
if event_type == 'offer_entered':
if event['bid_amount']:
bid_amount = Web3.fromWei(int(event['bid_amount']), 'ether')
if event['from_account']['user']:
bidder = event['from_account']['user']['username']
else:
bidder = event['from_account']['address']
event_list.append([event['created_date'], bidder, float(bid_amount), event['asset']['collection']['name'], event['asset']['token_id']])
df = | DataFrame(event_list, columns=['time', 'bidder', 'bid_amount', 'collection', 'token_id']) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
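    # Usage sketch (hypothetical paths; mirrors the CLI described in the module docstring):
    #   meta = TransformMetaData(inputFileName=r"C:\raad\src\software\time-series.csv",
    #                            debug=True, transform=True, sectionName="time-series",
    #                            outFile="time-series.ini")
    #   frame = meta.getAnalysisFrame()   # cleaned pandas DataFrame parsed from the CSV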
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
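    # For reference, the written INI is expected to look roughly like this (illustrative section
    # and values only; configparser lower-cases option names by default):
    #   [drive-stats]
    #   serial_number = ['SN123', 'SN456', ...]
    #   temperature = ['35', '36', ...]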
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
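    # e.g. numpy.average([1., 2., 3.], weights=[1/3, 1/3, 1/3]) evaluates to 2.0: each element is
    # weighted by 1/n up front rather than summing first and then dividing by n, which is the
    # point of the multiplication trick above.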
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
def get_youtube_search(query,order,regionCode,channel_id = ''):
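    """Run a YouTube Data API v3 search (max 50 results, ordered as requested) and return
    the raw JSON response; requires a client_secret.json OAuth file next to the script.
    If channel_id is non-empty, the search is restricted to that channel."""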
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
#os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "client_secret.json"
# Get credentials and create an API client
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
client_secrets_file, scopes)
credentials = flow.run_console()
youtube = googleapiclient.discovery.build(
api_service_name, api_version, credentials=credentials)
if channel_id == '':
request = youtube.search().list(
part="snippet",
maxResults=50,
order=order,
q=query,
regionCode=regionCode
)
else:
request = youtube.search().list(
part="snippet",
maxResults=50,
channelId=channel_id,
order=order,
q=query,
regionCode=regionCode
)
response = request.execute()
return response
#-----------Россия 24-------------
search_result_1 = get_youtube_search(query = "Россия 24", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_2_df = pd.DataFrame(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_3_df = pd.DataFrame(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_4_df = pd.DataFrame(search_result_4['items'])
data_for_wordcloud = []
for i in range(2,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
for i in range(2,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' Специальный репортаж')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace(' - россия 24','')
title = title.replace('россия 24','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('александра сладкова','')
title = title.replace('москва 24','')
title = title.replace('москва. кремль. кутин','')
title = title.replace('// . от 07.03.2021','')
title = title.replace('авторская программа <NAME>','')
title = title.replace('марата кримчеева','')
title = title.replace('фильм анны афанасьевой','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('репортаж','')
title = title.replace('интервью','')
title = title.replace('программа','')
title = title.replace('эксклюзивный','')
title = title.replace('дежурная часть','')
title = title.replace('вести недели с дмитрием киселевым','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('путина','путин')
title = title.replace('россии','россия')
title = title.replace('китая','китай')
title = title.replace('донбассе','донбасс')
title = title.replace('карабахе','карабах')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_r24 = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_r24 = data_for_wordcloud_preprocessed_string_r24 + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="red").generate(data_for_wordcloud_preprocessed_string_r24)
wordcloud.to_file("Russia24_WordCloud.png")
#-----------RT на русском-------------
search_result_1 = get_youtube_search(query = "RT на русском", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('новости','')
title = title.replace('репортаж','')
title = title.replace('репортаж','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путин:','путин')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('украинские','украина')
title = title.replace('украинской','украина')
title = title.replace('украине','украина')
title = title.replace('украину','украина')
title = title.replace('украины','украина')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_rt = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_rt = data_for_wordcloud_preprocessed_string_rt + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="green").generate(data_for_wordcloud_preprocessed_string_rt)
wordcloud.to_file("RT_WordCloud.png")
#-----------<NAME>-------------
search_result_1 = get_youtube_search(query = "<NAME>", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('"','')
title = title.replace('репортаж','')
title = title.replace('репортаж','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('навального','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_rain = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_rain = data_for_wordcloud_preprocessed_string_rain + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="purple").generate(data_for_wordcloud_preprocessed_string_rain)
wordcloud.to_file("TvRain_WordCloud.png")
#-----------DW на русском-------------
search_result_1 = get_youtube_search(query = "DW на русском", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('"','')
title = title.replace('репортаж','')
title = title.replace('dw','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путину','путин')
title = title.replace('москве','москва')
title = title.replace('москву','москва')
title = title.replace('навального','навальный')
title = title.replace('навальным','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace('германии','германия')
title = title.replace('германию','германия')
title = title.replace('кремля','кремль')
title = title.replace('санкций','санкции')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_dw = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_dw = data_for_wordcloud_preprocessed_string_dw + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="blue").generate(data_for_wordcloud_preprocessed_string_dw)
wordcloud.to_file("DW_WordCloud.png")
#-----------Настоящее Время-------------
search_result_1 = get_youtube_search(query = "Настоящее Время", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('"','')
title = title.replace('репортаж','')
title = title.replace('dw','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путину','путин')
title = title.replace('москве','москва')
title = title.replace('москву','москва')
title = title.replace('навального','навальный')
title = title.replace('навальным','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace('германии','германия')
title = title.replace('германию','германия')
title = title.replace('кремля','кремль')
title = title.replace('санкций','санкции')
title = title.replace('таджикистана','таджикистан')
title = title.replace('таджикистане','таджикистан')
title = title.replace('казахстана','казахстан')
title = title.replace('казахстане','казахстан')
title = title.replace('кыргызстана','кыргызстан')
title = title.replace('кыргызстане','кыргызстан')
title = title.replace('хабаровского','хабаровск')
title = title.replace('хабаровске','хабаровск')
title = title.replace(': ',' ')
title = title.replace('.',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_nt = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_nt = data_for_wordcloud_preprocessed_string_nt + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "darkblue",
relative_scaling=0.5,
background_color="white").generate(data_for_wordcloud_preprocessed_string_nt)
wordcloud.to_file("NT_WordCloud.png")
#-----------Новости на Первом канале-------------
search_result_1 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCKonxxVHzDl55V7a9n_Nlgg')
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCKonxxVHzDl55V7a9n_Nlgg')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
if 'Выпуск ' not in title:
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
if 'Выпуск ' not in title:
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
    title = title.lower()
    title = title.replace('"','')
    title = title.replace('специальный репортаж','')
    title = title.replace('документальный фильм','')
    title = title.replace('эксклюзивное интервью','')
    title = title.replace('последние','')
    title = title.replace('новости','')
    title = title.replace('"','')
    title = title.replace('репортаж','')
    title = title.replace('dw','')
    title = title.replace(' на ',' ')
    title = title.replace(' не ',' ')
    title = title.replace(' из ',' ')
    title = title.replace(' за ',' ')
    title = title.replace(' для ',' ')
    title = title.replace(' по ',' ')
    title = title.replace(' от ',' ')
    title = title.replace('как',' ')
    title = title.replace('россии','россия')
    title = title.replace('путина','путин')
    title = title.replace('путину','путин')
    title = title.replace('москве','москва')
    title = title.replace('москву','москва')
    title = title.replace('первому','')
    title = title.replace('первого','')
    title = title.replace('канала','')
    title = title.replace('каналу','')
    title = title.replace('навального','навальный')
    title = title.replace('навальным','навальный')
    title = title.replace('владимира','владимир')
    title = title.replace('коронавируса','коронавирус')
    title = title.replace('беларуси','беларусь')
    title = title.replace('германии','германия')
    title = title.replace('германию','германия')
    title = title.replace('кремля','кремль')
    title = title.replace('санкций','санкции')
    title = title.replace('таджикистана','таджикистан')
    title = title.replace('таджикистане','таджикистан')
    title = title.replace('казахстана','казахстан')
    title = title.replace('казахстане','казахстан')
    title = title.replace('кыргызстана','кыргызстан')
    title = title.replace('кыргызстане','кыргызстан')
    title = title.replace('хабаровского','хабаровск')
    title = title.replace('хабаровске','хабаровск')
    title = title.replace(': ',' ')
    title = title.replace('.',' ')
    data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_tv1 = ''
for title in data_for_wordcloud_preprocessed:
    data_for_wordcloud_preprocessed_string_tv1 = data_for_wordcloud_preprocessed_string_tv1 + title + ' '
wordcloud = WordCloud(width = 1000,
                      height = 500,
                      max_font_size=400,
                      max_words=200,
                      color_func=lambda *args, **kwargs: "white",
                      relative_scaling=0.5,
                      background_color="darkblue").generate(data_for_wordcloud_preprocessed_string_tv1)
wordcloud.to_file("TV1_WordCloud.png")
#-----------BBC News - Русская служба-------------
search_result_1 = get_youtube_search(query = "BBC News - Русская служба", order = "viewCount", regionCode = "RU", channel_id = 'UC8zQiuT0m1TELequJ5sp5zw')
search_result_1_df = | pd.DataFrame.from_dict(search_result_1['items']) | pandas.DataFrame.from_dict |
from io import BytesIO
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
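# `compression` (and `s3_resource`/`s3so` below) are assumed to be pytest fixtures
# defined elsewhere in the test suite; in pandas, `compression` is parametrized over
# codecs such as None, "gzip", "bz2", "zip" and "xz".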
def test_compression_roundtrip(compression):
    df = pd.DataFrame(
        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
        index=["A", "B"],
        columns=["X", "Y", "Z"],
    )
    with tm.ensure_clean() as path:
        df.to_json(path, compression=compression)
        tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
        # explicitly ensure file was compressed.
        with tm.decompress_file(path, compression) as fh:
            result = fh.read().decode("utf8")
            tm.assert_frame_equal(df, pd.read_json(result))
def test_read_zipped_json(datapath):
    uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
    uncompressed_df = pd.read_json(uncompressed_path)
    compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
    compressed_df = pd.read_json(compressed_path, compression="zip")
    tm.assert_frame_equal(uncompressed_df, compressed_df)
@td.skip_if_not_us_locale
def test_with_s3_url(compression, s3_resource, s3so):
# Bucket "pandas-test" created in tests/io/conftest.py
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f)
roundtripped_df = pd.read_json(
"s3://pandas-test/test-1", compression=compression, storage_options=s3so
)
tm.assert_frame_equal(df, roundtripped_df)
def test_lines_with_compression(compression):
    with tm.ensure_clean() as path:
        df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
        df.to_json(path, orient="records", lines=True, compression=compression)
        roundtripped_df = | pd.read_json(path, lines=True, compression=compression) | pandas.read_json
import json
import geopandas as gp
import numpy as np
import pandas as pd
import pygeos as pg
from pyproj.transformer import Transformer
from shapely.wkb import loads
def to_crs(geometries, src_crs, target_crs):
"""Convert coordinates from one CRS to another CRS
Parameters
----------
geometries : ndarray of pygeos geometries
src_crs : CRS or params to create it
target_crs : CRS or params to create it
"""
if src_crs == target_crs:
return geometries.copy()
transformer = Transformer.from_crs(src_crs, target_crs, always_xy=True)
coords = pg.get_coordinates(geometries)
new_coords = transformer.transform(coords[:, 0], coords[:, 1])
result = pg.set_coordinates(geometries.copy(), np.array(new_coords).T)
return result
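# A minimal usage sketch (hypothetical coordinates, not part of the original module):
# reproject two WGS84 points to Web Mercator.
#
#   pts = pg.points([[-122.42, 37.77], [-73.99, 40.73]])
#   pts_3857 = to_crs(pts, "EPSG:4326", "EPSG:3857")
#   pg.get_coordinates(pts_3857)  # coordinates are now in metres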
def sjoin(left, right, predicate="intersects", how="left"):
"""Join data frames on geometry, comparable to geopandas.
NOTE: pygeos-backed version currently in progress in geopandas
NOTE: left vs right must be determined in advance for best performance, unlike geopandas.
Parameters
----------
left : GeoDataFrame
right : GeoDataFrame
predicate : str, optional (default "intersects")
how : str, optional (default "left")
Returns
-------
GeoDataFrame
Includes all columns from left and all columns from right except geometry, suffixed by _right where
column names overlap.
"""
# NOTE: spatial join is inner to avoid recasting indices to float.
# Have to put inside Series to keep original indices intact because
# we use .values.data (returns ndarray) to get pygeos geometries.
joined = sjoin_geometry(
pd.Series(left.geometry.values.data, index=left.index),
pd.Series(right.geometry.values.data, index=right.index),
predicate,
how="inner",
)
joined = left.join(joined, how=how).join(
right.drop(columns=["geometry"]), on="index_right", rsuffix="_right"
)
return joined
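# A hedged usage sketch (assumes pygeos-backed GeoDataFrames, i.e. geopandas built so
# that `.geometry.values.data` yields an ndarray of pygeos geometries); the file names
# are hypothetical:
#
#   points = gp.read_file("points.shp")
#   polygons = gp.read_file("polygons.shp")
#   joined = sjoin(points, polygons)  # left join: every point plus matching polygon attributes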
def sjoin_geometry(left, right, predicate="intersects", how="inner"):
"""Use pygeos to do a spatial join between 2 series or ndarrays of pygeos geometries.
Parameters
----------
left : Series or ndarray
left geometries, will form basis of index that is returned
right : Series or ndarray
right geometries, their indices will be returned where thy meet predicate
predicate : str, optional (default: "intersects")
name of pygeos predicate function (any of the pygeos predicates should work: intersects, contains, within, overlaps, crosses)
how : str, optional (default: "inner")
one of "inner" or "left"; "right" is not supported at this time.
Returns
-------
Series
indexed on index of left, containing values of right index
"""
if not how in ("inner", "left"):
raise NotImplementedError("Other join types not implemented")
if isinstance(left, pd.Series):
left_values = left.values
left_index = left.index
else:
left_values = left
left_index = np.arange(0, len(left))
if isinstance(right, pd.Series):
right_values = right.values
right_index = right.index
else:
right_values = right
right_index = np.arange(0, len(right))
tree = pg.STRtree(right_values)
# hits are in 0-based indicates of right
hits = tree.query_bulk(left_values, predicate=predicate)
if how == "inner":
index = left_index[hits[0]]
values = right_index[hits[1]]
elif how == "left":
index = left_index.copy()
values = np.empty(shape=index.shape)
values.fill(np.nan)
values[hits[0]] = right_index[hits[1]]
return | pd.Series(values, index=index, name="index_right") | pandas.Series |
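# A minimal, self-contained example of sjoin_geometry (hypothetical data, not part of
# the original module): find which points fall inside which boxes.
if __name__ == "__main__":
    boxes = pd.Series([pg.box(0, 0, 5, 5), pg.box(10, 10, 15, 15)], index=["a", "b"])
    pts = pd.Series(pg.points([[1, 1], [12, 12], [30, 30]]), index=[101, 102, 103])
    # inner join: only points that actually intersect a box are returned
    print(sjoin_geometry(pts, boxes, predicate="intersects", how="inner"))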