| prompt | completion | api |
| --- | --- | --- |
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
# pylint: disable-msg=E1101,W0613,W0603
import os
import copy
from collections import defaultdict
import numpy as np
import pandas.json as _json
from pandas.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.formats.printing import pprint_thing
loads = _json.loads
dumps = _json.dumps
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False):
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if isinstance(obj, Series):
s = SeriesWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
elif isinstance(obj, DataFrame):
s = FrameWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
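# Hedged usage sketch (not part of the original module): round-tripping a
# small, made-up DataFrame through line-delimited 'records' JSON. With
# path_or_buf=None the JSON string is returned instead of being written.
#
#   >>> df = DataFrame({'a': [1, 2]})
#   >>> to_json(None, df, orient='records', lines=True)
#   '{"a":1}\n{"a":2}'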
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler)
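# Note: ``loads``/``dumps`` above come from ``pandas.json``, the bundled
# ujson-based C (de)serializer; ``iso_dates`` switches datetime output
# between epoch integers (the default) and ISO 8601 strings.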
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False):
"""
Convert a JSON string to a pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean or list of str, default True
List of columns to parse for dates. If True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a DataFrame using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
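Encoding/decoding a DataFrame using line-delimited ``'records'`` JSON
(``lines=True``); this round-trip example is illustrative:
>>> pd.read_json(df.to_json(orient='records', lines=True),
...              orient='records', lines=True)
  col 1 col 2
0     a     b
1     c     d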
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
encoding=encoding)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long, os.path.exists will raise here
# see GH 5874
except (TypeError, ValueError):
exists = False
if exists:
fh, handles = _get_handle(filepath_or_buffer, 'r',
encoding=encoding)
json = fh.read()
fh.close()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
if lines:
# If given a json lines file, we break the string into lines, add
# commas and put it in a json list to make a valid json object.
lines = list(StringIO(json.strip()))
json = u'[' + u','.join(lines) + u']'
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
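# Hedged sketch: ``dtype`` can also be a mapping that forces per-column
# dtypes, e.g. read_json('{"a":{"0":"1","1":"2"}}', dtype={'a': 'int64'})
# yields an int64 column 'a', while dtype=False skips dtype inference for
# the data entirely.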
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
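# _MIN_STAMPS holds one year past the epoch expressed in each unit
# (31536000 s = 365 days); during date conversion, numbers at or below
# this threshold are not treated as epoch timestamps.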
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
# don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce smaller ints to int64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
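# Coercion order above: an explicitly forced dtype wins, then date parsing,
# then object -> float64, non-64-bit float -> float64, lossless
# float/object -> int64, and finally int -> int64.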
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
integer/float in epcoh formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except:
break
return new_data, True
return data, False
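# When no date_unit is specified, each stamp unit ('s', 'ms', 'us', 'ns')
# is tried in turn and the first one that parses without error is used.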
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
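# NaT comparison semantics exercised above: ==, < and > against NaT are
# always False (NaT does not even equal NaT), while != is always True.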
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
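# Summary of the cases above: Timestamp - DatetimeIndex (either order)
# gives a TimedeltaIndex, DatetimeIndex - Timedelta gives a DatetimeIndex,
# Timestamp - TimedeltaIndex gives a DatetimeIndex, and
# TimedeltaIndex - Timestamp raises TypeError.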
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
# -*- coding: utf-8 -*-
"""
This module contains the classes for testing the model module of mpcpy.
"""
import unittest
from mpcpy import models
from mpcpy import exodata
from mpcpy import utility
from mpcpy import systems
from mpcpy import units
from mpcpy import variables
from testing import TestCaseMPCPy
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
#%%
class SimpleRC(TestCaseMPCPy):
'''Test simple model simulate and estimate.
'''
def setUp(self):
self.start_time = '1/1/2017';
self.final_time = '1/2/2017';
# Set measurements
self.measurements = {};
self.measurements['T_db'] = {'Sample' : variables.Static('T_db_sample', 1800, units.s)};
def tearDown(self):
del self.start_time
del self.final_time
del self.measurements
def test_simulate(self):
'''Test simulation of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_simulate_with_save_parameter_input_data(self):
'''Test simulation of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data,
save_parameter_input_data=True);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_estimate_one_par(self):
'''Test the estimation of one parameter of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data()]
index = ['heatCapacitor.C']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_one_par.csv', timeseries=False)
def test_estimate_two_par(self):
'''Test the estimation of two parameters of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
parameter_data['thermalResistor.R'] = {};
parameter_data['thermalResistor.R']['Value'] = variables.Static('R_Value', 0.02, units.K_W);
parameter_data['thermalResistor.R']['Minimum'] = variables.Static('R_Min', 0.001, units.K_W);
parameter_data['thermalResistor.R']['Maximum'] = variables.Static('R_Max', 0.1, units.K_W);
parameter_data['thermalResistor.R']['Free'] = variables.Static('R_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data(),
model.parameter_data['thermalResistor.R']['Value'].display_data(),]
index = ['heatCapacitor.C', 'thermalResistor.R']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_two_par.csv', timeseries=False)
def test_simulate_continue(self):
'''Test simulation of a model in steps.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
        # Simulate model in 8-hour steps
        sim_steps = pd.date_range(self.start_time, self.final_time, freq='8H')
for i in range(len(sim_steps)-1):
if i == 0:
model.simulate(sim_steps[i], sim_steps[i+1]);
else:
model.simulate('continue', sim_steps[i+1]);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_step{0}.csv'.format(i));
def test_simulate_noinputs(self):
'''Test simulation of a model with no external inputs.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_noinputs.csv');
def test_estimate_error_nofreeparameters(self):
'''Test error raised if no free parameters passed.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model_no_params = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Check error raised with no parameters
with self.assertRaises(ValueError):
model_no_params.estimate(self.start_time, self.final_time, []);
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', False, units.boolean);
# Instantiate model
model_no_free = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Check error raised with no free parameters
with self.assertRaises(ValueError):
            model_no_free.estimate(self.start_time, self.final_time, []);
def test_estimate_error_nomeasurements(self):
'''Test error raised if measurement_variable_list not in measurements dictionary.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model_no_meas = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
        # Check error raised when measurement variable is not in the measurements dictionary
with self.assertRaises(ValueError):
model_no_meas.estimate(self.start_time, self.final_time, ['wrong_meas']);
def test_instantiate_error_incompatible_estimation(self):
'''Test error raised if estimation method is incompatible with model.'''
# Set model path
fmupath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v1.fmu');
with self.assertRaises(ValueError):
model = models.Modelica(models.JModelica, models.RMSE, {}, fmupath=fmupath);
#%%
class EstimateFromJModelicaRealCSV(TestCaseMPCPy):
'''Test parameter estimation of a model using JModelica from real csv data.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path_est = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_est.csv');
self.building_source_file_path_val = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val.csv');
self.building_source_file_path_val_missing = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val_missing.csv');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurement_variable_map = {'wesTdb_mea' : ('wesTdb', units.K),
'halTdb_mea' : ('halTdb', units.K),
'easTdb_mea' : ('easTdb', units.K),
'wesPhvac_mea' : ('wesPhvac', units.W),
'halPhvac_mea' : ('halPhvac', units.W),
'easPhvac_mea' : ('easPhvac', units.W),
'Ptot_mea' : ('Ptot', units.W)}
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate test building
self.building_est = systems.RealFromCSV(self.building_source_file_path_est,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Collect measurement data
self.building_est.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building_est.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
def tearDown(self):
del self.model
del self.building_est
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Finish test
self._finish_estimate_validate('')
def test_estimate_and_validate_missing_measurements(self):
'''Test the estimation of a model's coefficients based on measured data.
Some of the validation measurement data is missing.
'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
# Instantiate validate building
building_val = systems.RealFromCSV(self.building_source_file_path_val_missing,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE_missing.csv', timeseries=False);
def test_estimate_and_validate_global_start_init(self):
'''Test the estimation of a model's coefficients based on measured data using global start and user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_winit')
def test_estimate_and_validate_global_start_woinit(self):
'''Test the estimation of a model's coefficients based on measured data using global start and no user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=False);
# Finish test
self._finish_estimate_validate('_global_start_woinit')
def test_estimate_and_validate_global_start_maxexceeded(self):
'''Test the estimation of a model's coefficients based on measured data using global start and maximum cpu time and iterations.'''
plt.close('all');
# Set maximum cpu time for JModelica
opt_options = self.model._estimate_method.opt_problem.get_optimization_options();
opt_options['IPOPT_options']['max_cpu_time'] = 60;
opt_options['IPOPT_options']['max_iter'] = 100;
self.model._estimate_method.opt_problem.set_optimization_options(opt_options);
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_maxexceeded')
def _finish_estimate_validate(self,tag):
        '''Internal method for finishing the estimate and validate tests.'''
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE{0}.csv'.format(tag), timeseries=False);
# All estimates if global estimate
try:
glo_est_data_test = self.model.get_global_estimate_data()
self.check_json(glo_est_data_test, 'estimate_gloest{0}.txt'.format(tag));
except:
pass
# Instantiate validate building
self.building_val = systems.RealFromCSV(self.building_source_file_path_val,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
self.building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = self.building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE{0}.csv'.format(tag), timeseries=False);
class EstimateFromJModelicaEmulationFMU(TestCaseMPCPy):
'''Test emulation-based parameter estimation of a model using JModelica.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v2.fmu');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate building
building_parameters_data = {};
building_parameters_data['lat'] = {};
building_parameters_data['lat']['Value'] = self.weather.lat;
self.building = systems.EmulationFromFMU(self.measurements, \
fmupath = self.building_source_file_path, \
zone_names = self.zone_names, \
parameter_data = building_parameters_data);
def tearDown(self):
del self.building
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Set exodata to building emulation
self.building.weather_data = self.weather.data;
self.building.internal_data = self.internal.data;
self.building.control_data = self.control.data;
self.building.tz_name = self.weather.tz_name;
# Collect measurement data
self.building.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name,
save_parameter_input_data=True);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Check parameter and input data were saved
df_test = | pd.read_csv('mpcpy_simulation_inputs_model.csv', index_col='Time') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_parsing_roll_call_votes.ipynb (unless otherwise specified).
__all__ = ['get_ix', 'useful_string', 'SummaryParser', 'VotesParser', 'get_all_issues']
# Cell
import PyPDF2 as pdf
from pathlib import Path
import typing
import re
import pandas as pd
import collections
import plotly.express as px
import plotly.graph_objects as go
# Cell
def get_ix(strings:typing.List[str], pattern_fun:typing.Callable): # , must_match:bool=True
    """Return the indices of all entries in `strings` for which `pattern_fun` returns True."""
    #print(strings)
    return [i for i, s in enumerate(strings) if pattern_fun(s)] # (must_match and s==pattern) or (not must_match and pattern in s)
def useful_string(s):
    """True for non-empty, non-blank strings that do not reference a '.docx' file."""
    return ('docx' not in s) and (len(s)>0) and (s != ' ')
# Cell
class SummaryParser:
    pattern = re.compile(r'(\.\.+)')
is_toc = lambda self, text: 'SOMMAIRE' in text or 'CONTENTS' in text
def __init__(self, all_texts:typing.List[str]):
self.num_pages = len(all_texts)
self.ix_start, summary_text = self.get_start(all_texts)
self.vote_names, self.vote_page_numbers = self.parse_names_and_page_numbers(summary_text)
self.ix_end, summary_text = self.get_end(all_texts, summary_text)
if self.ix_end > self.ix_start:
self.vote_names, self.vote_page_numbers = self.parse_names_and_page_numbers(summary_text)
def get_start(self, all_texts:typing.List[str]):
ix_start = get_ix(all_texts, self.is_toc)[0]
return ix_start, all_texts[ix_start]
    def parse_names_and_page_numbers(self, summary_text:str):
        texts = re.split(r'(\d+\.)(\s*[a-zA-Z])', summary_text)
ix_content = get_ix(texts, self.is_toc)[0]
texts = texts[ix_content+1:]
given_number = texts[::3]
contents = [text0.strip()+text1.strip() for text0, text1 in zip(texts[1::3],
texts[2::3])]
page_numbers, vote_names = [], []
for text in contents:
if 'docx' in text:
_text = text.split('\n')
_ix = get_ix(_text, lambda x: 'docx' in x)[0]
_text = '\n'.join(_text[:_ix])
else:
_text = text
try:
re.search(r'(\d+)$', _text).group()
except:
print('failed at parsing', [text], 'to', [_text])
page_numbers.append(int(re.search(r'(\d+)$', _text).group()))
vote_names.append(re.sub(r'\.*\d+$', '', _text).strip())
assert len(vote_names) == len(page_numbers)
assert len(set(page_numbers)) == len(page_numbers), collections.Counter(page_numbers).most_common()
return vote_names, [nr-1 for nr in page_numbers]
def get_end(self, all_texts:typing.List[str], summary_text:str):
if self.ix_start + 1 == self.vote_page_numbers[0]:
return self.ix_start, summary_text
ix_end = self.vote_page_numbers[0]
summary_text = '\n'.join(all_texts[self.ix_start:ix_end])
return ix_end, summary_text
@property
def df(self):
return pd.DataFrame({
'vote name': self.vote_names,
'start page': self.vote_page_numbers,
'end page': [nr - 1 for nr in self.vote_page_numbers[1:]] + [self.num_pages]
})
# Cell
class VotesParser:
def __init__(self, start_page:int, end_page:int, all_texts:typing.List[str]):
'`end_page` is inclusive'
assert start_page <= end_page
self.start_page = start_page
self.end_page = end_page
votes_texts = self.preprocess(all_texts)
original_votes, vote_corrections = self.check_for_corrections(votes_texts)
#print(original_votes, vote_corrections)
self.vote_counts, self.votes = self.parse_votes(original_votes)
if vote_corrections is not None:
self.corrections = self.parse_vote_corrections(vote_corrections)
@property
def df_votes(self):
df = []
for outcome in self.votes:
for party in self.votes[outcome]:
df.extend([(mep, outcome, party) for mep in self.votes[outcome][party]])
return pd.DataFrame(df, columns=['MEP', 'vote', 'Party'])
@property
def df_corrections(self):
df = []
        for outcome in self.corrections:
df.extend([(mep, outcome) for mep in self.corrections[outcome]])
return pd.DataFrame(df, columns=['MEP', 'vote'])
def preprocess(self, all_texts:typing.List[str]):
        s = slice(self.start_page, self.end_page+1)
        votes_texts = "\n".join(all_texts[s]).split("\n")
        votes_texts = [text for text in votes_texts if useful_string(text)]
return votes_texts
def parse_votes(self, votes_texts:typing.List[str]):
#print(votes_texts)
ix_yes, ix_no, ix_abstain = self.get_yes_no_abstain_indices(votes_texts)
counts = {
'yes': int(votes_texts[ix_yes-1]),
'no': int(votes_texts[ix_no-1]),
'abstain': int(votes_texts[ix_abstain-1]),
}
votes = {
'yes': self.get_mep_and_party_vote_for_outcome(votes_texts[ix_yes:ix_no-1]),
'no': self.get_mep_and_party_vote_for_outcome(votes_texts[ix_no:ix_abstain-1]),
'abstain': self.get_mep_and_party_vote_for_outcome(votes_texts[ix_abstain:])
}
#print(counts)
#print([(k, (x:=[len(val) for val in v.values()]), sum(x)) for k,v in votes.items()])
return counts, votes
def get_yes_no_abstain_indices(self, votes_texts):
ix_yes = min(get_ix(votes_texts, lambda x: x == '+'))
ix_no = get_ix(votes_texts, lambda x: x == '-')[0]
ix_abstain = max(get_ix(votes_texts, lambda x: x == '0'))
assert ix_yes > 0
assert ix_yes < ix_no
assert ix_no < ix_abstain
return ix_yes, ix_no, ix_abstain
def parse_vote_corrections(self, votes_corrections_texts:typing.List[str]):
ix_yes, ix_no, ix_abstain = self.get_yes_no_abstain_indices(votes_corrections_texts)
yes_meps = '\n'.join(votes_corrections_texts[ix_yes+1:ix_no]).split(',')
no_meps = '\n'.join(votes_corrections_texts[ix_no+1:ix_abstain]).split(',')
abstain_meps = '\n'.join(votes_corrections_texts[ix_abstain+1:]).split(',')
return {
'yes': [self.process_mep(mep) for mep in yes_meps if useful_string(mep.strip())],
'no': [self.process_mep(mep) for mep in no_meps if useful_string(mep.strip())],
'abstain': [self.process_mep(mep) for mep in abstain_meps if useful_string(mep.strip())]
}
def get_mep_and_party_vote_for_outcome(self, votes_texts:typing.List[str]):
ix_party = [(i,text) for i, text in enumerate(votes_texts)\
if len(text)>0 and (':' == text[0] or ':' == text[-1])]
ix_party = [i if (text[-1]==':' and len(text)>1) else i-1 for i, text in ix_party]
parties = [votes_texts[ix] for ix in ix_party]
meps = [','.join(votes_texts[ix0+1:ix1]).split(',') for ix0, ix1 in
zip(ix_party, ix_party[1:]+[len(votes_texts)])]
votes = {party: [self.process_mep(mep) for mep in _meps if useful_string(mep.strip())] for party, _meps in zip(parties, meps)}
return votes
def process_mep(self, mep:str, placeholder:str='unknown mep'):
mep = mep.strip()
if ':' in mep:
if mep.startswith(':'):
mep = mep[1:]
if mep.endswith(':'):
mep = mep[:-1]
if mep == '':
mep = placeholder
return mep
def check_for_corrections(self, votes_texts:typing.List[str]):
ix = get_ix(votes_texts, lambda x: 'CORRECCIONES E INTENCIONES DE VOTO' in x)
assert len(ix) <= 1
no_corrections = len(ix) == 0
if no_corrections:
return votes_texts, None
else:
return votes_texts[:ix[0]], votes_texts[ix[0]:]
# Cell
def get_all_issues(df_summary:pd.DataFrame, all_texts:typing.List[str]):
all_votes, all_corrections = [], []
all_counts = []
for i,row in df_summary.iterrows():
votes = VotesParser(row['start page'], row['end page'], all_texts)
df_votes = votes.df_votes
df_votes['vote name'] = row['vote name']
all_votes.append(df_votes)
counts = votes.vote_counts
counts['vote name'] = row['vote name']
all_counts.append(counts)
df_corrections = votes.df_corrections
df_corrections['vote name'] = row['vote name']
all_corrections.append(df_corrections)
all_counts = pd.DataFrame(all_counts)
all_votes = pd.concat(all_votes)
all_corrections = | pd.concat(all_corrections) | pandas.concat |
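# A minimal end-to-end usage sketch (hedged: 'minutes.pdf' is a hypothetical
# roll-call minutes file, and the PyPDF2 calls shown are the legacy 1.x API):
#
#   reader = pdf.PdfFileReader('minutes.pdf')
#   all_texts = [reader.getPage(i).extractText() for i in range(reader.numPages)]
#   summary = SummaryParser(all_texts)
#   result = get_all_issues(summary.df, all_texts)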
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
from tqdm import trange
# In[2]:
df = | pd.read_excel("https://censusindia.gov.in/2011Census/Language-2011/DDW-C19-0000.xlsx") | pandas.read_excel |
#!/usr/bin/env python3
# std
from math import ceil
import logging
from typing import List
# 3d party
import matplotlib.pyplot as plt
import matplotlib
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D # NOTE BELOW (*)
import numpy as np
import pandas as pd
# ours
from clusterking.util.log import get_logger
from clusterking.plots.colors import ColorScheme
# (*) This import line is not explicitly used, but do not remove it!
# It is necessary to load the 3d support!
# fixme: Maybe not take _setup_all?
# todo: also have the 3d equivalent of ClusterPlot.fill (using voxels)
# todo: option to disable legend
class ClusterPlot(object):
"""Plot clusters in parameter space.
After initialization, use the 'scatter' or 'fill' method for plotting.
You can modify the attributes of this class to tweak some properties
of the plots.
"""
def __init__(self, data):
"""
Args:
data: :py:class:`~clusterking.data.data.Data` object
"""
#: logging.Logger object
self.log = get_logger("ClusterPlot", sh_level=logging.WARNING)
#: Instance of pandas.DataFrame
self.data = data
# (Advanced) config values
# Documented in docstring of this class
#: Color scheme
self.color_scheme = None
#: List of markers of the get_clusters (scatter plot only).
self.markers = None
if not self.markers:
self.markers = ["o", "v", "^", "v", "<", ">"]
#: Maximal number of subplots
self.max_subplots = 16
#: Maximal number of columns of the subplot grid
self.max_cols = 4
#: Formatting of key-value pairs in title of plots
self.kv_formatter = "{}={:.2f}"
#: figure size of each subplot
self.fig_base_size = 4
#: Ratio of height/width. None: Automatically inferred
self.aspect_ratio = None
#: The name of the column that holds the cluster index
self.cluster_column = "cluster"
#: The name of the column that holds the benchmark yes/no information
self.bpoint_column = "bpoint"
#: Default marker size
self.default_marker_size = (
1 / 2 * matplotlib.rcParams["lines.markersize"] ** 2
)
#: Marker size of benchmark points
self.bpoint_marker_size = 6 * self.default_marker_size
#: If true, a legend is drawn
self.draw_legend = True
# Internal values: Do not modify
# ----------------------------------------------------------------------
# Names of the columns to be on the axes of
# the plot.
self._axis_columns = None
self._clusters = None
self._df_dofs = None
self._fig = None
self._axs = None
# ==========================================================================
# User access via property
# ==========================================================================
@property
def fig(self):
"""The figure."""
return self._fig
# ==========================================================================
# Quick access of simple logic
# ==========================================================================
@property
def _ndim(self):
return len(self._axis_columns)
@property
def figsize(self):
"""Figure size per subplot (width, height)"""
if self.aspect_ratio is not None:
aspect_ratio = self.aspect_ratio
else:
if len(self._axis_columns) == 1:
aspect_ratio = 2 / self.fig_base_size
elif len(self._axis_columns) == 2:
y_width = self._get_lims(1)[1] - self._get_lims(1)[0]
x_width = self._get_lims(0)[1] - self._get_lims(0)[0]
if x_width == 0:
aspect_ratio = 1
else:
aspect_ratio = y_width / x_width
else:
aspect_ratio = 1
return self.fig_base_size, aspect_ratio * self.fig_base_size
@property
def _axli(self):
"""Note: axs contains all axes (subplots) as a 2D grid, axsli contains
the same objects but as a simple list (easier to iterate over)"""
return self._axs.flatten()
@property
def _has_bpoints(self):
"""True if we have benchmark points."""
return self.bpoint_column in self.data.df.columns
@property
def _nsubplots(self):
"""Number of subplots."""
# +1 to have space for legend!
return max(1, len(self._df_dofs)) + int(self.draw_legend)
@property
def _ncols(self):
"""Number of columns of the subplot grid."""
return min(self.max_cols, self._nsubplots)
@property
def _nrows(self):
"""Number of rows of the subplot grid."""
# the ``int`` technically does not make a difference, but pycharm
# thinks that ``ceil`` returns floats and therefore complains
# otherwise
return int(ceil(self._nsubplots / self._ncols))
# ==========================================================================
# Helper functions
# ==========================================================================
def _find_dofs(self):
"""Find all parameters that are not axes on
the plots and attain at least two different values.
These parameters are called the degrees of freedom (dofs)."""
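        # Illustrative example (hypothetical parameter names): if the scan runs
        # over parameters (a, b, c) and the plot axes are (a, b), then c is a
        # dof whenever it attains at least two distinct values in the data.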
dofs = []
for col in self.data.par_cols:
if col not in self._axis_columns:
if len(self.data.df[col].unique()) >= 2:
dofs.append(col)
self.log.debug("dofs = {}".format(dofs))
self._dofs = dofs
def _sample_dofs(self):
"""For every dof, select values to be shown on it.
Save this as the dataframe self._df_dofs"""
if not self._dofs:
df_dofs = | pd.DataFrame([]) | pandas.DataFrame |
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing dataframe loading from CSV:
load_dataframe
Input:
filename,
Outputs:
Tuple with following elements:
- dataframe
- name of allele column
- name of peptide column
- name of affinity column
"""
from mhcflurry.dataset_helpers import load_dataframe
import numpy as np
from tempfile import NamedTemporaryFile
from pandas import DataFrame
from nose.tools import eq_
def make_dummy_dataframe():
dummy_ic50_values = np.array([
50000.0,
500.0,
1.0,
])
dummy_epitope_sequences = ["A" * 8, "A" * 9, "A" * 10]
# make sure we prepared the test data correctly
assert len(dummy_ic50_values) == len(dummy_epitope_sequences)
dummy_binding_data = {
"species": ["mouse", "human", "human"],
"mhc": ["H-2-Kb", "HLA-A*02:01", "HLA-A*01:01"],
"peptide_length": [len(s) for s in dummy_epitope_sequences],
"sequence": dummy_epitope_sequences,
"meas": dummy_ic50_values,
}
return | DataFrame(dummy_binding_data) | pandas.DataFrame |
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import itertools
import logging
import os
import statistics
from collections import Counter, defaultdict
from functools import reduce
from typing import Dict, List, Union
from dataclasses import dataclass
from wca.scheduler.algorithms.base import query_data_provider, sum_resources
from wca.scheduler.cluster_simulator import Resources, Node, ClusterSimulator, Task
from wca.scheduler.types import NodeName, AppName
log = logging.getLogger(__name__)
class AssignmentsCounts:
def __init__(self, tasks: List[Task], nodes: List[Node]):
per_node: Dict[NodeName, Counter]
per_cluster: Counter
unassigned: Counter
# represents sum of all other siblings in the tree
ALL = '__ALL__'
# 1) assignments per node per app
per_node: Dict[NodeName, Counter[AppName]] = {}
for node in nodes:
per_node[node.name] = Counter(Counter({ALL: 0}))
for task in tasks:
if task.assignment == node:
app_name = task.get_core_name()
per_node[node.name].update([app_name])
per_node[node.name].update([ALL])
assert sum([leaf for node_assig in per_node.values()
for _, leaf in node_assig.items()]) == len(
[task for task in tasks if task.assignment is not None]) * 2
# 2) assignments per app (for the whole cluster)
per_cluster: Counter[AppName] = Counter({ALL: 0})
# 3) unassigned apps
unassigned: Counter[AppName] = Counter({ALL: 0})
for task in tasks:
app_name = task.get_core_name()
if task.assignment is not None:
per_cluster.update([app_name])
per_cluster.update([ALL])
else:
unassigned.update([app_name])
unassigned.update([ALL])
assert sum(per_cluster.values()) + sum(unassigned.values()) == len(tasks) * 2
self.per_node = per_node
self.per_cluster = per_cluster
self.unassigned = unassigned
@dataclass
class IterationData:
cluster_resource_usage: Resources
per_node_resource_usage: Dict[Node, Resources]
broken_assignments: Dict[Node, int]
assignments_counts: AssignmentsCounts
tasks_types_count: Dict[str, int]
metrics: Dict[str, List[float]]
def get_total_capacity_and_demand(
nodes_capacities, assigned_apps, unassigned_apps_count, apps_spec):
"""Sum of total cluster capacity and sum of all requirements of all scheduled tasks"""
total_capacity = reduce(sum_resources, nodes_capacities.values())
# Total demand
total_apps_count = defaultdict(int)
assigned_apps_counts = {node_name: {app: len(tasks)
for app, tasks in apps.items()}
for node_name, apps in assigned_apps.items()}
for apps_count in list(assigned_apps_counts.values()) + [unassigned_apps_count]:
for app, count in apps_count.items():
total_apps_count[app] += count
total_demand = defaultdict(int)
for app, count in total_apps_count.items():
app_spec = apps_spec[app]
for res, value in app_spec.items():
total_demand[res] += value * count
total_demand = dict(total_demand)
return total_capacity, total_demand, total_apps_count
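# Worked example (hypothetical numbers, resource keys shown symbolically): two
# nodes offering 8 CPU each and two scheduled instances of an app requesting
# 3 CPU give a total capacity of 16 CPU and a total demand of 6 CPU, i.e. an
# ideal possible utilization of 6/16 = 37.5% for that resource.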
def generate_subexperiment_report(
simulator_args_text,
exp_name: str,
exp_dir: str,
subexp_name: str,
iterations_data: List[IterationData],
task_gen,
algorithm,
charts: bool,
metrics: Union[bool, List[str]]
) -> dict:
if metrics is True:
extra_metrics = algorithm.get_metrics_names()
elif metrics is False:
extra_metrics = []
else:
extra_metrics = metrics
# Resource types that were handled by simulator (depends on node and tasks dimensions)
resource_types = list(iterations_data[-1].per_node_resource_usage.values())[0].data.keys()
# ------------------ Text report -----------------------
with open('{}/{}.txt'.format(exp_dir, subexp_name), 'w') as fref:
# Total demand and total capacity based from data from scheduler
        nodes_capacities, assigned_apps, apps_spec, unassigned_apps_count = \
query_data_provider(algorithm.data_provider, algorithm.dimensions)
total_capacity, total_demand, total_apps_count = \
get_total_capacity_and_demand(nodes_capacities, assigned_apps,
                                          unassigned_apps_count, apps_spec)
fref.write('Total capacity: %s\n' % total_capacity)
fref.write('Total demand: %s\n' % total_demand)
ideal_utilization = {} # for each resource
for r in total_demand:
if r in total_capacity:
ideal_utilization[r] = total_demand[r] / total_capacity[r]
fref.write('Ideal possible utilization %%: %s\n' % ideal_utilization)
total_tasks_dict = dict(iterations_data[-1].tasks_types_count)
fref.write("Scheduled tasks (might not be successfully assigned): {}\n"
.format(total_tasks_dict))
# Check consistency of iterations data and data provider.
if total_apps_count != total_tasks_dict:
fref.write(
"!Scheduled tasks different from "
"total_apps_count from query! "
"total_apps_count={}\n".format(dict(total_apps_count)))
assert False, 'should not happen!'
scheduled_tasks = sum(total_tasks_dict.values())
assignments_counts = iterations_data[-1].assignments_counts
fref.write("Unassigned tasks: {}\n".format(dict(assignments_counts.unassigned)))
broken_assignments = sum(iterations_data[-1].broken_assignments.values())
fref.write("Broken assignments: {}\n".format(broken_assignments))
total_nodes = len(assignments_counts.per_node.keys())
node_names = assignments_counts.per_node.keys()
nodes_info = ','.join('%s=%d' % (node_type, len(list(nodes))) for node_type, nodes
in itertools.groupby(sorted(node_names), lambda x: x.split('_')[0]))
fref.write(
"\nAssigned tasks per cluster: {}\n".format(dict(assignments_counts.per_cluster)))
assigned_tasks = dict(assignments_counts.per_cluster)['__ALL__']
fref.write("Assigned tasks per node:\n")
for node, counters in assignments_counts.per_node.items():
fref.write(" {}: {}\n".format(node, ' '.join(
'%s=%d' % (k, v) for k, v in sorted(dict(counters).items()) if k != '__ALL__')))
# Summary about resource usage (history)
last_iteration_resource_usage_data = iterations_data[-1].cluster_resource_usage.data
fref.write("\nresource_usage = {}\n".format(last_iteration_resource_usage_data))
nodes_utilization = []
nodes_utilization_avg = []
for node, usages in iterations_data[-1].per_node_resource_usage.items():
fref.write(' %s = %s\n' % (node.name, usages.data))
nodes_utilization.extend(usages.data.values())
nodes_utilization_avg.append(statistics.mean(usages.data.values()))
util_var = statistics.variance(nodes_utilization)
available_metrics = {m.split('{')[0] for iterdata in iterations_data for m in
iterdata.metrics}
fref.write("\n\nAvailable metrics: {}\n".format(', '.join(sorted(available_metrics))))
fref.write("Start of experiment: {}\n".format(
datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M')))
fref.write("Iterations: {}\n".format(len(iterations_data)))
app_profiles = algorithm.data_provider.get_apps_profile()
app_profiles = list(sorted(app_profiles.items(), key=lambda x: x[1], reverse=True))
fref.write(
"Scores (app_profiles): {}\n".format(app_profiles)
)
# ----------------------- Stats --------------------------------------
stats = {}
stats['SIM'] = simulator_args_text
stats['TASKS'] = str(task_gen) # sum(total_tasks_dict.values())
stats['NODES'] = '%s(%s)' % (total_nodes, nodes_info)
stats['ALGO'] = str(algorithm)
stats['balance'] = 1 - util_var
for rt, value in last_iteration_resource_usage_data.items():
stats['%s_util%%' % rt.value] = value * 100
# Special handling for nodes starting with "aep name"
if any('aep' in node.name for node, _ in iterations_data[-1].per_node_resource_usage.items()):
for rt in resource_types:
stats['%s_util(AEP)%%' % (rt.value)] = 100 * statistics.mean(
resources.data[rt] for node, resources in
iterations_data[-1].per_node_resource_usage.items() if 'aep' in node.name)
else:
for rt in resource_types:
            stats['%s_util(AEP)%%' % (rt.value)] = float('nan')
stats['scheduled'] = scheduled_tasks
stats['assigned%'] = int((assigned_tasks / scheduled_tasks) * 100)
stats['assigned_broken%'] = int((broken_assignments / scheduled_tasks) * 100)
stats['utilization%'] = int(statistics.mean(last_iteration_resource_usage_data.values()) * 100)
# Chart report
if charts:
generate_charts(
exp_name,
exp_dir,
subexp_name,
extra_metrics,
iterations_data,
)
return stats
def generate_charts(exp_name, exp_dir, subexp_name, extra_metrics, iterations_data):
"""Generate charts if optional libraries are available!"""
try:
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
except ImportError:
# No installed packages required for report generation.
log.warning(
'matplotlib, seaborn, pandas or numpy not installed, charts will not be generated!')
exit(1)
iterations = list(range(len(iterations_data)))
# Generic options
extra_metrics = extra_metrics or []
plt.style.use('ggplot')
number_of_metrics = len(extra_metrics)
fig, axs = plt.subplots(2 + number_of_metrics)
fig.set_size_inches(8, 8 + 6 * number_of_metrics)
resource_types = list(iterations_data[-1].per_node_resource_usage.values())[0].data.keys()
# ---- Resource usage history chart ----
legend = []
for rt in resource_types:
usage_history = [i.cluster_resource_usage.data[rt] for i in iterations_data]
axs[0].plot(iterations, usage_history)
legend.append('%s usage' % rt.value)
axs[0].legend(legend)
axs[0].set_title('{} {}'.format(exp_name, subexp_name), fontsize=10)
axs[0].set_xlim(0, len(iterations))
axs[0].set_ylim(0, 1.01)
# ---- Broken assignments chart ---
broken_assignments = [sum(list(i.broken_assignments.values())) for i in iterations_data]
axs[1].plot(iterations, broken_assignments, 'g--')
axs[1].legend(['broken assignments'])
axs[1].set_ylabel('')
axs[1].set_xlim(0, len(iterations))
axs[1].set_ylim(min(broken_assignments), max(broken_assignments) + 1)
# ---- Metrics charts (using sns) TODO: rewrite to matplotlib ------
for chart_id, metric in enumerate(extra_metrics):
from matplotlib.markers import MarkerStyle
dicts = []
for iteration, idata in enumerate(iterations_data):
d = {k.split('{')[1][:-1]: v
for k, v in idata.metrics.items() if k.startswith(metric)}
if not d:
log.warning('metric %s not found: available: %s', metric,
', '.join([m.split('{')[0] for m in idata.metrics.keys()]))
dicts.append(d)
df = pd.DataFrame(dicts)
try:
x = sns.lineplot(data=df,
markers=MarkerStyle.filled_markers,
dashes=False,
ax=axs[2 + chart_id])
except ValueError:
x = sns.lineplot(data=df,
dashes=False,
ax=axs[2 + chart_id])
x.set_title(metric)
x.set_xlim(0, len(iterations))
fig.savefig('{}/{}.png'.format(exp_dir, subexp_name))
def generate_experiment_report(stats_dicts, exp_dir):
if not stats_dicts:
return
# Save as csv.
# Group by keys.
def by_keys(d): return tuple(d.keys())
stats_dicts_sorted = list(sorted(stats_dicts, key=by_keys))
for i, (keys, rows) in enumerate(itertools.groupby(stats_dicts_sorted, key=by_keys)):
with open(os.path.join(exp_dir, 'summary_%i.csv' % i), 'w') as f:
w = csv.DictWriter(f, keys, lineterminator='\n')
w.writeheader()
w.writerows(rows)
# Format with pandas
try:
import pandas as pd
except ImportError:
        log.warning('cannot generate reports (requires pandas, numpy and pivottablejs)')
return
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
df = | pd.DataFrame(stats_dicts) | pandas.DataFrame |
from RecSearch.DataWorkers.Abstract import DataWorkers
from RecSearch.ExperimentSupport.ExperimentData import ExperimentData
import pandas as pd
class Recommenders(DataWorkers):
"""
Recommenders class creates recommender data.
"""
# Configs inline with [[NAME]]
@classmethod
def set_config(cls):
additional_config = {'required': {'precedence': {'validate': 'integer(default=30)'}}}
cls.cfg = super().update_config(cls.cfg, additional_config)
def __init__(self, name: str, data_worker_config: dict, Data: ExperimentData):
self.class_name = self.get_classname()
super().__init__(self.class_name, name, data_worker_config, Data)
@classmethod
def get_classname(cls):
return cls.__name__
def get_recommendations(self, column_name: str, whos: pd.DataFrame, possible: pd.DataFrame,
parameters: dict) -> pd.DataFrame:
"""
Get recommendations (list of items) for every id in whos.index
:param column_name: output column name
:param whos: who(s) [with related data] to get recommendation
:param possible: potential neighbors to offer recommendation [with related data]
:param parameters: additional parameters
:return: dataframe with column containing list of ids of neighbors for each who in who(s)
"""
df = | pd.DataFrame(columns=[ckey := 'R__' + column_name]) | pandas.DataFrame |
import unittest
from triple_walk import utils
from triple_walk import rw
from triple_walk.model import CBOWTriple, SkipGramTriple
import torch
import numpy as np
import pandas as pd
class ModelTest(unittest.TestCase):
def test_model_cbow(self):
# triples
triples_list = [
("A","r1","B"),
("B","r2","D"),
("A","r1","C"),
("C","r2","E"),
("C","r3","B"),
("A","r2","D"),
("D","r3","A"),
("D","r2","C")
]
triple_list_pd = pd.DataFrame(data=triples_list,columns=["head","relation","tail"])
# convert to indexed triples
triples_index, entities_map,relations_map = utils.to_indexed_triples(triple_list_pd)
# convert to torch tensor
triples_index_tensor = torch.from_numpy(triples_index)
# get target nodes
target_entities_list = list(set(triples_index_tensor[:,0].tolist()+triples_index_tensor[:,2].tolist()))
target_entities_tensor = torch.Tensor(target_entities_list).to(int)
# build node edge index
relation_tail_index,triples_index_tensor_sorted = utils.build_relation_tail_index(triples_index_tensor,target_entities_tensor)
# create a list of all entities
all_entities_list = list(entities_map.values()) + list(relations_map.values())
# sort the list
all_entities_list.sort()
# create the padding index
padding_idx = all_entities_list[-1] + 1
# perform walk
walks = rw.walk_triples(triples_indexed=triples_index_tensor_sorted,
relation_tail_index=relation_tail_index,
target_nodes=target_entities_tensor,
walk_length=6,
seed=10,
padding_idx=padding_idx,
restart=False
)
# split walk to windows
pos_target, neg_target, context = rw.to_windows_triples_cbow(walks=walks,
window_size=4,
num_nodes=30,
padding_idx=padding_idx,
triples=triples_index_tensor_sorted,
seed=20)
# create the model
model = CBOWTriple(num_nodes=len(all_entities_list),
embedding_dim=32,
padding_index=padding_idx
)
# train the model for one step
loss = model(pos_target,neg_target,context)
assert loss != 0, "loss cannot be zero"
def test_model_skipgram(self):
# triples
triples_list = [
("A","r1","B"),
("B","r2","D"),
("A","r1","C"),
("C","r2","E"),
("C","r3","B"),
("A","r2","D"),
("D","r3","A"),
("D","r2","C")
]
triple_list_pd = | pd.DataFrame(data=triples_list,columns=["head","relation","tail"]) | pandas.DataFrame |
# from dotenv import load_dotenv
import os
import psycopg2
from psycopg2.extensions import register_adapter, AsIs
import pandas as pd
import json
from dotenv import load_dotenv
import logging
import random
from fastapi import APIRouter
import pandas as pd
from pydantic import BaseModel, Field, validator
log = logging.getLogger(__name__)
router = APIRouter()
# Load environment variables from .env
load_dotenv()
class PostgreSQL:
def __init__(self):
self.DB_NAME = os.getenv("DB_NAME")
self.DB_USER = os.getenv("DB_USER")
self.DB_PASSWORD = os.getenv("DB_PASSWORD")
self.DB_HOST = os.getenv("DB_HOST")
self.DB_PORT = os.getenv("DB_PORT")
self.connection = psycopg2.connect(
            dbname=self.DB_NAME, user=self.DB_USER, password=self.DB_PASSWORD, host=self.DB_HOST, port=self.DB_PORT)
    @staticmethod
    def adapters(*args):
for adapter in args:
register_adapter(adapter, psycopg2._psycopg.AsIs)
def cursor(self):
self.cursor = self.connection.cursor()
def execute(self, query):
self.cursor.execute(query)
def close(self):
self.connection.close()
def fetch_query_records(self, query: str):
cursor = self.connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
return result
def fetch_all_records(self):
cursor = self.connection.cursor()
# 'ID','country','province','district','district_id','sector','sector_id','cell','cell_id','village','village_id','name','project_code','type','stage','sub_stage','individuals_directly_served','span','lat','long','form','case_safe_id','opportunity_id','inc_income','inc_income_rwf','inc_income_usd','bridge_image']
query = f"""SELECT "ID", country, province, district,district_id,sector,sector_id,cell,cell_id,village,village_id,name,project_code,type,stage,sub_stage,individuals_directly_served,span,lat,long,form,case_safe_id,opportunity_id,inc_income,inc_income_rwf,inc_income_usd,bridge_image FROM public."Bridges" """
# query = f"""SELECT "ID", country FROM public."Bridges" limit 5 """
# query = f"""SELECT 'ID','country' FROM public."Bridges" """
# query = f"""SELECT * FROM public."Bridges" """
# query = f"""SELECT "ID","country" FROM public."Bridges" limit top 5"""
cursor.execute(query)
result = cursor.fetchall()
columns = ['ID', 'country', 'province', 'district', 'district_id', 'sector', 'sector_id', 'cell', 'cell_id', 'village', 'village_id', 'name', 'project_code', 'type', 'stage',
'sub_stage', 'individuals_directly_served', 'span', 'lat', 'long', 'form', 'case_safe_id', 'opportunity_id', 'inc_income', 'inc_income_rwf', 'inc_income_usd', 'bridge_image']
# columns = ['ID', 'country']
df = pd.DataFrame(result, columns=columns)
df_json = df.to_json(orient='records')
parsed = json.loads(df_json)
return parsed
def fetch_query_given_project(self, project_code: int):
cursor = self.connection.cursor()
query = f"""SELECT * FROM public."Bridges" where project_code={project_code} """
cursor.execute(query)
result = cursor.fetchall()
columns = ['ID', 'country', 'province', 'district', 'district_id', 'sector', 'sector_id', 'cell', 'cell_id', 'village', 'village_id', 'name', 'project_code', 'type', 'stage', 'sub_stage', 'individuals_directly_served', 'span', 'lat', 'long', 'community_served_1', 'community_served_1_id',
'community_served_2', 'community_served_2_id', 'community_served_3', 'community_served_3_id', 'community_served_4', 'community_served_4_id', 'community_served_5', 'community_served_5_id', 'form', 'case_safe_id', 'opportunity_id', 'inc_income', 'inc_income_rwf', 'inc_income_usd', 'bridge_image']
df = pd.DataFrame(result, columns=columns)
df_json = df.to_json(orient='records')
parsed = json.loads(df_json)
return parsed
    def fetch_query_given_project_and_columns(self, project_code: int, columns: list):
cursor = self.connection.cursor()
query = f"""SELECT * FROM public."Bridges" where project_code={project_code} """
cursor.execute(query)
result = cursor.fetchall()
columns = ['ID', 'country', 'province', 'district', 'district_id', 'sector', 'sector_id', 'cell', 'cell_id', 'village', 'village_id', 'name', 'project_code', 'type', 'stage', 'sub_stage', 'individuals_directly_served', 'span', 'lat', 'long', 'community_served_1', 'community_served_1_id',
'community_served_2', 'community_served_2_id', 'community_served_3', 'community_served_3_id', 'community_served_4', 'community_served_4_id', 'community_served_5', 'community_served_5_id', 'form', 'case_safe_id', 'opportunity_id', 'inc_income', 'inc_income_rwf', 'inc_income_usd', 'bridge_image']
        df = pd.DataFrame(result, columns=columns)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import combinations, permutations
import logging
import networkx as nx
import numpy as np
import pandas as pd
_logger = logging.getLogger(__name__)
# +
# generate a random adjacency matrix
# traces: number of Domino traces
# If traces > 1 the output will be a data frame of lists
# nodes: number of nodes
# parents_max: maximum number of possible parents per node
def adjacency_generator(traces,nodes,parents_max):
def connected_graph(adjacency_matrix):
g = nx.DiGraph(adjacency_matrix)
connect = nx.is_weakly_connected(g)
return connect
data = []
for k in range(traces):
finished = False
while not finished:
permutation = np.random.permutation(range(0,nodes)) #creating permutation matrix
idx = np.empty_like(permutation)
idx[permutation] = np.arange(len(permutation))
adjacency_matrix = np.zeros((nodes, nodes),dtype=int)
for j in range(1, nodes):
nb_parents = np.random.randint(0, min([parents_max, j])+1) # selecting number of parents for each node min 1
                for i in np.random.choice(range(0, j), nb_parents, replace=True):  # randomly selecting connections between nodes
                    adjacency_matrix[i, j] = 1
            adjacency_matrix[:] = adjacency_matrix[:, idx]
            adjacency_matrix[:] = adjacency_matrix[idx, :]
finished = connected_graph(adjacency_matrix)
data.append(adjacency_matrix) # generating nested list of adjacency matrix
return data
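# A minimal usage sketch of the generator above; the trace/node counts are
# illustrative only and not taken from any experiment in this module:
def _demo_adjacency_generator():
    # generate 3 random connected DAG adjacency matrices over 5 nodes,
    # each node having at most 2 parents
    matrices = adjacency_generator(traces=3, nodes=5, parents_max=2)
    for m in matrices:
        assert nx.is_weakly_connected(nx.DiGraph(m))
    return matrices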
# +
# Add causal relations between parent and child nodes
# adjacency_matrix: an adjacency matrix; it must be a numpy array
# nodes: number of nodes
# parents_max: maximum number of possible parents per node
# N: number of experimental cases (samples) per Domino trace
# p: probability that the initial root falls or not in the Domino trace
# eps: quantity of noise in the data
def theta_generator_multi(adjacency_matrix,nodes,parents_max, N, p, eps):
data = [] # creating empty data sets
num_rows = len(adjacency_matrix)
for b in range(num_rows):
matrix = adjacency_matrix[b]
nodes = len(matrix)
X = np.zeros(shape=(nodes, N), dtype=int)
for t in range (2):
for i in range(0, nodes):
if not sum(matrix[:, i]):
X[i,:] = np.random.binomial(1, p, size=N)
for k in range(0, nodes):
if sum(matrix[:, k]):
parents = np.where(matrix[:,k] == 1)[0]
X[k, :] = np.ones_like(X[k, :])
for a in parents:
noise = np.random.binomial(1, eps, size=N)
X[k, :] = X[k,:]*[(1-noise[j])*X[a,j]+noise[j]*(1-X[a,j]) for j in range(N)]
theta = X.sum(axis=1)/N
data.append({"Trace": matrix, "Theta": theta, "Matrix_theta": X}) # generating nested list of adjacency matrix
df = pd.DataFrame(data=data).sample(frac=1).reset_index(drop=True)
return df
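# A small usage sketch chaining the two generators above; the sample size,
# root probability and noise level are illustrative values only:
def _demo_theta_generator():
    traces = adjacency_generator(traces=2, nodes=4, parents_max=2)
    df = theta_generator_multi(traces, nodes=4, parents_max=2, N=100, p=0.5, eps=0.05)
    # each row holds one trace, its empirical thetas and the sampled binary matrix
    return df[["Trace", "Theta", "Matrix_theta"]]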
# +
# Metrics of recall and precision of the skeleton
# between ground truth and predicted graph
def diff_DAG(dag, skel):
dag_edges = set(list(dag.edges()))
skel_edges = set(list(skel.edges()))
dag_edges_inv = {(j, i) for i, j in dag_edges }
edges_skel_inv = {(j, i) for i, j in skel_edges }
additions = skel_edges - dag_edges - dag_edges_inv
deletions = dag_edges - skel_edges - edges_skel_inv
diff = len(additions) + len(deletions)
true_positives = len(dag_edges) - len(deletions)
false_negatives = len(deletions)
false_positives = len(additions)
if false_positives + true_positives != 0 and true_positives + false_negatives != 0 :
precision = true_positives/(false_positives + true_positives)
recall = true_positives/(true_positives + false_negatives)
else :
precision = 0
recall = 0
return precision, recall, len(dag_edges)
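# A worked example of the skeleton metrics above on two tiny graphs
# (edge sets chosen purely for illustration):
def _demo_diff_DAG():
    truth = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
    predicted = nx.DiGraph([(0, 1), (2, 1), (0, 3)])
    # (0,1) matches, (2,1) matches (1,2) up to direction, (0,3) is an addition,
    # (2,3) is a deletion -> precision 2/3, recall 2/3
    precision, recall, n_edges = diff_DAG(truth, predicted)
    return precision, recall, n_edges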
# +
# Metrics of recall and precision of the direction of edges
# between ground truth and predicted graph
def rec_directions(dag, pred_dag):
dag_edges = set(list(dag.edges()))
pred_dag_edges = set(list(pred_dag.edges()))
dag_edges_inv = {(j, i) for i, j in dag_edges }
edges_pred_dag_inv = {(j, i) for i, j in pred_dag_edges }
pred_dag_inv_diff = edges_pred_dag_inv - pred_dag_edges
additions = pred_dag_edges - dag_edges - pred_dag_inv_diff
deletions = dag_edges - pred_dag_edges - dag_edges_inv
true_positives = len(pred_dag_edges) - len(additions)
false_positives = len(additions)
false_negatives = len(deletions)
if false_positives + true_positives != 0 and true_positives + false_negatives != 0 :
precision = true_positives/(false_positives + true_positives)
recall = true_positives/(true_positives + false_negatives)
else :
precision = 0
recall = 0
return precision, recall
# -
def estimate_skeleton_mulalpha(indep_test_func, data_matrix, alpha, **kwargs):
l_pval = []
def method_stable(kwargs):
return ('method' in kwargs) and kwargs['method'] == "stable"
node_ids = range(data_matrix.shape[1])
node_size = data_matrix.shape[1]
sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
if 'init_graph' in kwargs:
g = kwargs['init_graph']
if not isinstance(g, nx.Graph):
raise ValueError
elif not g.number_of_nodes() == len(node_ids):
raise ValueError('init_graph not matching data_matrix shape')
for (i, j) in combinations(node_ids, 2):
if not g.has_edge(i, j):
sep_set[i][j] = None
sep_set[j][i] = None
else:
g = _create_complete_graph(node_ids)
l = 0
print("multi")
while True:
cont = False
remove_edges = []
for (i, j) in permutations(node_ids, 2):
adj_i = list(g.neighbors(i))
if j not in adj_i:
continue
else:
adj_i.remove(j)
if len(adj_i) >= l:
_logger.debug('testing %s and %s' % (i,j))
_logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
if len(adj_i) < l:
continue
for k in combinations(adj_i, l):
p_val = indep_test_func(data_matrix, i, j, set(k),
**kwargs)
l_pval.append({"i":i,"j":j,"set_k":set(k),"p_val":p_val})
if p_val > alpha:
if g.has_edge(i, j):
if method_stable(kwargs):
remove_edges.append((i, j))
else:
g.remove_edge(i, j)
sep_set[i][j] |= set(k)
sep_set[j][i] |= set(k)
break
cont = True
l += 1
if method_stable(kwargs):
g.remove_edges_from(remove_edges)
if cont is False:
break
if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
break
    df_pval = pd.DataFrame(data=l_pval)
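    # The original continuation is not shown above; presumably the estimated
    # skeleton, the separation sets and the collected p-values are returned:
    return g, sep_set, df_pval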
def meanOrderFrequency(path_to_dataset):
"""
Displays the mean order frequency by utilizing the orders table.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
    print('On average, people order once every ', orders['days_since_prior_order'].mean(), 'days')
def numOrdersVsDays(path_to_dataset):
"""
Displays the number of orders and how this number varies with change in days since last order.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
order_by_date = orders.groupby(by='days_since_prior_order').count()
fig = plt.figure(figsize = [15, 7.5])
ax = fig.add_subplot()
order_by_date['order_id'].plot.bar(color = '0.75')
ax.set_xticklabels(ax.get_xticklabels(), fontsize= 15)
plt.yticks(fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_children()[7].set_color('0.1')
ax.get_children()[14].set_color('0.1')
ax.get_children()[21].set_color('0.1')
ax.get_children()[30].set_color('0.1')
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2]], visible=True)
plt.xticks(rotation = 'horizontal');
def numOrderDaysSizeBubble(path_to_dataset):
"""
Plots a bubble plot in which:
x: Days since Previous Order
y: Number of orders/1000
size: Average Size of order given it was placed on x
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
assert isinstance(path_to_dataset, str)
order_file_path = path_to_dataset + '/orders.csv'
order_product_prior_file_path = path_to_dataset + '/order_products__prior.csv'
orders = pd.read_csv(order_file_path)
order_products_prior = pd.read_csv(order_product_prior_file_path)
order_id_count_products = order_products_prior.groupby(by='order_id').count()
orders_with_count = order_id_count_products.merge(orders, on='order_id')
order_by_date = orders.groupby(by='days_since_prior_order').count()
# take above table and group by days_since_prior_order
df_mean_order_size = orders_with_count.groupby(by='days_since_prior_order').mean()['product_id']
df_mean_order_renamed = df_mean_order_size.rename('average_order_size')
bubble_plot_dataframe = pd.concat([order_by_date['order_id'], df_mean_order_renamed], axis=1)
bubble_plot_dataframe['average_order_size'].index.to_numpy()
fig = plt.figure(figsize=[15,7.5])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy(), bubble_plot_dataframe['order_id'].values, s=((bubble_plot_dataframe['average_order_size'].values/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 31, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
fig = plt.figure(figsize=[10,9])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy()[:8], bubble_plot_dataframe['order_id'].values[:8], s=((bubble_plot_dataframe['average_order_size'].values[:8]/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 8, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
def orderTimeHeatMaps(path_to_dataset):
"""
Plots the distribution of order with respect to hour of day and day of the week.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
    grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0, 1, 2, 3, 4, 5, 6])
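    # The plotting part of this helper is not shown above; a plausible finish,
    # assuming a simple seaborn heatmap of day-of-week vs. hour-of-day counts:
    plt.figure(figsize=(12, 6))
    sns.heatmap(grouped_data, cmap="Greys")
    plt.title("Distribution of orders over day of week and hour of day")
    plt.show()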
__all__ = ["spectrometer_sensitivity"]
# standard library
from typing import List, Union
# dependent packages
import numpy as np
import pandas as pd
from .atmosphere import eta_atm_func
from .instruments import eta_Al_ohmic_850, photon_NEP_kid, window_trans
from .physics import johnson_nyquist_psd, rad_trans, T_from_psd
from .physics import c, h, k
from .filter import eta_filter_lorentzian, eta_filter_csv, weighted_average
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# main functions
def spectrometer_sensitivity(
filter_transmission_csv: str = "",
F: ArrayLike = 350.0e9,
R: float = 500.0,
F_res: int = 30,
overflow: int = 80,
pwv: float = 0.5,
EL: float = 60.0,
eta_M1_spill: ArrayLike = 0.99,
eta_M2_spill: ArrayLike = 0.90,
eta_wo_spill: ArrayLike = 0.99,
    n_wo_mirrors: int = 4,
window_AR: bool = True,
eta_co: ArrayLike = 0.65,
eta_lens_antenna_rad: ArrayLike = 0.81,
eta_circuit: ArrayLike = 0.32,
eta_IBF: ArrayLike = 0.5,
KID_excess_noise_factor: float = 1.1,
theta_maj: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
theta_min: ArrayLike = 22.0 * np.pi / 180.0 / 60.0 / 60.0,
eta_mb: ArrayLike = 0.6,
telescope_diameter: float = 10.0,
Tb_cmb: ArrayLike = 2.725,
Tp_amb: ArrayLike = 273.0,
Tp_cabin: ArrayLike = 290.0,
Tp_co: ArrayLike = 4.0,
Tp_chip: ArrayLike = 0.12,
snr: float = 5.0,
obs_hours: float = 10.0,
on_source_fraction: float = 0.4 * 0.9,
on_off: bool = True,
):
"""Calculate the sensitivity of a spectrometer.
Parameters which are functions of frequency can be a vector (see Parameters).
    Output is a pandas DataFrame which contains the results of the simulation (see Returns).
Parameters
----------
filter_transmission_csv
Optional. File location of a .csv file with transmission for filter channels
Header: Frequencies
rows: filter channels with transmission per column frequency
F
Used when filter_transmission_csv isn't used.
Frequency of the astronomical signal. Units: Hz.
R
Used when filter_transmission_csv isn't used.
Spectral resolving power in F/W_F where W_F is equivalent bandwidth and HWHM
of filters.
Units: None. See also: http://www.astrosurf.com/buil/us/spe2/hresol7.htm
F_res
Used when filter_transmission_csv isn't used.
The number of frequency bins within a FWHM
Units: none.
    overflow
        Used when filter_transmission_csv isn't used.
        The number of extra FWHMs below the first and above the last channel.
        Units: none.
pwv
Precipitable water vapour. Units: mm.
EL
Telescope elevation angle. Units: degrees.
eta_M1_spill
Spillover efficiency at the telescope primary mirror. Units: None.
eta_M2_spill
Spillover efficiency at the telescope secondary mirror. Units: None.
eta_wo_spill
Product of all spillover losses in the warm optics in the cabin. Units: None.
n_wo_mirrors
Number of cabin optics excluding telescope M1 and M2. Units: None.
window_AR
Whether the window is supposed to be coated by Ar (True) or not (False).
eta_co
Product of following. Units: None.
(1) Cold spillover.
(2) Cold ohmic losses.
(3) Filter transmission loss.
eta_lens_antenna_rad
The loss at chip temperature, *that is not in the circuit.*
Product of the following. Units: None.
        (1) Front-to-back ratio of the lens-antenna on the chip (default: 0.93).
(2) Reflection efficiency at the surface of the lens (default: 0.9).
(3) Matching efficiency, due to the mismatch (default: 0.98).
(4) Spillover efficiency of the lens-antenna (default: 0.993).
These values can be found in D2_2V3.pdf, p14.
eta_circuit
The loss at chip temperature, *in the circuit.*. Units: None.
eta_IBF
Fraction of the filter power transmission that is within the filter
channel bandwidth. Units: None. The rest of the power is cross talk,
picking up power that is in the bands of neighboring channels.
This efficiency applies to the coupling to astronomical line signals.
This efficiency does not apply to the coupling to continuum,
including the the coupling to the atmosphere for calculating the NEP.
KID_excess_noise_factor
Need to be documented. Units: None.
theta_maj
The HPBW along the major axis, assuming a Gaussian beam. Units: radians.
theta_min
The HPBW along the minor axis, assuming a Gaussian beam. Units: radians.
eta_mb
Main beam efficiency. Units: None. Note that eta_mb includes
the following terms from D2_2V3.pdf from Shahab's report.
because a decrease in these will launch the beam to the sky
but not couple it to the point source (See also FAQ.).
(1) eta_Phi.
(2) eta_amp.
telescope_diameter
Diameter of the telescope. Units: m.
Tb_cmb
Brightness temperature of the CMB. Units: K.
Tp_amb
Physical temperature of the atmosphere and ambient environment
around the telescope. Units: K.
Tp_cabin
Physical temperature of the telescope cabin. Units: K.
Tp_co
Physical temperature of the cold optics inside the cryostat. Units: K.
Tp_chip
Physical temperature of the chip. Units: K.
snr
Target signal to noise to be reached (for calculating the MDLF). Units: None.
obs_hours
Observing hours, including off-source time and the slew overhead
between on- and off-source. Units: hours.
on_source_fraction
Fraction of the time on source (between 0. and 1.). Units: None.
on_off
If the observation involves on_off chopping, then the SNR degrades
by sqrt(2) because the signal difference includes the noise twice.
Returns
----------
F
Best-fit center frequencies from filter_transmission_csv.
Same as input if filter_transmission_csv isn't used. Units: Hz.
pwv
Same as input.
EL
Same as input
eta_atm
        Atmospheric transmission within the FWHM of the channel. Units: None.
    eta_atm_cont
        Atmospheric transmission across the entire width of the filter. Units: None.
R
best-fit F/FWHM fitted from filter_transmission_csv
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: None
W_F_spec
        Best-fit equivalent bandwidth within the FWHM from filter_transmission_csv.
Equivalent bandwidth within F/R if filter_transmission_csv isn't used.
Units: Hz.
W_F_cont
Equivalent bandwidth of 1 channel including the power coupled
outside of the filter channel band. Units: Hz.
theta_maj
Same as input.
theta_min
Same as input.
eta_a
Aperture efficiency. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_mb
Main beam efficiency. Units: None.
eta_forward
        Forward efficiency within the FWHM of the channel. Units: None.
        See also: https://deshima.kibe.la/notes/324
    eta_forward_cont
        Forward efficiency across the entire width of the filter. Units: None.
See also: https://deshima.kibe.la/notes/324
eta_sw
Coupling efficiency from a spectral point source to the cryostat window. Units: None.
eta_sw_cont
Coupling efficiency from a continuum point source to the cryostat window. Units: None.
eta_window
        Transmission of the cryostat window within the FWHM of the channel.
        Units: None.
    eta_window_cont
        Transmission of the cryostat window across the entire width of the filter.
Units: None.
eta_inst
        Instrument optical efficiency within the FWHM of the channel. Units: None.
        See also: https://arxiv.org/abs/1901.06934
    eta_inst_cont
        Instrument optical efficiency across the entire width of the filter. Units: None.
See also: https://arxiv.org/abs/1901.06934
eta_circuit
Equivalent efficiency of Lorentzian fit from filter_transmission.csv.
Same as input if filter_transmission.csv isn't used. Units: None
Tb_sky
Planck brightness temperature of the sky. Units: K.
Tb_M1
Planck brightness temperature looking into the telescope primary. Units: K.
Tb_M2
Planck brightness temperature looking into the telescope secondary,
including the spillover to the cold sky. Units: K.
Tb_wo
Planck brightness temperature looking into the warm optics. Units: K.
Tb_window
Planck brightness temperature looking into the window. Units: K.
Tb_co
        Planck brightness temperature looking into the cold optics. Units: K.
Tb_filter
Planck brightness temperature looking into the lens from the filter. Units: K.
Tb_KID
Planck brightness temperature looking into the filter from the KID. Units: K.
Pkid
Power absorbed by the KID. Units: W.
Pkid_sky
Power of the sky loading to the KID. Units: W
Pkid_warm
Power of the warm optics loading to the KID. Units: W
Pkid_cold
Power of the cold optics and circuit loading to the KID. Units: W
n_ph
        Photon occupation number within the FWHM of the channel. Units: None.
        See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
    n_ph_cont
        Photon occupation number across the entire width of the filter. Units: None.
See also: http://adsabs.harvard.edu/abs/1999ASPC..180..671R
NEPkid
Noise equivalent power at the KID with respect to the absorbed power.
Units: W Hz^0.5.
NEPinst
Instrumnet NEP within within the FHWM of the channel. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEPinst_cont
Instrumnet NEP across the entire widht of the filter. Units: W Hz^0.5.
See also: https://arxiv.org/abs/1901.06934
NEFD_line
        Noise Equivalent Flux Density for coupling to a line that is not wider
        than the filter bandwidth. Units: W/m^2/Hz * s^0.5.
    NEFD_continuum
        Noise Equivalent Flux Density for coupling to a continuum source.
Units: W/m^2/Hz * s^0.5.
NEF
        Noise Equivalent Flux within the FWHM of the channel. Units: W/m^2 * s^0.5.
    NEF_cont
        Noise Equivalent Flux across the entire width of the filter. Units: W/m^2 * s^0.5.
MDLF
Minimum Detectable Line Flux. Units: W/m^2.
MS
Mapping Speed. Units: arcmin^2 mJy^-2 h^-1.
snr
Same as input.
obs_hours
Same as input.
on_source_fraction
Same as input.
on_source_hours
Observing hours on source. Units: hours.
equivalent_Trx
        Equivalent receiver noise temperature within the FWHM of the channel. Units: K.
        At the moment this assumes Rayleigh-Jeans!
    equivalent_Trx_cont
        Equivalent receiver noise temperature across the entire width of the filter.
        Units: K.
        At the moment this assumes Rayleigh-Jeans!
chi_sq
The Chi Squared value of the Lorentzian fit from filter_transmission_csv
Zero when filter_transmission_csv is not used. Units: None.
Notes
-----
The parameters to calculate the window transmission / reflection
is hard-coded in the function window_trans().
"""
# Filter approximation or read from csv?
if filter_transmission_csv == "":
# Generate filter
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_lorentzian(F, F / R, eta_circuit, F_res, overflow)
R = F / box_width
else:
# Read from csv
(
eta_filter,
eta_inband,
F,
F_int,
W_F_int,
box_height,
box_width,
chi_sq,
) = eta_filter_csv(filter_transmission_csv)
# Equivalent Bandwidth of 1 channel, modelled as a box filter.
# Used for calculating loading and coupling to a continuum source
W_F_cont = box_width / eta_IBF
# Used for calculating coupling to a line source,
# with a linewidth not wider than the filter channel
W_F_spec = box_width
# Efficiency of filter channels
eta_circuit = box_height
# #############################################################
# 1. Calculating loading power absorbed by the KID, and the NEP
# #############################################################
# .......................................................
# Efficiencies for calculating sky coupling
# .......................................................
# Ohmic loss as a function of frequency, from skin effect scaling
eta_Al_ohmic = 1.0 - (1.0 - eta_Al_ohmic_850) * np.sqrt(F_int / 850.0e9)
eta_M1_ohmic = eta_Al_ohmic
eta_M2_ohmic = eta_Al_ohmic
# Collect efficiencies at the same temperature
eta_M1 = eta_M1_ohmic * eta_M1_spill
eta_wo = eta_Al_ohmic**n_wo_mirrors * eta_wo_spill
# Forward efficiency: does/should not include window loss
# because it is defined as how much power out of
    # the cryostat window couples to the cold sky.
eta_forward_spec = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_inband,
)
eta_forward_cont = weighted_average(
eta_M1 * eta_M2_ohmic * eta_M2_spill * eta_wo + (1.0 - eta_M2_spill) * eta_wo,
eta_filter,
)
    # Calculate eta at the center of each integration bin
eta_atm = eta_atm_func(F=F_int, pwv=pwv, EL=EL)
# Johnson-Nyquist Power Spectral Density (W/Hz)
# for the physical temperatures of each stage
psd_jn_cmb = johnson_nyquist_psd(F=F_int, T=Tb_cmb)
psd_jn_amb = johnson_nyquist_psd(F=F_int, T=Tp_amb)
psd_jn_cabin = johnson_nyquist_psd(F=F_int, T=Tp_cabin)
psd_jn_co = johnson_nyquist_psd(F=F_int, T=Tp_co)
psd_jn_chip = johnson_nyquist_psd(F=F_int, T=Tp_chip)
# Optical Chain
# Sequentially calculate the Power Spectral Density (W/Hz) at each stage.
# Uses only basic radiation transfer: rad_out = eta*rad_in + (1-eta)*medium
psd_sky = rad_trans(rad_in=psd_jn_cmb, medium=psd_jn_amb, eta=eta_atm)
psd_M1 = rad_trans(rad_in=psd_sky, medium=psd_jn_amb, eta=eta_M1)
psd_M2 = rad_trans(rad_in=psd_M1, medium=psd_jn_amb, eta=eta_M2_ohmic)
psd_M2_spill = rad_trans(rad_in=psd_M2, medium=psd_sky, eta=eta_M2_spill)
psd_wo = rad_trans(rad_in=psd_M2_spill, medium=psd_jn_cabin, eta=eta_wo)
[psd_window, eta_window] = window_trans(
F=F_int,
psd_in=psd_wo,
psd_cabin=psd_jn_cabin,
psd_co=psd_jn_co,
window_AR=window_AR,
)
psd_co = rad_trans(rad_in=psd_window, medium=psd_jn_co, eta=eta_co)
psd_filter = rad_trans(rad_in=psd_co, medium=psd_jn_chip, eta=eta_lens_antenna_rad)
# Instrument optical efficiency as in JATIS 2019
# (eta_inst can be calculated only after calculating eta_window)
eta_inst_spec = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_inband)
)
eta_inst_cont = (
eta_lens_antenna_rad
* eta_co
* eta_circuit
* weighted_average(eta_window, eta_filter)
)
# Calculating Sky loading, Warm loading and Cold loading individually for reference
# (Not required for calculating Pkid, but serves as a consistency check.)
# .................................................................................
# Sky loading
psd_KID_sky_1 = (
psd_sky
* eta_M1
* eta_M2_spill
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky_2 = (
rad_trans(0, psd_sky, eta_M2_spill)
* eta_M2_ohmic
* eta_wo
* eta_lens_antenna_rad
* eta_co
* eta_window
)
psd_KID_sky = psd_KID_sky_1 + psd_KID_sky_2
skycoup = weighted_average(
psd_KID_sky / psd_sky, eta_filter
) # To compare with Jochem
# Warm loading
psd_KID_warm = (
window_trans(
F=F_int,
psd_in=rad_trans(
rad_trans(
rad_trans(
rad_trans(0, psd_jn_amb, eta_M1), 0, eta_M2_spill
), # sky spillover does not count for warm loading
psd_jn_amb,
eta_M2_ohmic,
),
psd_jn_cabin,
eta_wo,
),
psd_cabin=psd_jn_cabin,
psd_co=0,
window_AR=window_AR,
)[0]
* eta_co
* eta_lens_antenna_rad
)
# Cold loading
psd_KID_cold = rad_trans(
rad_trans(
window_trans(
F=F_int,
psd_in=0.0,
psd_cabin=0.0,
psd_co=psd_jn_co,
window_AR=window_AR,
)[0],
psd_jn_co,
eta_co,
),
psd_jn_chip,
eta_lens_antenna_rad,
)
    # Loading power absorbed by the KID
# .............................................
""" if np.all(psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold):
print("WARNING: psd_filter != psd_KID_sky + psd_KID_warm + psd_KID_cold")
"""
Pkid = np.sum(psd_filter * W_F_int * eta_filter, axis=1)
Pkid_sky = np.sum(psd_KID_sky * W_F_int * eta_filter, axis=1)
Pkid_warm = np.sum(psd_KID_warm * W_F_int * eta_filter, axis=1)
Pkid_cold = np.sum(psd_KID_cold * W_F_int * eta_filter, axis=1)
# Photon + R(ecombination) NEP of the KID
# .............................................
n_ph_spec = weighted_average(psd_filter / (h * F_int), eta_inband) * eta_circuit
n_ph_cont = weighted_average(psd_filter / (h * F_int), eta_filter) * eta_circuit
NEPkid = (
photon_NEP_kid(F_int, psd_filter * W_F_int * eta_filter, W_F_int)
* KID_excess_noise_factor
)
# Instrument NEP as in JATIS 2019
# .............................................
NEPinst_spec = NEPkid / eta_inst_spec # Instrument NEP
NEPinst_cont = NEPkid / eta_inst_cont # Instrument NEP
# ##############################################################
# 2. Calculating source coupling and sensitivtiy (MDLF and NEFD)
# ##############################################################
# Efficiencies
# .........................................................
Ag = np.pi * (telescope_diameter / 2.0) ** 2.0 # Geometric area of the telescope
omega_mb = np.pi * theta_maj * theta_min / np.log(2) / 4 # Main beam solid angle
omega_a = omega_mb / eta_mb # beam solid angle
Ae = (c / F) ** 2 / omega_a # Effective Aperture (m^2): lambda^2 / omega_a
eta_a = Ae / Ag # Aperture efficiency
# Coupling from the "S"ource to outside of "W"indow
eta_pol = 0.5 # Instrument is single polarization
eta_atm_spec = weighted_average(eta_atm, eta_inband)
eta_atm_cont = weighted_average(eta_atm, eta_filter)
eta_sw_spec = (
eta_pol * eta_a * eta_forward_spec * eta_atm_spec
) # Source-Window coupling
eta_sw_cont = (
eta_pol * eta_a * eta_forward_cont * eta_atm_cont
) # Source-Window coupling
    # NESP: Noise Equivalent Source Power (an intermediate quantity)
    # .........................................................
    NESP_spec = NEPinst_spec / eta_sw_spec  # Noise equivalent source power
    NESP_cont = NEPinst_cont / eta_sw_cont  # Noise equivalent source power
    # NEF: Noise Equivalent Flux (an intermediate quantity)
# .........................................................
# From this point, units change from Hz^-0.5 to t^0.5
# sqrt(2) is because NEP is defined for 0.5 s integration.
NEF_spec = NESP_spec / Ag / np.sqrt(2) # Noise equivalent flux
NEF_cont = NESP_cont / Ag / np.sqrt(2) # Noise equivalent flux
    # If the observation involves ON-OFF sky subtraction,
# Subtraction of two noisy sources results in sqrt(2) increase in noise.
if on_off:
NEF_spec = np.sqrt(2) * NEF_spec
NEF_cont = np.sqrt(2) * NEF_cont
# MDLF (Minimum Detectable Line Flux)
# .........................................................
# Note that eta_IBF does not matter for MDLF because it is flux.
MDLF = NEF_spec * snr / np.sqrt(obs_hours * on_source_fraction * 60.0 * 60.0)
# NEFD (Noise Equivalent Flux Density)
# .........................................................
continuum_NEFD = NEF_cont / W_F_cont
spectral_NEFD = NEF_spec / W_F_spec # = continuum_NEFD / eta_IBF > spectral_NEFD
# Mapping Speed (line, 1 channel) (arcmin^2 mJy^-2 h^-1)
# .........................................................
MS = (
60.0
* 60.0
* 1.0
* omega_mb
* (180.0 / np.pi * 60.0) ** 2.0
/ (np.sqrt(2) * spectral_NEFD * 1e29) ** 2.0
)
# Equivalent Trx
# .........................................................
Trx_spec = NEPinst_spec / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_inband)
) # assumes RJ!
Trx_cont = NEPinst_spec / k / np.sqrt(2 * W_F_cont) - T_from_psd(
F, weighted_average(psd_wo, eta_filter)
) # assumes RJ!
# ############################################
# 3. Output results as Pandas DataFrame
# ############################################
result = pd.concat(
[
pd.Series(F, name="F"),
pd.Series(pwv, name="PWV"),
pd.Series(EL, name="EL"),
pd.Series(eta_atm_spec, name="eta_atm"),
pd.Series(eta_atm_cont, name="eta_atm_cont"),
pd.Series(R, name="R"),
pd.Series(W_F_spec, name="W_F_spec"),
pd.Series(W_F_cont, name="W_F_cont"),
pd.Series(theta_maj, name="theta_maj"),
pd.Series(theta_min, name="theta_min"),
pd.Series(eta_a, name="eta_a"),
pd.Series(eta_mb, name="eta_mb"),
pd.Series(eta_forward_spec, name="eta_forward"),
pd.Series(eta_forward_cont, name="eta_forward_cont"),
pd.Series(eta_sw_spec, name="eta_sw"),
pd.Series(eta_sw_cont, name="eta_sw_cont"),
pd.Series(weighted_average(eta_window, eta_inband), name="eta_window"),
pd.Series(weighted_average(eta_window, eta_filter), name="eta_window_cont"),
pd.Series(eta_inst_spec, name="eta_inst"),
pd.Series(eta_inst_cont, name="eta_inst_cont"),
pd.Series(eta_circuit, name="eta_circuit"),
pd.Series(
weighted_average(T_from_psd(F_int, psd_sky), eta_filter), name="Tb_sky"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M1), eta_filter), name="Tb_M1"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_M2), eta_filter), name="Tb_M2"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_wo), eta_filter), name="Tb_wo"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_window), eta_filter),
name="Tb_window",
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_co), eta_filter), name="Tb_co"
),
pd.Series(
weighted_average(T_from_psd(F_int, psd_filter), eta_filter),
name="Tb_filter",
),
pd.Series(
T_from_psd(F, eta_circuit * weighted_average(psd_filter, eta_filter)),
name="Tb_KID",
),
pd.Series(Pkid, name="Pkid"),
pd.Series(Pkid_sky, name="Pkid_sky"),
pd.Series(Pkid_warm, name="Pkid_warm"),
pd.Series(Pkid_cold, name="Pkid_cold"),
pd.Series(n_ph_spec, name="n_ph"),
pd.Series(n_ph_cont, name="n_ph_cont"),
pd.Series(NEPkid, name="NEPkid"),
pd.Series(NEPinst_spec, name="NEPinst"),
pd.Series(NEPinst_cont, name="NEPinst_cont"),
            pd.Series(spectral_NEFD, name="NEFD_line"),
from datetime import datetime
from functools import lru_cache
from typing import Union, Callable, Tuple
import dateparser
import pandas as pd
from dateutil.relativedelta import relativedelta
from numpy.distutils.misc_util import as_list
from wetterdienst.dwd.metadata import Parameter, TimeResolution, PeriodType
from wetterdienst.dwd.metadata.column_names import (
DWDMetaColumns,
DWDOrigDataColumns,
DWDDataColumns,
)
from wetterdienst.dwd.metadata.column_types import (
DATE_FIELDS_REGULAR,
DATE_FIELDS_IRREGULAR,
QUALITY_FIELDS,
INTEGER_FIELDS,
STRING_FIELDS,
)
from wetterdienst.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.dwd.metadata.parameter import TIME_RESOLUTION_PARAMETER_MAPPING
from wetterdienst.dwd.metadata.time_resolution import (
TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING,
)
from wetterdienst.exceptions import InvalidParameter
def check_parameters(
parameter: Parameter, time_resolution: TimeResolution, period_type: PeriodType
) -> bool:
"""
    Check whether the given combination of parameter, time resolution and
    period type is offered by the DWD.
"""
check = TIME_RESOLUTION_PARAMETER_MAPPING.get(time_resolution, {}).get(
parameter, []
)
if period_type not in check:
return False
return True
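# A quick usage sketch of the check above; the enum members used here
# (daily climate summary, historical period) are assumed to exist in this
# wetterdienst version:
def _demo_check_parameters():
    return check_parameters(
        parameter=Parameter.CLIMATE_SUMMARY,
        time_resolution=TimeResolution.DAILY,
        period_type=PeriodType.HISTORICAL,
    )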
def coerce_field_types(
df: pd.DataFrame, time_resolution: TimeResolution
) -> pd.DataFrame:
"""
A function used to create a unique dtype mapping for a given list of column names.
This function is needed as we want to ensure the expected dtypes of the returned
DataFrame as well as for mapping data after reading it from a stored .h5 file. This
is required as we want to store the data in this file with the same format which is
a string, thus after reading data back in the dtypes have to be matched.
Args:
df: the station_data gathered in a pandas.DataFrame
time_resolution: time resolution of the data as enumeration
Return:
station data with converted dtypes
"""
for column in df.columns:
# Station ids are handled separately as they are expected to not have any nans
if column == DWDMetaColumns.STATION_ID.value:
df[column] = df[column].astype(int)
elif column in DATE_FIELDS_REGULAR:
df[column] = pd.to_datetime(
df[column],
format=TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING[time_resolution],
)
elif column in DATE_FIELDS_IRREGULAR:
df[column] = pd.to_datetime(
df[column], format=DatetimeFormat.YMDH_COLUMN_M.value
)
elif column in QUALITY_FIELDS or column in INTEGER_FIELDS:
df[column] = pd.to_numeric(df[column], errors="coerce").astype(
pd.Int64Dtype()
)
elif column in STRING_FIELDS:
df[column] = df[column].astype(pd.StringDtype())
else:
df[column] = df[column].astype(float)
return df
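# A minimal sketch of the dtype coercion above; the station id column name is
# taken from DWDMetaColumns, while "DEMO_VALUE" is a made-up column used only
# to show the float fallback:
def _demo_coerce_field_types():
    df = pd.DataFrame(
        {
            DWDMetaColumns.STATION_ID.value: ["00044", "00044"],
            "DEMO_VALUE": ["12.3", "11.8"],
        }
    )
    coerced = coerce_field_types(df, TimeResolution.DAILY)
    # station ids become integers, unmapped columns fall through to float
    return coerced.dtypes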
def parse_enumeration_from_template(
enum_: Union[str, Parameter, TimeResolution, PeriodType],
enum_template: Union[Parameter, TimeResolution, PeriodType, Callable],
) -> Union[Parameter, TimeResolution, PeriodType]:
"""
Function used to parse an enumeration(string) to a enumeration based on a template
:param "enum_": Enumeration as string or Enum
:param enum_template: Base enumeration from which the enumeration is parsed
:return: Parsed enumeration from template
:raises InvalidParameter: if no matching enumeration found
"""
try:
return enum_template[enum_.upper()]
except (KeyError, AttributeError):
try:
return enum_template(enum_)
except ValueError:
raise InvalidParameter(
f"{enum_} could not be parsed from {enum_template.__name__}."
)
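# A short usage sketch of the template parser above; the member name "DAILY"
# and the member value "climate_summary" are assumed to be valid here:
def _demo_parse_enumeration_from_template():
    by_name = parse_enumeration_from_template("DAILY", TimeResolution)
    by_value = parse_enumeration_from_template("climate_summary", Parameter)
    return by_name, by_value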
@lru_cache(maxsize=None)
def create_humanized_column_names_mapping(
time_resolution: TimeResolution, parameter: Parameter
) -> dict:
"""
    Function to create an extended humanized column names mapping. The function
    takes care of the special cases of quality columns. Therefore it requires the
time resolution and parameter.
Args:
time_resolution: time resolution enumeration
parameter: parameter enumeration
Returns:
dictionary with mappings extended by quality columns mappings
"""
column_name_mapping = {
orig_column.value: humanized_column.value
for orig_column, humanized_column in zip(
DWDOrigDataColumns[time_resolution.name][parameter.name],
DWDDataColumns[time_resolution.name][parameter.name],
)
}
return column_name_mapping
def parse_enumeration(template, values):
return list(
map(lambda x: parse_enumeration_from_template(x, template), as_list(values))
)
def parse_datetime(date_string: str) -> datetime:
"""
Function used mostly for client to parse given date
Args:
date_string: the given date as string
Returns:
any kind of datetime
"""
# Tries out any given format of DatetimeFormat enumeration
return dateparser.parse(
date_string, date_formats=[dt_format.value for dt_format in DatetimeFormat]
)
def mktimerange(
time_resolution: TimeResolution,
date_from: Union[datetime, str],
date_to: Union[datetime, str] = None,
) -> Tuple[datetime, datetime]:
"""
Compute appropriate time ranges for monthly and annual time resolutions.
This takes into account to properly floor/ceil the date_from/date_to
values to respective "begin of month/year" and "end of month/year" values.
Args:
time_resolution: time resolution as enumeration
date_from: datetime string or object
date_to: datetime string or object
Returns:
Tuple of two Timestamps: "date_from" and "date_to"
"""
if date_to is None:
date_to = date_from
if time_resolution == TimeResolution.ANNUAL:
date_from = pd.to_datetime(date_from) + relativedelta(month=1, day=1)
        date_to = pd.to_datetime(date_to) + relativedelta(month=12, day=31)  # ceil to the end of the year
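    # The original continuation is not shown here; a sketch of the remaining
    # branches, assuming the begin/end-of-month semantics described in the
    # docstring above:
    elif time_resolution == TimeResolution.MONTHLY:
        date_from = pd.to_datetime(date_from) + relativedelta(day=1)
        date_to = pd.to_datetime(date_to) + relativedelta(day=31)
    else:
        raise NotImplementedError("mktimerange is only defined for annual and monthly resolutions")
    return date_from, date_to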
import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesUnaryOps:
# __neg__, __pos__, __inv__
def test_neg(self):
ser = tm.makeStringSeries()
ser.name = "series"
        tm.assert_series_equal(-ser, -1 * ser)
#!/home/wli/env python3
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os.path as osp
import openslide
from pathlib import Path
from skimage.filters import threshold_otsu
import glob
#before importing HDFStore, make sure 'tables' is installed by pip3 install tables
from pandas import HDFStore
from openslide.deepzoom import DeepZoomGenerator
from sklearn.model_selection import StratifiedShuffleSplit
import cv2 as cv2
from skimage import io
import math
from keras.utils.np_utils import to_categorical
training_patches_tumor = pd.DataFrame(columns=['patch_path', 'is_tumor'])
tumor_patch_folder = '/home/wli/Downloads/test/augtumor2'
tumor_patch_paths = glob.glob(osp.join(tumor_patch_folder, '*.png'))
tumor_patch = pd.Series(tumor_patch_paths)
training_patches_tumor['patch_path'] = tumor_patch.values
training_patches_tumor['is_tumor'] = 1
training_patches_normal = pd.DataFrame(columns=['patch_path', 'is_tumor'])
normal_patch_folder_i = '/home/wli/Downloads/test/augnormal'
normal_patch_paths_i = glob.glob(osp.join(normal_patch_folder_i, '*.png'))
normal_patch_folder_ii = '/home/li-computer1/augnormal'
normal_patch_paths_ii = glob.glob(osp.join(normal_patch_folder_ii, '*.png'))
normal_patch_paths = normal_patch_paths_i + normal_patch_paths_ii
normal_patch = pd.Series(normal_patch_paths)
training_patches_normal['patch_path'] = normal_patch.values
training_patches_normal['is_tumor'] = 0
training_patches = pd.concat([training_patches_tumor, training_patches_normal])
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
        assert_series_equal(result, expected)
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# create target --- let's try Nasdaq value 1 day change
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
# remove nan row from target creation
data = data.dropna()
'''
############################################################################
# plot returns on NASDAQ training data
#############################################################################
fig = plt.figure(figsize=(10,10))
plt.subplot(2,1,2)
plt.subplot(2,1,1)
plt.plot(data['returns'])
plt.title("Nasdaq daily returns")
# histogram of returns
plt.subplot(2,1,2)
plt.hist(data['returns'], bins=200)
plt.xlabel("Returns")
plt.ylabel("Probability")
plt.title("Histogram daily Nasdaq returns")
plt.grid(True)
# median
median_return = data['returns'].median()
l = plt.axvspan(median_return-0.0001, median_return+0.0001, color='red')
plt.show()
'''
#########################################################################
# split into bear and bull markets
##########################################################################
bull1_start = pd.to_datetime('01-01-1990') # beginning of this dataset
bull1_end = pd.to_datetime('07-16-1990')
iraq_bear_start = pd.to_datetime('07-17-1990')
iraq_bear_end = pd.to_datetime('10-11-1990')
bull2_start = pd.to_datetime('10-12-1990')
bull2_end = pd.to_datetime('01-13-2000')
# -*- coding: utf-8 -*-
import sys
import io
import os
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
import yaml
# Test if the required parameters are received
if len(sys.argv) != 3:
sys.stderr.write("Arguments error. Usage:\n")
sys.stderr.write(
"\tpython3 src/prepare/prepare_dataset.py data-dir-path"
" features-dir-path\n"
)
sys.exit(1)
with open("params.yaml") as file:
params = yaml.load(file, Loader=yaml.SafeLoader)
params = params["dataset"]
# Configure Paths and Folders
input_data_folder, output_data_folder = sys.argv[1:]
train_in_path = Path(f"{input_data_folder}/train.csv").resolve()
test_in_path = Path(f"{input_data_folder}/test.csv").resolve()
train_out_path = Path(f"{output_data_folder}/train.csv").resolve()
test_out_path = Path(f"{output_data_folder}/test.csv").resolve()
# If doesn't exist, create the output data folder
os.makedirs(output_data_folder, exist_ok=True)
# Load train and test datasets
train_in = pd.read_csv(train_in_path, index_col=[0])
test_in = pd.read_csv(test_in_path, index_col=[0])
x_train_in, y_train_in = (
train_in.drop(params["target"], axis=1),
train_in[[params["target"]]],
)
x_test_in = test_in
## ====== Create a Pipeline to transform the data =============
preprocessor = ColumnTransformer(
"", verbose_feature_names_out=False, remainder="passthrough"
)
# ==============================================================
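# An illustrative sketch (not wired into the pipeline above) of what a
# filled-in preprocessor could look like; the column names "num_col" and
# "cat_col" are placeholders, not columns from the real dataset:
from sklearn.preprocessing import OneHotEncoder, StandardScaler
_example_preprocessor = ColumnTransformer(
    transformers=[
        ("scale", StandardScaler(), ["num_col"]),
        ("encode", OneHotEncoder(handle_unknown="ignore"), ["cat_col"]),
    ],
    verbose_feature_names_out=False,
    remainder="passthrough",
)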
# Fit & Transform the train data
x_train_out = preprocessor.fit_transform(x_train_in)
# Transform the test data
x_test_out = preprocessor.transform(x_test_in)
## Export the output data
# The index should be reconstituted.
# The columns' names should be reconstituted. In this example,
# the features of input are the same as the output. When you
# build a pipeline, make sure to rebuild the names of the columns
# correctly
cols = preprocessor.get_feature_names_out()
train_in_index = train_in.index
test_in_index = test_in.index
x_train_out = pd.DataFrame(x_train_out, index=train_in_index, columns=cols)
"""A module to describe information coming from ensemble averaging
"""
class Population():
max_moments = 2
@classmethod
def load(cls, filename):
from numpy import load
try:
pop_dict = load(filename)
except ValueError:
pop_dict = load(filename, allow_pickle=True, encoding="latin1")
items = pop_dict.item()
pop = cls(labels=items['feature_labels'])
for d, w, l in zip(items['datas'], items['weights'],
items['sample_labels']):
pop.append(d, weight=w, label=l)
return pop
def __init__(self, labels=None):
self.cumsums = None
self.datas = []
self.sample_labels = []
self.feature_labels = labels
self.weights = []
def append(self, data, weight=1.0, label=None):
"""
Insert new data to the population.
Args:
data (array-like, pandas.DataFrame, dict): the data to append.
In case of a dictionary, the data is internally converted into
a dataframe.
weight (float): the weight that the sample has in the population
label (str): the label of the sample
"""
from pandas import DataFrame
if isinstance(data, dict):
data_ = DataFrame(data)
if self.feature_labels is None:
self.feature_labels = data_.columns
else:
data_ = data
if self.feature_labels is None:
self.feature_labels = [str(i) for i in range(len(data))]
sums = safe_moments(data, self.max_moments, weight)
# [weight*data_, weight*data_**2, weight]
self.weights.append(weight)
if self.cumsums is None:
self.cumsums = sums
else:
for isum in range(self.max_moments+1):
self.cumsums[isum] = safe_add(self.cumsums[isum],
safe_multiply_and_pow(sums[isum],
1, 1))
# self.cumsums[isum] += sums[isum]
self.datas.append(data_)
self.sample_labels.append(
str(len(self.datas)-1) if label is None else label)
def to_dataframe(self, sample_labels=None, feature_labels=None):
"""
Convert the population into a pandas dataframe
Args:
sample_labels (list): overrides the sample labels
feature_labels (list): overrides the feature labels
Returns:
pandas.Dataframe: the dataframe of the population
"""
import pandas as pd
if sample_labels is None:
sample_labels = self.sample_labels
if feature_labels is None:
feature_labels = self.feature_labels
dd = {sample: data for sample, data in zip(sample_labels, self.datas)}
return pd.DataFrame.from_dict(dd, orient='index',
columns=feature_labels)
def to_dict(self):
"""
Convert the population to a dictionary
"""
return {att: getattr(self, att)
for att in ['datas', 'feature_labels', 'sample_labels',
'weights']}
def to_file(self, filename):
"""
Dump the populations to a numpy file.
Args:
filename (str): the file for the dumping
"""
from numpy import save
pop_dict = self.to_dict()
save(filename, pop_dict)
def to_excel(self, filename=None, writer=None, sheets=['mean', 'std'],
prefix='', mappable=None):
"""
Dump the population to an excel file.
Args:
filename (str): Name of the file to write the data to
writer (pandas.ExcelWriter): the instance of the writer class.
Useful to append more sheets to the same excel file
            sheets (list, str): list of the methods that will be written per
each of the sheet of the file. It can be also a string, like
"all" to write all the data of the population in a separate
file
prefix (str): prefix of the sheets to be employed
mappable (func): a function to be applied to the data before
writing them to the file
Returns:
pandas.ExcelWriter: the instance of the writer needed to create the
file.
"""
import pandas as pd
if writer is None:
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
if sheets == 'all':
datas = self.datas
shn = [prefix + '-' + str(lb) for lb in self.sample_labels]
else:
datas = [getattr(self, sht) for sht in sheets]
shn = [prefix + '-' + sht for sht in sheets]
for d, n in zip(datas, shn):
df = d if mappable is None else mappable(d)
df.to_excel(writer, sheet_name=n)
return writer
@property
def full_weight(self):
return self.cumsums[self.max_moments]
@property
def mean(self):
return safe_multiply_and_pow(self.cumsums[0],
1.0/self.full_weight, 1)
@property
def std(self):
from numpy import sqrt, abs # to avoid rundoff problems
if len(self.datas) > 1:
tmp1 = safe_sub(safe_multiply_and_pow(self.cumsums[1],
1.0/self.full_weight, 1),
safe_multiply_and_pow(self.mean, 1.0, 2))
tmp1 = safe_unary_op(tmp1, abs)
tmp1 = safe_unary_op(tmp1, sqrt)
return tmp1
# sqrt(abs(self.cumsums[1]/self.full_weight-self.mean**2))
else:
return 0.0
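# A minimal usage sketch of the class above, assuming the safe_* moment helpers
# it relies on are defined elsewhere in this module; the numbers are
# illustrative only:
def _demo_population():
    from pandas import DataFrame
    pop = Population(labels=["a", "b"])
    pop.append(DataFrame({"a": [1.0], "b": [2.0]}), weight=1.0, label="s0")
    pop.append(DataFrame({"a": [3.0], "b": [4.0]}), weight=2.0, label="s1")
    # weighted mean and standard deviation over the two samples
    return pop.mean, pop.std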
def remove_nan(df, transpose=False):
df1 = df.dropna(how='all')
dft = df1.transpose()
df1 = dft.dropna(how='all')
if transpose:
return df1
else:
return df1.transpose()
def symmetrize_df(df1):
"""
    From a dataframe that should represent a symmetric matrix,
construct the symmetrized dataframe
"""
import numpy as np
from pandas import DataFrame
A = df1.values
W = np.tril(A) + np.triu(A.T, 1)
return DataFrame(W, columns=df1.columns, index=df1.index)
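# A tiny worked example of the symmetrisation above: the lower triangle is kept
# and mirrored onto the upper triangle (the values are arbitrary):
def _demo_symmetrize_df():
    from pandas import DataFrame
    df = DataFrame([[1.0, 9.0], [2.0, 3.0]], columns=["x", "y"], index=["x", "y"])
    # np.tril keeps [[1, 0], [2, 3]]; np.triu(A.T, 1) contributes [[0, 2], [0, 0]]
    return symmetrize_df(df)  # [[1, 2], [2, 3]] as a DataFrame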
def reorder(dft, transpose=False):
from BigDFT.IO import reorder_fragments
dft1 = dft.reindex(index=reorder_fragments(dft.index))
df1 = dft1.transpose()
dft1 = df1.reindex(index=reorder_fragments(df1.index))
if transpose:
return dft1
else:
return dft1.transpose()
def clean_dataframe(df, symmetrize=True):
"""
Symmetrize a dataframe and remove the NaN rows and columns
Args:
df (Dataframe)
symmetrize (bool): symmetrize the dataframe if applicable
Returns:
Dataframe: the cleaned dataframe
"""
dft = remove_nan(df, transpose=True)
if symmetrize:
dft = symmetrize_df(dft)
return reorder(dft, transpose=True)
def stacked_dataframe(pop):
"""
Construct a stacked dataframe with all the data of the population
Warning:
Weights are ignored, therefore the average value of such stacked
dataframe may be different from the population mean.
"""
from pandas import DataFrame as DF
    df = DF()
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import os.path
import pickle
import pytest
import pandas as pd
from intake.source.tests.util import verify_datasource_interface
from .util import assert_items_equal
from intake import Catalog, RemoteCatalog
from intake.catalog.remote import RemoteCatalogEntry
TEST_CATALOG_PATH = os.path.join(os.path.dirname(__file__), 'catalog1.yml')
def test_info_describe(intake_server):
catalog = Catalog(intake_server)
assert_items_equal(list(catalog), ['use_example1', 'nested', 'entry1',
'entry1_part', 'remote_env',
'local_env', 'text', 'arr', 'datetime'])
info = catalog['entry1'].describe()
expected = {
'container': 'dataframe',
'description': 'entry1 full',
'name': 'entry1',
'direct_access': 'forbid',
'user_parameters': []
}
for k in expected:
assert info[k] == expected[k]
info = catalog['entry1_part'].describe()
assert info['direct_access'] == 'allow'
def test_bad_url(intake_server):
bad_url = intake_server + '/nonsense_prefix'
with pytest.raises(Exception):
Catalog(bad_url)
def test_metadata(intake_server):
catalog = Catalog(intake_server)
assert hasattr(catalog, 'metadata')
assert catalog.metadata['test'] is True
assert catalog.version == 1
def test_nested_remote(intake_server):
from intake.catalog.local import LocalCatalogEntry
catalog = Catalog()
catalog._entries = {
'server': LocalCatalogEntry('server', 'remote test', 'intake_remote',
True, {'url': intake_server}, [],
[], {}, None)
}
assert 'entry1' in catalog.server()
def test_remote_direct(intake_server):
from intake.container.dataframe import RemoteDataFrame
catalog = Catalog(intake_server)
s0 = catalog.entry1()
s0.discover()
s = RemoteDataFrame(intake_server.replace('intake', 'http'), {},
name='entry1', parameters={},
npartitions=s0.npartitions,
shape=s0.shape,
metadata=s0.metadata,
dtype=s0.dtype)
assert s0.read().equals(s.read())
def test_entry_metadata(intake_server):
catalog = Catalog(intake_server)
entry = catalog['arr']
assert entry.metadata == entry().metadata
def test_unknown_source(intake_server):
catalog = Catalog(intake_server)
with pytest.raises(Exception):
catalog['does_not_exist'].describe()
def test_remote_datasource_interface(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1'].get()
verify_datasource_interface(d)
def test_environment_evaluation(intake_server):
catalog = Catalog(intake_server)
import os
os.environ['INTAKE_TEST'] = 'client'
catalog['remote_env']
def test_read(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1'].get()
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.concat((pd.read_csv(file1), pd.read_csv(file2)))
meta = expected_df[:0]
info = d.discover()
assert info['datashape'] is None
assert info['dtype'] == {k: str(v) for k, v
in meta.dtypes.to_dict().items()}
assert info['npartitions'] == 2
assert info['shape'] == (None, 3) # Do not know CSV size ahead of time
md = d.metadata.copy()
md.pop('catalog_dir', None)
assert md == dict(foo='bar', bar=[1, 2, 3])
df = d.read()
assert expected_df.equals(df)
def test_read_direct(intake_server):
catalog = Catalog(intake_server)
d = catalog['entry1_part'].get(part='2')
test_dir = os.path.dirname(__file__)
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.read_csv(file2)
meta = expected_df[:0]
info = d.discover()
assert info['datashape'] is None
assert info['dtype'] == {k: str(v) for k, v
in meta.dtypes.to_dict().items()}
assert info['npartitions'] == 1
assert info['shape'] == (None, 3) # Do not know CSV size ahead of time
md = info['metadata'].copy()
md.pop('catalog_dir', None)
assert md == {'bar': [2, 4, 6], 'foo': 'baz'}
md = d.metadata.copy()
md.pop('catalog_dir', None)
assert md == dict(foo='baz', bar=[2, 4, 6])
assert d.description == 'entry1 part'
df = d.read()
assert expected_df.equals(df)
def test_read_chunks(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
chunks = list(d.read_chunked())
assert len(chunks) == 2
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.concat((pd.read_csv(file1), pd.read_csv(file2)))
assert expected_df.equals(pd.concat(chunks))
def test_read_partition(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
p2 = d.read_partition(1)
p1 = d.read_partition(0)
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
assert pd.read_csv(file1).equals(p1)
assert pd.read_csv(file2).equals(p2)
def test_close(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
d.close()
def test_with(intake_server):
catalog = Catalog(intake_server)
with catalog.entry1.get() as f:
assert f.discover()
def test_pickle(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
new_d = pickle.loads(pickle.dumps(d, pickle.HIGHEST_PROTOCOL))
df = new_d.read()
test_dir = os.path.dirname(__file__)
file1 = os.path.join(test_dir, 'entry1_1.csv')
file2 = os.path.join(test_dir, 'entry1_2.csv')
expected_df = pd.concat((pd.read_csv(file1), pd.read_csv(file2)))
assert expected_df.equals(df)
def test_to_dask(intake_server):
catalog = Catalog(intake_server)
d = catalog.entry1.get()
df = d.to_dask()
assert df.npartitions == 2
def test_remote_env(intake_server):
import os
os.environ['INTAKE_TEST'] = 'client'
catalog = Catalog(intake_server)
with pytest.raises(Exception) as e:
catalog.remote_env.get()
assert 'path-server' in str(e.value)
with pytest.raises(Exception) as e:
catalog.local_env.get()
assert 'path-client' in str(e.value)
# prevents *client* from getting env
catalog = Catalog(intake_server, getenv=False)
with pytest.raises(Exception) as e:
catalog.local_env.get()
assert 'INTAKE_TEST' in str(e.value)
def test_remote_sequence(intake_server):
import glob
d = os.path.dirname(TEST_CATALOG_PATH)
catalog = Catalog(intake_server)
assert 'text' in catalog
s = catalog.text()
s.discover()
assert s.npartitions == len(glob.glob(os.path.join(d, '*.yml')))
assert s.read_partition(0)
assert s.read()
def test_remote_arr(intake_server):
catalog = Catalog(intake_server)
assert 'arr' in catalog
s = catalog.arr()
s.discover()
assert 'remote-array' in s.to_dask().name
assert s.npartitions == 2
assert s.read_partition(0).ravel().tolist() == list(range(50))
assert s.read().ravel().tolist() == list(range(100))
def test_pagination(intake_server):
PAGE_SIZE = 2
catalog = Catalog(intake_server, page_size=PAGE_SIZE)
assert len(catalog._entries._page_cache) == 0
assert len(catalog._entries._direct_lookup_cache) == 0
# Trigger fetching one specific name.
catalog['arr']
assert len(catalog._entries._page_cache) == 0
assert len(catalog._entries._direct_lookup_cache) == 1
# Using `in` on a Catalog should not iterate.
'arr' in catalog
assert len(catalog._entries._page_cache) == 0
assert len(catalog._entries._direct_lookup_cache) == 1
# Trigger fetching just one full page.
list(zip(range(PAGE_SIZE), catalog))
assert len(catalog._entries._page_cache) == PAGE_SIZE
assert len(catalog._entries._direct_lookup_cache) == 1
# Trigger fetching all pages by list-ifying.
list(catalog)
assert len(catalog._entries._page_cache) > PAGE_SIZE
assert len(catalog._entries._direct_lookup_cache) == 1
# Now direct lookup by name should be free because everything is cached.
catalog['text']
assert len(catalog._entries._direct_lookup_cache) == 1
def test_dir(intake_server):
PAGE_SIZE = 2
catalog = Catalog(intake_server, page_size=PAGE_SIZE)
assert len(catalog._entries._page_cache) == 0
assert len(catalog._entries._direct_lookup_cache) == 0
assert not catalog._entries.complete
with pytest.warns(UserWarning, match="Tab-complete"):
key_completions = catalog._ipython_key_completions_()
with pytest.warns(UserWarning, match="Tab-complete"):
dir_ = dir(catalog)
# __dir__ triggers loading the first page.
assert len(catalog._entries._page_cache) == 2
assert len(catalog._entries._direct_lookup_cache) == 0
assert not catalog._entries.complete
assert set(key_completions) == set(['use_example1', 'nested'])
assert 'metadata' in dir_ # a normal attribute
assert 'use_example1' in dir_ # an entry from the first page
assert 'arr' not in dir_ # an entry we haven't cached yet
# Trigger fetching one specific name.
catalog['arr']
with pytest.warns(UserWarning, match="Tab-complete"):
dir_ = dir(catalog)
with pytest.warns(UserWarning, match="Tab-complete"):
key_completions = catalog._ipython_key_completions_()
assert 'metadata' in dir_
assert 'arr' in dir_ # an entry cached via direct access
assert 'arr' in key_completions
# Load everything.
list(catalog)
assert catalog._entries.complete
with pytest.warns(None) as record:
assert set(catalog) == set(catalog._ipython_key_completions_())
assert set(catalog).issubset(set(dir(catalog)))
assert len(record) == 0
# Load without pagination (with also loads everything).
catalog = Catalog(intake_server, page_size=None)
assert catalog._entries.complete
with pytest.warns(None) as record:
assert set(catalog) == set(catalog._ipython_key_completions_())
assert set(catalog).issubset(set(dir(catalog)))
assert len(record) == 0
def test_getitem_and_getattr(intake_server):
catalog = Catalog(intake_server)
catalog['arr']
with pytest.raises(KeyError):
catalog['doesnotexist']
with pytest.raises(KeyError):
catalog['_doesnotexist']
with pytest.raises(KeyError):
# This exists as an *attribute* but not as an item.
catalog['metadata']
catalog.arr # alias to catalog['arr']
catalog.metadata # a normal attribute
with pytest.raises(AttributeError):
        catalog.doesnotexist
    with pytest.raises(AttributeError):
        catalog._doesnotexist
assert catalog.arr is catalog['arr']
assert isinstance(catalog.arr, RemoteCatalogEntry)
assert isinstance(catalog.metadata, (dict, type(None)))
def test_search(intake_server):
remote_catalog = Catalog(intake_server)
local_catalog = Catalog(TEST_CATALOG_PATH)
# Basic example
remote_results = remote_catalog.search('entry1')
local_results = local_catalog.search('entry1')
expected = ['nested.entry1', 'nested.entry1_part', 'entry1', 'entry1_part']
assert isinstance(remote_results, RemoteCatalog)
assert list(local_results) == list(remote_results) == expected
# Progressive search
remote_results = remote_catalog.search('entry1').search('part')
local_results = local_catalog.search('entry1').search('part')
expected = ['nested.entry1_part', 'entry1_part']
assert isinstance(remote_results, RemoteCatalog)
assert list(local_results) == list(remote_results) == expected
# Double progressive search
remote_results = (remote_catalog
.search('entry1')
.search('part')
.search('part'))
local_results = (local_catalog
.search('entry1')
.search('part')
.search('part'))
expected = ['nested.entry1_part', 'entry1_part']
assert isinstance(remote_results, RemoteCatalog)
assert list(local_results) == list(remote_results) == expected
# Search on a nested Catalog.
remote_results = remote_catalog['nested'].search('entry1')
local_results = local_catalog['nested'].search('entry1')
expected = ['nested.entry1', 'nested.entry1_part', 'entry1', 'entry1_part']
assert isinstance(remote_results, RemoteCatalog)
assert list(local_results) == list(remote_results) == expected
# Search with empty results set
remote_results = remote_catalog.search('DOES NOT EXIST')
local_results = local_catalog.search('DOES NOT EXIST')
expected = []
assert isinstance(remote_results, RemoteCatalog)
assert list(local_results) == list(remote_results) == expected
def test_access_subcatalog(intake_server):
catalog = Catalog(intake_server)
catalog['nested']
def test_len(intake_server):
remote_catalog = Catalog(intake_server)
local_catalog = Catalog(TEST_CATALOG_PATH)
assert sum(1 for entry in local_catalog) == len(remote_catalog)
def test_datetime(intake_server):
catalog = Catalog(intake_server)
info = catalog["datetime"].describe()
print(info)
expected = {
'name': 'datetime',
'container': 'dataframe',
'description': 'datetime parameters',
'direct_access': 'forbid',
'user_parameters': [
{'name': 'time',
'description': 'some time',
'type': 'datetime',
'default': | pd.Timestamp('1970-01-01 00:00:00') | pandas.Timestamp |
import glob
import os
import pandas as pd
def retrieve(csv, csv2, structures_paths):
row = []
for folder in sorted(structures_paths):
print(folder)
input_met = os.path.join(folder, "metrics.out")
input_clu = os.path.join(folder, "cluster.out")
if not os.path.exists(input_met):
continue
df1 = | pd.read_csv(csv) | pandas.read_csv |
import xml.etree.ElementTree
import pandas as pd
import dateutil.parser
import re
def process_user_data(input_file, output_file):
root = xml.etree.ElementTree.parse(input_file).getroot()
user_list = []
    for user in root:  # Element is directly iterable; getchildren() was removed in Python 3.9
user_list.append(user.attrib)
user_data = | pd.DataFrame.from_dict(user_list) | pandas.DataFrame.from_dict |
import pytest
def test_concat_with_duplicate_columns():
import captivity
import pandas as pd
with pytest.raises(captivity.CaptivityException):
pd.concat(
[pd.DataFrame({"a": [1], "b": [2]}), pd.DataFrame({"c": [0], "b": [3]}),],
axis=1,
)
def test_concat_mismatching_columns():
import captivity
import pandas as pd
with pytest.raises(captivity.CaptivityException):
pd.concat(
[ | pd.DataFrame({"a": [1], "b": [2]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
from TACT.computation.adjustments import Adjustments, empirical_stdAdjustment
def perform_G_SFc_adjustment(inputdata):
"""
simple filtered regression results from phase 2 averages used
"""
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
m = 0.7086
c = 0.0225
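    # fixed slope (m) and intercept (c) taken from the phase-2 simple filtered regression averages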
adj = Adjustments()
if inputdata.empty or len(inputdata) < 2:
results = adj.post_adjustment_stats([None], results, "Ref_TI", "adjTI_RSD_TI")
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
inputdata = False
else:
full = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 11:57:27 2015
@author: malte
"""
import numpy as np
import pandas as pd
from scipy import sparse
import implicit
import time
class ColdImplicit:
'''
ColdImplicit(n_factors = 100, n_iterations = 10, learning_rate = 0.01, lambda_session = 0.0, lambda_item = 0.0, sigma = 0.05, init_normal = False, session_key = 'SessionId', item_key = 'ItemId')
Parameters
--------
'''
def __init__(self, n_factors = 100, epochs = 10, lr = 0.01, reg=0.01, algo='als', idf_weight=False, session_key = 'playlist_id', item_key = 'track_id'):
self.factors = n_factors
self.epochs = epochs
self.lr = lr
self.reg = reg
self.algo = algo
self.idf_weight = idf_weight
self.session_key = session_key
self.item_key = item_key
self.current_session = None
def train(self, train, test=None):
'''
Trains the predictor.
Parameters
--------
data: pandas.DataFrame
Training data. It contains the transactions of the sessions. It has one column for session IDs, one for item IDs and one for the timestamp of the events (unix timestamps).
It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).
'''
data = train['actions']
#datat = test['actions']
#data = pd.concat([data,datat])
itemids = data[self.item_key].unique()
self.n_items = len(itemids)
self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
self.itemidmap2 = pd.Series(index=np.arange(self.n_items), data=itemids)
sessionids = data[self.session_key].unique()
self.n_sessions = len(sessionids)
self.useridmap = pd.Series(data=np.arange(self.n_sessions), index=sessionids)
tstart = time.time()
data = pd.merge(data, pd.DataFrame({self.item_key:self.itemidmap.index, 'ItemIdx':self.itemidmap[self.itemidmap.index].values}), on=self.item_key, how='inner')
data = pd.merge(data, pd.DataFrame({self.session_key:self.useridmap.index, 'SessionIdx':self.useridmap[self.useridmap.index].values}), on=self.session_key, how='inner')
print( 'add index in {}'.format( (time.time() - tstart) ) )
tstart = time.time()
ones = np.ones( len(data) )
row_ind = data.ItemIdx
col_ind = data.SessionIdx
self.mat = sparse.csr_matrix((ones, (row_ind, col_ind)))
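        # implicit-feedback interaction matrix (items x sessions); every observed interaction contributes a weight of 1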
self.tmp = self.mat.T.tocsr()
print( 'matrix in {}'.format( (time.time() - tstart) ) )
if self.algo == 'als':
self.model = implicit.als.AlternatingLeastSquares( factors=self.factors, iterations=self.epochs, regularization=self.reg )
elif self.algo == 'bpr':
            self.model = implicit.bpr.BayesianPersonalizedRanking( factors=self.factors, iterations=self.epochs, regularization=self.reg )
self.model.fit(self.mat)
if self.idf_weight:
self.idf = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 11:49:36 2019
@author: MAGESHWARAN
"""
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score
# import keras
from keras.models import Sequential
from keras.layers import Dense
# -----------------------Data PreProcessing------------------------------------
data = pd.read_csv("Churn_Modelling.csv")
data = data.drop(["RowNumber", "CustomerId", "Surname"], axis=1)
data["Gender"] = data["Gender"].map({"Male": 1, "Female": 0})
data["Geography"] = data["Geography"].map({"France": 0, "Germany": 1, "Spain": 2})
MultiCat_features = ["Tenure", "NumOfProducts"]
# label_encoder = LabelEncoder()
one_hot_encoder = OneHotEncoder(sparse=False)
# ----------------One Hot encoding for MultiClass features---------------------
for feature in MultiCat_features:
temp = data[feature].values
temp = temp.reshape(-1, 1)
store = one_hot_encoder.fit_transform(temp)
index_ = [feature + "_" + str(i) for i in range(len(store[0]))]
store_df = | pd.DataFrame(store, columns=index_) | pandas.DataFrame |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
| tm.assert_frame_equal(df_sorted, df_reversed) | pandas._testing.assert_frame_equal |
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
discrete_props = ['Direction']  # columns holding discrete features (all other columns are treated as linear/continuous)
def fillnan(df, col_name):
num=0
for i in df[col_name].notnull():
        if not i or df[col_name][num] == 'NaN':  # notnull() yields numpy bools, so avoid `is False`
j=1
while | pd.isna(df[col_name][num+j]) | pandas.isna |
import io
import os
import re
import sys
import time
import pandas
import datetime
import requests
import mplfinance
from matplotlib import dates
# Basic Data
file_name = __file__[:-3]
absolute_path = os.path.dirname(os.path.abspath(__file__))
# <editor-fold desc='common'>
def load_json_config():
global file_directory
config_file = os.path.join(os.sep, absolute_path, 'Config.cfg')
with open(config_file, 'r') as file_handler:
config_data = file_handler.read()
regex = 'FILE_DIRECTORY=.*'
match = re.findall(regex, config_data)
file_directory = match[0].split('=')[1].strip()
# </editor-fold>
# <editor-fold desc='daily update'>
def save_dict_to_file(dic, txt):
f = open(txt, 'w', encoding='utf-8')
f.write(dic)
f.close()
def load_dict_from_file(txt):
f = open(txt, 'r', encoding='utf-8')
data = f.read()
f.close()
return eval(data)
def crawl_price(date=datetime.datetime.now()):
date_str = str(date).split(' ')[0].replace('-', '')
r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + date_str + '&type=ALL')
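    # keep only rows with 17 comma-separated fields that do not start with '=', stripping embedded spaces before parsing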
ret = pandas.read_csv(io.StringIO('\n'.join([i.translate({ord(c): None for c in ' '}) for i in r.text.split('\n') if
len(i.split(',')) == 17 and i[0] != '='])), header=0,
index_col='證券代號')
ret['成交金額'] = ret['成交金額'].str.replace(',', '')
ret['成交股數'] = ret['成交股數'].str.replace(',', '')
return ret
def original_crawl_price(date='2011-01-01 00:00:00'):
print('Begin: original_crawl_price!')
data = {}
success = False
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(date, dateFormatter)
while not success:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success!')
success = True
except pandas.errors.EmptyDataError:
            # holidays have no data to crawl
            print('fail! check whether the date is a holiday')
        # move on to the next day
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume.index = pandas.to_datetime(stock_volume.index)
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open.index = pandas.to_datetime(stock_open.index)
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close.index = pandas.to_datetime(stock_close.index)
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high.index = pandas.to_datetime(stock_high.index)
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low.index = pandas.to_datetime(stock_low.index)
stock_low.to_excel(writer, sheet_name='stock_low', index=True)
writer.save()
print('End: original_crawl_price!')
def update_stock_info():
print('Begin: update_stock_info!')
data = {}
count = 1
fail_count = 0
allow_continuous_fail_count = 20
try:
pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
print(r'{} Exist.'.format(stock_file_path))
except FileNotFoundError:
print(r'{} Not Exist.'.format(stock_file_path))
original_crawl_price()
stock_volume_old = pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
stock_volume_old.index = pandas.to_datetime(stock_volume_old.index)
stock_open_old = pandas.read_excel(stock_file_path, sheet_name='stock_open', index_col=0)
stock_open_old.index = pandas.to_datetime(stock_open_old.index)
stock_close_old = pandas.read_excel(stock_file_path, sheet_name='stock_close', index_col=0)
stock_close_old.index = pandas.to_datetime(stock_close_old.index)
stock_high_old = pandas.read_excel(stock_file_path, sheet_name='stock_high', index_col=0)
stock_high_old.index = pandas.to_datetime(stock_high_old.index)
stock_low_old = pandas.read_excel(stock_file_path, sheet_name='stock_low', index_col=0)
stock_low_old.index = pandas.to_datetime(stock_low_old.index)
last_date = stock_volume_old.index[-1]
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(str(last_date), dateFormatter)
date += datetime.timedelta(days=1)
if date > datetime.datetime.now():
print('Finish update_stock_info!')
sys.exit(0)
while date < datetime.datetime.now() and count <= 100:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success {} times!'.format(count))
fail_count = 0
count += 1
except pandas.errors.EmptyDataError:
            # holidays have no data to crawl
            print('fail! check whether the date is a holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume_new = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume_new.index = pandas.to_datetime(stock_volume_new.index)
stock_volume = pandas.concat([stock_volume_old, stock_volume_new], join='outer')
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open_new = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open_new.index = pandas.to_datetime(stock_open_new.index)
stock_open = pandas.concat([stock_open_old, stock_open_new], join='outer')
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close_new = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close_new.index = pandas.to_datetime(stock_close_new.index)
stock_close = | pandas.concat([stock_close_old, stock_close_new], join='outer') | pandas.concat |
# -*- coding: utf-8 -*-
r"""
general helper functions
"""
# Import standard library
import os
import logging
import itertools
from pathlib import Path
from glob import glob
from operator import concat
from functools import reduce
from os.path import join, exists
from pprint import pprint
# Import from module
# from matplotlib.figure import Figure
# from matplotlib.image import AxesImage
# from loguru import logger
from uncertainties import unumpy
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.ndimage import zoom
import matplotlib.pyplot as pl
import lightkurve as lk
from astropy.visualization import hist
from astropy import units as u
from astropy import constants as c
from astropy.timeseries import LombScargle
from astropy.modeling import models, fitting
from astropy.io import ascii
from astropy.coordinates import (
SkyCoord,
Distance,
sky_coordinate,
Galactocentric,
match_coordinates_3d,
)
from skimage import measure
from astroquery.vizier import Vizier
from astroquery.mast import Catalogs, tesscut
from astroquery.gaia import Gaia
import deepdish as dd
# Import from package
from chronos import target
from chronos import cluster
from chronos import gls
from chronos.config import DATA_PATH
log = logging.getLogger(__name__)
__all__ = [
"get_nexsci_archive",
"get_tess_ccd_info",
"get_all_campaigns",
"get_all_sectors",
"get_sector_cam_ccd",
"get_tois",
"get_toi",
"get_ctois",
"get_ctoi",
"get_target_coord",
"get_epicid_from_k2name",
"get_target_coord_3d",
"get_transformed_coord",
"query_gaia_params_of_all_tois",
"get_mamajek_table",
"get_distance",
"get_excess_from_extiction",
"get_absolute_color_index",
"get_absolute_gmag",
"parse_aperture_mask",
"make_round_mask",
"make_square_mask",
"remove_bad_data",
"is_point_inside_mask",
"get_fluxes_within_mask",
"get_harps_bank",
"get_specs_table_from_tfop",
"get_rotation_period",
"get_transit_mask",
"get_mag_err_from_flux",
"get_err_quadrature",
"get_phase",
"bin_data",
"map_float",
"map_int",
"flatten_list",
"detrend",
"query_tpf",
"query_tpf_tesscut",
"is_gaiaid_in_cluster",
"get_pix_area_threshold",
"get_above_lower_limit",
"get_below_upper_limit",
"get_between_limits",
"get_RV_K",
"get_RM_K",
"get_tois_mass_RV_K",
"get_vizier_tables",
"get_mist_eep_table",
"get_tepcat",
]
# Ax/Av
extinction_ratios = {
"U": 1.531,
"B": 1.324,
"V": 1.0,
"R": 0.748,
"I": 0.482,
"J": 0.282,
"H": 0.175,
"K": 0.112,
"G": 0.85926,
"Bp": 1.06794,
"Rp": 0.65199,
}
def query_WDSC():
"""
Washington Double Star Catalog
"""
url = "http://www.astro.gsu.edu/wds/Webtextfiles/wdsnewframe.html"
df = pd.read_csv(url)
return df
def get_tepcat(catalog="all"):
"""
TEPCat
https://www.astro.keele.ac.uk/jkt/tepcat/
Choices:
all, homogenerous, planning, obliquity
"""
base_url = "https://www.astro.keele.ac.uk/jkt/tepcat/"
if catalog == "all":
full_url = base_url + "allplanets-csv.csv"
elif catalog == "homogeneous":
full_url = base_url + "homogeneous-par-csv.csv"
elif catalog == "planning":
full_url = base_url + "observables.csv"
elif catalog == "obliquity":
full_url = base_url + "obliquity.csv"
else:
raise ValueError("catalog=[all,homogeneous,planning,obliquity]")
df = pd.read_csv(full_url)
return df
def get_mist_eep_table():
"""
For eep phases, see
http://waps.cfa.harvard.edu/MIST/README_tables.pdf
"""
fp = Path(DATA_PATH, "mist_eep_table.csv")
return pd.read_csv(fp, comment="#")
def get_nexsci_archive(table="all"):
base_url = "https://exoplanetarchive.ipac.caltech.edu/"
settings = "cgi-bin/nstedAPI/nph-nstedAPI?table="
if table == "all":
url = base_url + settings + "exomultpars"
elif table == "confirmed":
url = base_url + settings + "exoplanets"
elif table == "composite":
url = base_url + settings + "compositepars"
else:
raise ValueError("table=[all, confirmed, composite]")
df = pd.read_csv(url)
return df
def get_vizier_tables(key, tab_index=None, row_limit=50, verbose=True):
"""
Parameters
----------
key : str
vizier catalog key
tab_index : int
table index to download and parse
Returns
-------
tables if tab_index is None else parsed df
"""
if row_limit == -1:
msg = f"Downloading all tables in "
else:
msg = f"Downloading the first {row_limit} rows of each table in "
msg += f"{key} from vizier."
if verbose:
print(msg)
# set row limit
Vizier.ROW_LIMIT = row_limit
tables = Vizier.get_catalogs(key)
errmsg = f"No data returned from Vizier."
assert tables is not None, errmsg
if tab_index is None:
if verbose:
print({k: tables[k]._meta["description"] for k in tables.keys()})
return tables
else:
df = tables[tab_index].to_pandas()
df = df.applymap(
lambda x: x.decode("ascii") if isinstance(x, bytes) else x
)
return df
def get_tois_mass_RV_K(clobber=False):
fp = Path(DATA_PATH, "TOIs2.csv")
if clobber:
try:
from mrexo import predict_from_measurement, generate_lookup_table
except Exception:
raise ModuleNotFoundError("pip install mrexo")
tois = get_tois()
masses = {}
for key, row in tqdm(tois.iterrows()):
toi = row["TOI"]
Rp = row["Planet Radius (R_Earth)"]
Rp_err = row["Planet Radius (R_Earth) err"]
Mp, (Mp_lo, Mp_hi), iron_planet = predict_from_measurement(
measurement=Rp,
measurement_sigma=Rp_err,
qtl=[0.16, 0.84],
dataset="kepler",
)
masses[toi] = (Mp, Mp_lo, Mp_hi)
df = pd.DataFrame(masses).T
df.columns = [
"Planet mass (Mp_Earth)",
"Planet mass (Mp_Earth) lo",
"Planet mass (Mp_Earth) hi",
]
df.index.name = "TOI"
df = df.reset_index()
df["RV_K_lo"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) lo"],
with_unit=True,
)
df["RV_K_hi"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) hi"],
with_unit=True,
)
joint = pd.merge(tois, df, on="TOI")
joint.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
joint = pd.read_csv(fp)
print(f"Loaded: {fp}")
return joint
def get_phase(time, period, epoch, offset=0.5):
"""phase offset -0.5,0.5
"""
phase = (((((time - epoch) / period) + offset) % 1) / offset) - 1
return phase
def bin_data(array, binsize, func=np.mean):
"""
"""
a_b = []
for i in range(0, array.shape[0], binsize):
a_b.append(func(array[i : i + binsize], axis=0))
return a_b
def get_tess_ccd_info(target_coord):
"""use search_targetpixelfile like get_all_sectors?"""
ccd_info = tesscut.Tesscut.get_sectors(target_coord)
errmsg = f"Target not found in any TESS sectors"
assert len(ccd_info) > 0, errmsg
return ccd_info.to_pandas()
def get_all_sectors(target_coord):
""" """
ccd_info = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in ccd_info["sector"].values]
return np.array(all_sectors)
def get_all_campaigns(epicid):
""" """
res = lk.search_targetpixelfile(
f"K2 {epicid}", campaign=None, mission="K2"
)
errmsg = "No data found"
assert len(res) > 0, errmsg
df = res.table.to_pandas()
campaigns = df["observation"].apply(lambda x: x.split()[-1]).values
return np.array([int(c) for c in campaigns])
def get_sector_cam_ccd(target_coord, sector=None):
"""get TESS sector, camera, and ccd numbers using Tesscut
"""
df = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in df["sector"].values]
if sector is not None:
sector_idx = df["sector"][df["sector"].isin([sector])].index.tolist()
if len(sector_idx) == 0:
raise ValueError(f"Available sector(s): {all_sectors}")
cam = str(df.iloc[sector_idx]["camera"].values[0])
ccd = str(df.iloc[sector_idx]["ccd"].values[0])
else:
sector_idx = 0
sector = str(df.iloc[sector_idx]["sector"])
cam = str(df.iloc[sector_idx]["camera"])
ccd = str(df.iloc[sector_idx]["ccd"])
return sector, cam, ccd
def is_gaiaid_in_cluster(
gaiaid, cluster_name=None, catalog_name="Bouma2019", verbose=True
):
"""
See scripts/check_target_in_cluster
"""
# reduce the redundant names above
gaiaid = int(gaiaid)
if cluster_name is None:
cc = cluster.ClusterCatalog(catalog_name=catalog_name, verbose=False)
df_mem = cc.query_catalog(return_members=True)
else:
c = cluster.Cluster(
catalog_name=catalog_name, cluster_name=cluster_name, verbose=False
)
df_mem = c.query_cluster_members()
idx = df_mem.source_id.isin([gaiaid])
if idx.sum() > 0:
if verbose:
if cluster_name is None:
cluster_match = df_mem[idx].Cluster.values[0]
else:
# TODO: what if cluster_match != cluster_name?
cluster_match = cluster_name
print(
f"Gaia DR2 {gaiaid} is IN {cluster_match} cluster based on {catalog_name} catalog!"
)
return True
else:
if verbose:
print(f"Gaia DR2 {gaiaid} is NOT in {catalog_name} catalog!")
return False
def query_tpf(
query_str,
sector=None,
campaign=None,
quality_bitmask="default",
apply_data_quality_mask=False,
mission="TESS",
verbose=True,
):
"""
"""
if verbose:
print(f"Searching targetpixelfile for {query_str} using lightkurve")
tpf = lk.search_targetpixelfile(
query_str, mission=mission, sector=sector, campaign=campaign
).download()
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def query_tpf_tesscut(
query_str,
sector=None,
quality_bitmask="default",
cutout_size=(15, 15),
apply_data_quality_mask=False,
verbose=True,
):
"""
"""
if verbose:
if isinstance(query_str, sky_coordinate.SkyCoord):
query = f"ra,dec=({query_str.to_string()})"
else:
query = query_str
print(f"Searching targetpixelfile for {query} using Tesscut")
tpf = lk.search_tesscut(query_str, sector=sector).download(
quality_bitmask=quality_bitmask, cutout_size=cutout_size
)
assert tpf is not None, "No results from Tesscut search."
# remove zeros
zero_mask = (tpf.flux_err == 0).all(axis=(1, 2))
if zero_mask.sum() > 0:
tpf = tpf[~zero_mask]
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def detrend(self, polyorder=1, break_tolerance=10):
"""mainly to be added as method to lk.LightCurve
"""
lc = self.copy()
half = lc.time.shape[0] // 2
if half % 2 == 0:
# add 1 if even
half += 1
return lc.flatten(
window_length=half,
polyorder=polyorder,
break_tolerance=break_tolerance,
)
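# Note: `detrend` above is written so it can also be attached to lightkurve's
# LightCurve class; an illustrative sketch (not part of the original module):
#   lk.LightCurve.detrend = detrend
#   flat_lc = my_lightcurve.detrend(polyorder=2)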
def get_rotation_period(
time,
flux,
flux_err=None,
min_per=0.5,
max_per=None,
method="ls",
npoints=20,
plot=True,
verbose=True,
):
"""
time, flux : array
time and flux
    min_per, max_per : float
        minimum & maximum period (default max is half the baseline, e.g. ~13 days)
method : str
ls = lomb-scargle; gls = generalized ls
npoints : int
datapoints around which to fit a Gaussian
Note:
1. Transits are assumed to be masked already
2. The period and uncertainty were determined from the mean and the
half-width at half-maximum of a Gaussian fit to the periodogram peak, respectively
See also:
https://arxiv.org/abs/1702.03885
"""
baseline = int(time[-1] - time[0])
max_per = max_per if max_per is not None else baseline / 2
if method == "ls":
if verbose:
print("Using Lomb-Scargle method")
ls = LombScargle(time, flux, dy=flux_err)
frequencies, powers = ls.autopower(
minimum_frequency=1.0 / max_per, maximum_frequency=1.0 / min_per
)
idx = np.argmax(powers)
while npoints > idx:
npoints -= 1
best_freq = frequencies[idx]
best_period = 1.0 / best_freq
# specify which points to fit a gaussian
x = (1 / frequencies)[idx - npoints : idx + npoints]
y = powers[idx - npoints : idx + npoints]
# Fit the data using a 1-D Gaussian
g_init = models.Gaussian1D(amplitude=0.5, mean=best_period, stddev=1)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
label = f"P={g.mean.value:.2f}+/-{g.stddev.value:.2f} d"
if plot:
# Plot the data with the best-fit model
pl.plot(x, y, "ko", label="_nolegend_")
pl.plot(x, g(x), label="_nolegend_")
pl.ylabel("Lomb-Scargle Power")
pl.xlabel("Period [days]")
pl.axvline(g.mean, 0, 1, ls="--", c="r", label=label)
pl.legend()
if verbose:
print(label)
return (g.mean.value, g.stddev.value)
elif method == "gls":
if verbose:
print("Using Generalized Lomb-Scargle method")
data = (time, flux, flux_err)
ls = gls.Gls(data, Pbeg=min_per, Pend=max_per, verbose=verbose)
prot, prot_err = ls.hpstat["P"], ls.hpstat["e_P"]
if plot:
_ = ls.plot(block=False, figsize=(10, 8))
return (prot, prot_err)
else:
raise ValueError("Use method=[ls | gls]")
def get_transit_mask(lc, period, epoch, duration_hours):
"""
lc : lk.LightCurve
lightcurve that contains time and flux properties
mask = []
t0 += np.ceil((time[0] - dur - t0) / period) * period
for t in np.arange(t0, time[-1] + dur, period):
mask.extend(np.where(np.abs(time - t) < dur / 2.)[0])
return np.array(mask)
"""
assert isinstance(lc, lk.LightCurve)
assert (
(period is not None)
& (epoch is not None)
& (duration_hours is not None)
)
temp_fold = lc.fold(period, t0=epoch)
fractional_duration = (duration_hours / 24.0) / period
phase_mask = np.abs(temp_fold.phase) < (fractional_duration * 1.5)
transit_mask = np.in1d(lc.time, temp_fold.time_original[phase_mask])
return transit_mask
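# Illustrative usage sketch (hypothetical ephemeris values, not from the original module):
#   in_transit = get_transit_mask(lc, period=3.5, epoch=1327.52, duration_hours=2.4)
#   out_of_transit_lc = lc[~in_transit]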
def get_harps_bank(
target_coord, separation=30, outdir=DATA_PATH, verbose=True
):
"""
Check if target has archival HARPS data from:
http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
See also https://github.com/3fon3fonov/HARPS_RVBank
For column meanings:
https://www2.mpia-hd.mpg.de/homes/trifonov/HARPS_RVBank_header.txt
"""
homeurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank.html"
fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
if os.path.exists(fp):
df = pd.read_csv(fp)
msg = f"Loaded: {fp}\n"
else:
if verbose:
print(
f"Downloading HARPS bank from {homeurl}. This may take a while."
)
# csvurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank_v1.csv"
# df = pd.read_csv(csvurl)
df = pd.read_html(homeurl, header=0)[0] # choose first table
df.to_csv(fp, index=False)
msg = f"Saved: {fp}\n"
if verbose:
print(msg)
# coordinates
coords = SkyCoord(
ra=df["RA"],
dec=df["DEC"],
distance=df["Dist [pc]"],
unit=(u.hourangle, u.deg, u.pc),
)
# check which falls within `separation`
idxs = target_coord.separation(coords) < separation * u.arcsec
if idxs.sum() > 0:
# result may be multiple objects
res = df[idxs]
if verbose:
targets = res["Target"].values
print(f"There are {len(res)} matches: {targets}")
print(f"{df.loc[idxs, df.columns[7:14]].T}\n\n")
return res
else:
# find the nearest HARPS object in the database to target
# idx, sep2d, dist3d = match_coordinates_3d(
# target_coord, coords, nthneighbor=1)
idx = target_coord.separation(coords).argmin()
sep2d = target_coord.separation(coords[idx])
nearest_obj = df.iloc[idx]["Target"]
ra, dec = df.iloc[idx][["RA", "DEC"]]
print(
f"Nearest HARPS object is\n{nearest_obj}: ra,dec=({ra},{dec}) @ d={sep2d.arcsec/60:.2f} arcmin\n"
)
return None
# def get_harps_bank(url, verbose=True):
# """
# Download archival HARPS data from url
# http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
# """
# homeurl = ""
# fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
# return
def get_mamajek_table(clobber=False, verbose=True, data_loc=DATA_PATH):
fp = join(data_loc, f"mamajek_table.csv")
if not exists(fp) or clobber:
url = "http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt"
# cols="SpT Teff logT BCv Mv logL B-V Bt-Vt G-V U-B V-Rc V-Ic V-Ks J-H H-Ks Ks-W1 W1-W2 W1-W3 W1-W4 Msun logAge b-y M_J M_Ks Mbol i-z z-Y R_Rsun".split(' ')
df = pd.read_csv(
url,
skiprows=21,
skipfooter=524,
delim_whitespace=True,
engine="python",
)
# tab = ascii.read(url, guess=None, data_start=0, data_end=124)
# df = tab.to_pandas()
# replace ... with NaN
df = df.replace(["...", "....", "....."], np.nan)
# replace header
# df.columns = cols
# drop last duplicate column
df = df.drop(df.columns[-1], axis=1)
# df['#SpT_num'] = range(df.shape[0])
# df['#SpT'] = df['#SpT'].astype('category')
# remove the : type in M_J column
df["M_J"] = df["M_J"].apply(lambda x: str(x).split(":")[0])
# convert columns to float
for col in df.columns:
if col == "#SpT":
df[col] = df[col].astype("category")
else:
df[col] = df[col].astype(float)
# if col=='SpT':
# df[col] = df[col].astype('categorical')
# else:
# df[col] = df[col].astype(float)
df.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
df = pd.read_csv(fp)
if verbose:
print(f"Loaded: {fp}")
return df
def get_mag_err_from_flux(flux, flux_err):
"""
equal to 1.086/(S/N)
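    since 2.5*log10(1 + x) ~= (2.5/ln 10)*x ~= 1.086*x for small x = flux_err/flux = 1/(S/N)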
"""
return 2.5 * np.log10(1 + flux_err / flux)
def get_err_quadrature(err1, err2):
return np.sqrt(err1 ** 2 + err2 ** 2)
def get_absolute_gmag(gmag, distance, a_g):
"""
gmag : float
apparent G band magnitude
distance : float
distance in pc
a_g : float
extinction in the G-band
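    Example
    -------
    gmag=10, distance=100 pc, a_g=0 gives Gmag = 10 - 5*log10(100) + 5 - 0 = 5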
"""
    assert (gmag is not None) & (str(gmag) != "nan"), "gmag is nan"
assert (distance is not None) & (str(distance) != "nan"), "distance is nan"
assert (a_g is not None) & (str(a_g) != "nan"), "a_g is nan"
Gmag = gmag - 5.0 * np.log10(distance) + 5.0 - a_g
return Gmag
def get_excess_from_extiction(A_g, color="bp_rp"):
"""
Compute excess from difference in extinctions E(Bp-Rp) = A_Bp-A_Rp
using coefficients from Malhan, Ibata & Martin (2018a)
and extinction in G-band A_g
Compare the result to 'e_bp_min_rp_val' column in gaia table
which is the estimate of redenning E[BP-RP] from Apsis-Priam.
"""
assert A_g is not None
assert str(A_g) != "nan"
# ratio of A_X/A_V
if color == "bp_rp":
# E(Bp-Rp) = A_Bp-A_Rp
Ag_Av = extinction_ratios["G"]
Ab_Av = extinction_ratios["Bp"]
Ar_Av = extinction_ratios["Rp"]
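        # E(Bp-Rp) = A_Bp - A_Rp = (A_g / (A_G/A_V)) * (A_Bp/A_V - A_Rp/A_V)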
Ab_minus_Ar = (A_g / Ag_Av) * (Ab_Av - Ar_Av) # difference
else:
errmsg = "color=bp_rp is only implemented"
raise NotImplementedError(errmsg)
return Ab_minus_Ar
def get_absolute_color_index(A_g, bmag, rmag):
"""
Deredden the Gaia Bp-Rp color using Bp-Rp extinction ratio (==Bp-Rp excess)
E(Bp-Rp) = A_Bp - A_Rp = (Bp-Rp)_obs - (Bp-Rp)_abs
--> (Bp-Rp)_abs = (Bp-Rp)_obs - E(Bp-Rp)
Note that 'bmag-rmag' is same as bp_rp column in gaia table
See also http://www.astro.ncu.edu.tw/~wchen/Courses/ISM/11.Extinction.pdf
"""
assert (A_g is not None) & (str(A_g) != "nan")
assert (bmag is not None) & (str(bmag) != "nan")
assert (rmag is not None) & (str(rmag) != "nan")
# E(Bp-Rp) = A_Bp-A_Rp = (Bp-Rp)_obs - E(Bp-Rp)
Ab_minus_Ar = get_excess_from_extiction(A_g)
bp_rp = bmag - rmag # color index
Bp_Rp = bp_rp - Ab_minus_Ar
return Bp_Rp
def get_distance(m, M, Av=0):
"""
calculate distance [in pc] from extinction-corrected magnitude
using the equation: d=10**((m-M+5-Av)/5)
Note: m-M=5*log10(d)-5+Av
see http://astronomy.swin.edu.au/cosmos/I/Interstellar+Reddening
Parameters
---------
m : apparent magnitude
M : absolute magnitude
Av : extinction (in V band)
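    Example
    -------
    m=10, M=5, Av=0 gives d = 10**((10 - 5 + 5 - 0)/5) = 100 pc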
"""
assert (m is not None) & (str(m) != "nan")
assert (M is not None) & (str(M) != "nan")
distance = 10 ** (0.2 * (m - M + 5 - Av))
return distance
def parse_aperture_mask(
tpf,
sap_mask="pipeline",
aper_radius=None,
percentile=None,
verbose=False,
threshold_sigma=None,
):
"""Parse and make aperture mask"""
if verbose:
if sap_mask == "round":
print(
"aperture photometry mask: {} (r={} pix)\n".format(
sap_mask, aper_radius
)
)
elif sap_mask == "square":
print(
"aperture photometry mask: {0} ({1}x{1} pix)\n".format(
sap_mask, aper_radius
)
)
elif sap_mask == "percentile":
print(
"aperture photometry mask: {} ({}%)\n".format(
sap_mask, percentile
)
)
else:
print("aperture photometry mask: {}\n".format(sap_mask))
# stacked_img = np.median(tpf.flux,axis=0)
if (sap_mask == "pipeline") or (sap_mask is None):
errmsg = "tpf does not have pipeline mask"
assert tpf.pipeline_mask is not None, errmsg
mask = tpf.pipeline_mask # default
elif sap_mask == "all":
mask = np.ones((tpf.shape[1], tpf.shape[2]), dtype=bool)
elif sap_mask == "round":
assert aper_radius is not None, "supply aper_radius"
mask = make_round_mask(tpf.flux[0], radius=aper_radius)
elif sap_mask == "square":
assert aper_radius is not None, "supply aper_radius/size"
mask = make_square_mask(tpf.flux[0], size=aper_radius, angle=None)
elif sap_mask == "threshold":
assert threshold_sigma is not None, "supply threshold_sigma"
# FIXME: make sure aperture is contiguous
mask = tpf.create_threshold_mask(threshold_sigma)
elif sap_mask == "percentile":
assert percentile is not None, "supply percentile"
median_img = np.nanmedian(tpf.flux, axis=0)
mask = median_img > np.nanpercentile(median_img, percentile)
else:
raise ValueError("Unknown aperture mask")
return mask
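# Illustrative usage sketch (hypothetical `tpf`, not part of the original module):
#   mask = parse_aperture_mask(tpf, sap_mask="round", aper_radius=2)
#   lc = tpf.to_lightcurve(aperture_mask=mask)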
def make_round_mask(img, radius, xy_center=None):
"""Make round mask in units of pixels
Parameters
----------
img : numpy ndarray
image
radius : int
aperture mask radius or size
xy_center : tuple
aperture mask center position
Returns
-------
mask : np.ma.masked_array
aperture mask
"""
offset = 2 # from center
xcen, ycen = img.shape[0] // 2, img.shape[1] // 2
if xy_center is None: # use the middle of the image
y, x = np.unravel_index(np.argmax(img), img.shape)
xy_center = [x, y]
# check if near edge
if np.any([abs(x - xcen) > offset, abs(y - ycen) > offset]):
print("Brightest star is detected far from the center.")
print("Aperture mask is placed at the center instead.\n")
xy_center = [xcen, ycen]
Y, X = np.ogrid[: img.shape[0], : img.shape[1]]
dist_from_center = np.sqrt(
(X - xy_center[0]) ** 2 + (Y - xy_center[1]) ** 2
)
mask = dist_from_center <= radius
return np.ma.masked_array(img, mask=mask).mask
def make_square_mask(img, size, xy_center=None, angle=None):
"""Make rectangular mask with optional rotation
Parameters
----------
img : numpy ndarray
image
size : int
aperture mask size
xy_center : tuple
aperture mask center position
angle : int
rotation
Returns
-------
mask : np.ma.masked_array
aperture mask
"""
offset = 2 # from center
xcen, ycen = img.shape[0] // 2, img.shape[1] // 2
if xy_center is None: # use the middle of the image
y, x = np.unravel_index(np.argmax(img), img.shape)
xy_center = [x, y]
# check if near edge
if np.any([abs(x - xcen) > offset, abs(y - ycen) > offset]):
print("Brightest star detected is far from the center.")
print("Aperture mask is placed at the center instead.\n")
xy_center = [xcen, ycen]
mask = np.zeros_like(img, dtype=bool)
mask[ycen - size : ycen + size + 1, xcen - size : xcen + size + 1] = True
# if angle:
# #rotate mask
# mask = rotate(mask, angle, axes=(1, 0), reshape=True, output=bool, order=0)
return mask
def remove_bad_data(tpf, sector=None, verbose=True):
"""Remove bad cadences identified in data release notes
https://arxiv.org/pdf/2003.10451.pdf, S4.5:
all transiting planets with periods 10.5-17.5 d could be
hidden by the masking in the PDC light curves if only
observed in Sector 14.
Parameters
----------
tpf : lk.targetpixelfile
sector : int
TESS sector
verbose : bool
print texts
"""
if sector is None:
sector = tpf.sector
if verbose:
print(
f"Applying data quality mask identified in Data Release Notes (sector {sector}):"
)
if sector == 1:
pointing_jitter_start = 1346
pointing_jitter_end = 1350
if verbose:
print(
"t<{}|t>{}\n".format(
pointing_jitter_start, pointing_jitter_end
)
)
tpf = tpf[
(tpf.time < pointing_jitter_start)
| (tpf.time > pointing_jitter_end)
]
if sector == 2:
if verbose:
print("None.\n")
if sector == 3:
science_data_start = 1385.89663
science_data_end = 1406.29247
if verbose:
print("t>{}|t<{}\n".format(science_data_start, science_data_end))
tpf = tpf[
(tpf.time > science_data_start) | (tpf.time < science_data_end)
]
if sector == 4:
guidestar_tables_replaced = 1413.26468
instru_anomaly_start = 1418.53691
data_collection_resumed = 1421.21168
if verbose:
print(
"t>{}|t<{}|t>{}\n".format(
guidestar_tables_replaced,
instru_anomaly_start,
data_collection_resumed,
)
)
tpf = tpf[
(tpf.time > guidestar_tables_replaced)
| (tpf.time < instru_anomaly_start)
| (tpf.time > data_collection_resumed)
]
if sector == 5:
# use of Cam1 in attitude control was disabled for the
        # last ~0.5 days of orbit due to strong scattered light
cam1_guide_disabled = 1463.93945
if verbose:
print("t<{}\n".format(cam1_guide_disabled))
tpf = tpf[tpf.time < cam1_guide_disabled]
if sector == 6:
# ~3 days of orbit 19 were used to collect calibration
# data for measuring the PRF of cameras;
# reaction wheel speeds were reset with momentum dumps
# every 3.125 days
data_collection_start = 1468.26998
if verbose:
print("t>{}\n".format(data_collection_start))
tpf = tpf[tpf.time > data_collection_start]
if sector == 8:
        # interruption in communications between the instrument and spacecraft occurred
cam1_guide_enabled = 1517.39566
orbit23_end = 1529.06510
cam1_guide_enabled2 = 1530.44705
instru_anomaly_start = 1531.74288
        data_collection_resumed = 1535.00264
if verbose:
print(
"t>{}|t<{}|t>{}|t<{}|t>{}\n".format(
cam1_guide_enabled,
orbit23_end,
cam1_guide_enabled2,
instru_anomaly_start,
                    data_collection_resumed,
)
)
tpf = tpf[
(tpf.time > cam1_guide_enabled)
| (tpf.time <= orbit23_end)
| (tpf.time > cam1_guide_enabled2)
| (tpf.time < instru_anomaly_start)
            | (tpf.time > data_collection_resumed)
]
if sector == 9:
"""
use of Cam1 in attitude control was disabled at the
start of both orbits due to strong scattered light"""
cam1_guide_enabled = 1543.75080
orbit25_end = 1555.54148
cam1_guide_enabled2 = 1543.75080
if verbose:
print(
"t>{}|t<{}|t>{}\n".format(
cam1_guide_enabled, orbit25_end, cam1_guide_enabled2
)
)
tpf = tpf[
(tpf.time > cam1_guide_enabled)
| (tpf.time <= orbit25_end)
| (tpf.time > cam1_guide_enabled2)
]
if sector == 10:
"""
https://archive.stsci.edu/missions/tess/doc/tess_drn/tess_sector_10_drn14_v02.pdf
Total of 25.27 days of science data collected
use of Cam1 in attitude control was disabled at the
start of both orbits due to strong scattered light
"""
cam1_guide_enabled = 1570.87620
orbit27_end = 1581.78453
cam1_guide_enabled2 = 1584.72342
if verbose:
print(
"t>{}|t<{}|t>{}\n".format(
cam1_guide_enabled, orbit27_end, cam1_guide_enabled2
)
)
tpf = tpf[
(tpf.time > cam1_guide_enabled)
| (tpf.time <= orbit27_end)
| (tpf.time > cam1_guide_enabled2)
]
if sector == 11:
"""
https://archive.stsci.edu/missions/tess/doc/tess_drn/tess_sector_11_drn16_v02.pdf
use of Cam1 in attitude control was disabled at the
start of both orbits due to strong scattered light
Total of 26.04 days of science data collected
"""
cam1_guide_enabled = 1599.94148
orbit29_end = 1609.69425
cam1_guide_enabled2 = 1614.19842
if verbose:
print(
"t>{}|t<{}|t>{}\n".format(
cam1_guide_enabled, orbit29_end, cam1_guide_enabled2
)
)
tpf = tpf[
(tpf.time > cam1_guide_enabled)
| (tpf.time <= orbit29_end)
| (tpf.time > cam1_guide_enabled2)
]
if sector in [12, 13, 14, 15, 16, 17, 19, 20, 21]:
"""
See list of release notes:
http://archive.stsci.edu/tess/tess_drn.html
Total days of science data collected:
12: 26.90
13: 27.51
14: 25.91
15: 24.97
16: 23.38
17: 23.51
19: 24.10
20: 24.79
21: 24.42
Note on sector 14:
* first northern ecliptic hemisphere pointing
* first sector to make use of TIC 8 based on Gaia DR2 astrometry+photometry
* spacecraft is pointed to a higher ecliptic latitude (+85 degrees rather
than +54 degrees) to mitigate issues with scattered light in Cam 1 and Cam 2
* first to make use of an updated SPOC data processing
pipeline, SPOC Release 4.0
* the first to make use of CCD-specific Data Anomaly Flags that mark
cadences excluded due to high levels of scattered light. The flags are referred to as
“Scattered Light” flags and marked with bit 13, value 4096
"""
print(f"No instrument anomaly in sector {sector}")
if sector == 18:
"""
* spacecraft passed through the shadow of the Earth at the start of orbit 43
during which the instrument was turned off and no data were collected for 6.2 hr
* thermal state of the spacecraft changed during this time,
and trends in the raw photometry and target positions are apparent after data collection
resumed
Total of 23.12 days of science data collected
"""
instru_restart = 1791.36989
orbit43_end = 1802.43999
if verbose:
print("t>{}|t<{}\n".format(instru_restart, orbit43_end))
        tpf = tpf[(tpf.time > instru_restart) | (tpf.time <= orbit43_end)]
return tpf
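# --- Usage sketch for remove_bad_data (an added illustration, not part of the
# original module). Fetching a target pixel file requires the lightkurve
# package and network access, so the call is wrapped in a helper; the target
# name and sector below are arbitrary example choices.
def _demo_remove_bad_data():
    import lightkurve as lk  # assumed to be installed alongside this module
    tpf = lk.search_targetpixelfile("Pi Mensae", sector=1).download()
    return remove_bad_data(tpf, sector=1, verbose=True)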
def get_tois(
clobber=True,
outdir=DATA_PATH,
verbose=False,
remove_FP=True,
remove_known_planets=False,
add_FPP=False,
):
"""Download TOI list from TESS Alert/TOI Release.
Parameters
----------
clobber : bool
re-download table and save as csv file
outdir : str
download directory location
    verbose : bool
        print texts
    remove_FP : bool
        remove TOIs flagged as false positives
    remove_known_planets : bool
        remove TOIs associated with already known planets
    add_FPP : bool
        add false positive probability (FPP) columns, if available
Returns
-------
d : pandas.DataFrame
TOI table as dataframe
"""
dl_link = "https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv"
fp = join(outdir, "TOIs.csv")
if not exists(outdir):
os.makedirs(outdir)
if not exists(fp) or clobber:
        d = pd.read_csv(dl_link)
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
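def test_box_expected_helper_example():
    """
    Hedged illustration (an added example, not part of the original module)
    of the `tm.box_expected` helper used throughout this file: it wraps a
    plain Index into the container class under test.
    """
    tdi = timedelta_range('1 days', periods=3)
    boxed = tm.box_expected(tdi, Series)
    assert isinstance(boxed, Series)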
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
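    def test_int64index_mul_tdscalar_example(self):
        # Hedged, minimal illustration of the behaviour exercised above (an
        # added example, not part of the original suite): multiplying a
        # numeric Index by a Timedelta scalar broadcasts to a TimedeltaIndex.
        result = pd.Int64Index(range(1, 4)) * Timedelta(days=1)
        expected = TimedeltaIndex(['1 days', '2 days', '3 days'])
        tm.assert_index_equal(result, expected)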
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
        Timedelta(days=1),
import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
pyreadstat = pytest.importorskip("pyreadstat")
def test_spss_labelled_num(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-num.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"VAR00002": 1.0}, index=[0])
tm.assert_frame_equal(df, expected)
def test_spss_labelled_num_na(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-num-na.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": ["This is one", None]})
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"VAR00002": [1.0, np.nan]})
tm.assert_frame_equal(df, expected)
def test_spss_labelled_str(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-str.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"gender": ["Male", "Female"]})
expected["gender"] = pd.Categorical(expected["gender"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"gender": ["M", "F"]})
tm.assert_frame_equal(df, expected)
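def test_spss_usecols_example(datapath):
    # Hedged illustration (an added example, not part of the original suite):
    # `usecols` restricts which columns are read; here it is effectively a
    # no-op because the Haven test file only contains the "gender" column.
    fname = datapath("io", "data", "labelled-str.sav")
    df = pd.read_spss(fname, usecols=["gender"], convert_categoricals=False)
    expected = pd.DataFrame({"gender": ["M", "F"]})
    tm.assert_frame_equal(df, expected)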
def test_spss_umlauts(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "umlauts.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"var1": ["the ä umlaut",
"the ü umlaut",
"the ä umlaut",
"the ö umlaut"]})
expected["var1"] = | pd.Categorical(expected["var1"]) | pandas.Categorical |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
            # (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
                # tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
            # nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (like an invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
            def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
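    def test_data_columns_query_example(self, setup_path):
        # Hedged, minimal recap of the pattern exercised above (an added
        # example, not part of the original suite): columns listed in
        # `data_columns` are indexed on disk and can be queried directly
        # with `select`.
        df = DataFrame(
            {"A": np.arange(5), "B": ["x", "x", "y", "y", "y"]},
            index=np.arange(5),
        )
        with ensure_clean_store(setup_path) as store:
            store.append("df", df, data_columns=["B"])
            result = store.select("df", "B == 'y'")
            tm.assert_frame_equal(result, df[df.B == "y"])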
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
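# with data_columns=True, MultiIndex level values become queryable in where=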
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
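# put() without format="table" stores a fixed-format object, which
# rejects table-only select keywords such as columns= and where=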
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize tests for append
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of empty and non-empty frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# put an empty frame and read it back
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# an ndarray passed directly
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
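# appending a frame whose index dtype conflicts with the stored table should raise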
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
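# object, bool, int and datetime/timestamp columns should round-trip
# unchanged through a table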
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
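# 2000-01-01 01:00 US/Eastern is 2000-01-01 06:00 UTC, i.e. 946706400
# seconds after the epoch (asserted here in nanoseconds)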
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
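# removal by key, by nested path, and via __delitem__; removing a
# missing key raises KeyError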
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# removing a nonexistent key raises KeyError
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
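# pre-epoch timestamps (1940-1960) should round-trip; known to
# overflow on some Windows platforms, hence the skip below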
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
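# duplicate column labels, within one dtype and across dtypes, must
# survive select() and column subsetting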
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
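# re-assigning an existing key replaces the stored object, even with a
# different pandas type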
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a float column with a NaN that is not in the first row is fine too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
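# where clauses can reference in-scope list/range/Index objects as
# membership selectors on data columns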
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector on a data column
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# the inclusive range covers exactly one chunk, so a single frame is returned
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and a where clause that selects *nothing*;
# consistent with Python iteration idiom, this should return [],
# so iterating over the result does nothing.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok: appending indexes that do not share a frequency
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# requesting a column that is not in the table
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
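# coordinate-based selection: select_as_coordinates results, arrays,
# boolean masks and lists can all be passed as where=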
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# the same location-based selection, repeated
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as expected"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
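# start/stop select rows by position from a table; an out-of-range
# slice returns an empty frame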
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse format is not implemented; the frame below is set up but unused
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
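# column filters can reference slices of df.columns taken from the local scope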
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-dtype table containing a VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindixes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
def test_unicode_index(self, setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(self, setup_path):
# GH 11234
char = "\u0394"
df = pd.DataFrame({"A": [char]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
df = pd.DataFrame({"A": ["a", char], "B": ["b", "b"]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
@td.xfail_non_writeable
def test_store_datetime_mixed(self, setup_path):
df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
ts = tm.makeTimeSeries()
df["d"] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
# FIXME: don't leave commented-out code
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
#
# with pytest.raises(Exception):
# store.put('foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self, setup_path):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({"a": np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({("1", 2): np.random.randn(10)})
df5 = DataFrame({("1", 2, object): np.random.randn(10)})
with ensure_clean_store(setup_path) as store:
name = "df_{}".format(tm.rands(10))
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self, setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
"b": [1, 2, 3, 4, 5, 6, 7, 8],
}
)
expected = df[df.a == "test & test"]
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self, setup_path):
with ensure_clean_store(setup_path) as store:
# Basic
_maybe_remove(store, "s")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s", s, format="table")
result = store.select("s")
tm.assert_series_equal(s, result)
_maybe_remove(store, "s_ordered")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
)
store.append("s_ordered", s, format="table")
result = store.select("s_ordered")
tm.assert_series_equal(s, result)
_maybe_remove(store, "df")
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append("df", df, format="table")
result = store.select("df")
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, "si")
s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category")
store.append("si", s)
result = store.select("si")
tm.assert_series_equal(result, s)
_maybe_remove(store, "si2")
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category")
store.append("si2", s)
result = store.select("si2")
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, "df2")
df2 = df.copy()
df2["s2"] = Series(list("abcdefg")).astype("category")
store.append("df2", df2)
result = store.select("df2")
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert "/df2 " in info
# assert '/df2/meta/values_block_0/meta' in info
assert "/df2/meta/values_block_1/meta" in info
# unordered
_maybe_remove(store, "s2")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s2", s, format="table")
result = store.select("s2")
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, "df3")
store.append("df3", df, data_columns=["s"])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["d"])]
result = store.select("df3", where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["f"])]
result = store.select("df3", where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append("df3", df)
df = concat([df, df])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3["s"].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append("df3", df3)
# Remove, and make sure meta data is removed (its a recursive
# removal so should be).
result = store.select("df3/meta/s/meta")
assert result is not None
store.remove("df3")
with pytest.raises(
KeyError, match="'No object named df3/meta/s/meta in the file'"
):
store.select("df3/meta/s/meta")
def test_categorical_conversion(self, setup_path):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# where criteria isn't met.
obsids = ["ESP_012345_6789", "ESP_987654_3210"]
imgids = ["APF00006np", "APF0001imm"]
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype("category")
df.imgids = df.imgids.astype("category")
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self, setup_path):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
df = pd.DataFrame(
{
"a": ["a", "b", "c", np.nan],
"b": [np.nan, np.nan, np.nan, np.nan],
"c": [1, 2, 3, 4],
"d": pd.Series([None] * 4, dtype=object),
}
)
df["a"] = df.a.astype("category")
df["b"] = df.b.astype("category")
df["d"] = df.b.astype("category")
expected = df
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df")
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self, setup_path):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", format="fixed")
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self, setup_path):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self, setup_path):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example")
with ensure_clean_store(setup_path) as store:
store["df"] = df
tm.assert_frame_equal(store["df"], df)
def test_columns_multiindex_modified(self, setup_path):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(setup_path) as path:
df.to_hdf(
path,
"df",
mode="a",
append=True,
data_columns=data_columns,
index=False,
)
cols2load = list("BCD")
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, "df", columns=cols2load) # noqa
assert cols2load_original == cols2load
@ignore_natural_naming_warning
def test_to_hdf_with_object_column_names(self, setup_path):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [
tm.makeIntIndex,
tm.makeFloatIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]
types_should_run = [
tm.makeStringIndex,
tm.makeCategoricalIndex,
tm.makeUnicodeIndex,
]
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
msg = "cannot have non-object label DataIndexableCol"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", format="table", data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df.to_hdf(path, "df", format="table", data_columns=True)
result = pd.read_hdf(
path, "df", where="index = [{0}]".format(df.index[0])
)
assert len(result)
def test_read_hdf_open_store(self, setup_path):
# GH10330
# No check for non-string path_or-buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
indirect = read_hdf(store, "df")
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with ensure_clean_path(setup_path) as path:
with pytest.raises(IOError):
read_hdf(path, "key")
df.to_hdf(path, "df")
store = HDFStore(path, mode="r")
store.close()
with pytest.raises(IOError):
read_hdf(store, "df")
def test_read_hdf_generic_buffer_errors(self):
with pytest.raises(NotImplementedError):
read_hdf(BytesIO(b""), "df")
def test_invalid_complib(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, "df", complib="foolib")
# GH10443
def test_read_nokey(self, setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a")
with pytest.raises(ValueError):
read_hdf(path)
def test_read_nokey_table(self, setup_path):
# GH13231
df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a", format="table")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a", format="table")
with pytest.raises(ValueError):
read_hdf(path)
def test_read_nokey_empty(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path)
store.close()
with pytest.raises(ValueError):
read_hdf(path)
@td.skip_if_no("pathlib")
def test_read_from_pathlib_path(self, setup_path):
# GH11773
from pathlib import Path
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
def test_read_from_py_localpath(self, setup_path):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(
np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
)
with ensure_clean_path(setup_path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, "df", mode="a")
actual = read_hdf(path_obj, "df")
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self, setup_path):
# GH 14241
df = pd.DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
cutoff = 1000000000.0006
result = store.select("test", "A < {cutoff:.4f}".format(cutoff=cutoff))
assert result.empty
cutoff = 1000000000.0010
result = store.select("test", "A > {cutoff:.4f}".format(cutoff=cutoff))
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select("test", "A == {exact:.4f}".format(exact=exact))
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self, setup_path):
# GH 15492
df = pd.DataFrame(
{
"date": ["2014-01-01", "2014-01-02"],
"real_date": date_range("2014-01-01", periods=2),
"float": [1.1, 1.2],
"int": [1, 2],
},
columns=["date", "real_date", "float", "int"],
)
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
ts = pd.Timestamp("2014-01-01") # noqa
result = store.select("test", where="real_date > ts")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ["<", ">", "=="]:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp("2014-01-01"), pd.Timedelta(1, "s")]:
query = "date {op} v".format(op=op)
with pytest.raises(TypeError):
store.select("test", where=query)
# strings to other columns must be convertible to type
v = "a"
for col in ["int", "float", "real_date"]:
query = "{col} {op} v".format(op=op, col=col)
with pytest.raises(ValueError):
store.select("test", where=query)
for v, col in zip(
["1", "1.1", "2014-01-01"], ["int", "float", "real_date"]
):
query = "{col} {op} v".format(op=op, col=col)
result = store.select("test", where=query)
if op == "==":
expected = df.loc[[0], :]
elif op == ">":
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
| tm.assert_frame_equal(expected, result) | pandas.util.testing.assert_frame_equal |
import streamlit as st
import pandas as pd
import requests
import plotly.graph_objects as go
from plotly.subplots import make_subplots
@st.cache
def get_countries():
api_uri = "https://covid19-eu-data-api-gamma.now.sh/api/countryLookup"
data = requests.get(api_uri).json()
countries = data["countries"]
countries = [list(i.keys()) for i in countries]
countries = sum(countries, [])
countries = [i for i in countries if (len(i)==2) and (i != "uk")]
return countries
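# The countryLookup response shape is inferred from the parsing above (not documented here):
# presumably {"countries": [{"at": {...}}, {"be": {...}}, ...]}, so the flattened dict keys are
# alpha-2 country codes; only two-letter codes other than "uk" are kept.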
@st.cache
def get_data(country_code, days):
api_uri = f"https://covid19-eu-data-api-gamma.now.sh/api/countries?alpha2={country_code}&days={days+1}"
data = requests.get(api_uri).json()
return data
@st.cache
def create_dataframe(data):
data_records = []
for i in data:
data_records += i.get("records")
df = | pd.DataFrame(data_records) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
class Datasets:
"""Dataset for classification problem"""
def __init__(
self,
data_file="./train.csv",
cat_cols=None,
num_cols=None,
level_cols=None,
label_col=None,
train=True,
):
"""create new copies instead of references"""
self.cat_cols = cat_cols
self.num_cols = num_cols
self.level_cols = level_cols
self.feature_cols = cat_cols + num_cols + level_cols
self.label_col = label_col
self.label_encoder = None
        # placeholders populated later by the preprocessing/split helpers
        self.feature_train = None
        self.target_train = None
        self.feature_test = None
        self.target_test = None
        self.X = None
        self.y = None
        self.scaler = None
        self.one_hot = None
        self.train = train
        self.data_df = self._create_data_df(data_file)
def _label_encode(self, df, col):
"""label encodes data"""
le = LabelEncoder()
le.fit(df[col])
df[col] = le.transform(df[col])
self.label_encoder = le
return df
def _inverse_label_encode(self, df, col):
"""inverse label encodes data"""
le = self.label_encoder
df[col] = le.inverse_transform(df[col])
def _load_data(self, file):
"""loads csv to pd dataframe"""
return pd.read_csv(file)
# def _create_kfold(self, file):
# """make k fold for data"""
# df = _load_data(file)
# df["kfold"] = -1
# df = df.sample(frac=1).reset_index(drop=True)
# kf = model_selection.StratifiedKFold(
# n_splits=self.kfold, shuffle=False, random_state=24
# )
# for fold, (train_idx, val_idx) in enumerate(kf.split(X=df, y=df.target.values)):
# print(len(train_idx), len(val_idx))
# df.loc[val_idx, "kfold"] = fold
# return df
def _create_data_df(self, data_file, preprocess=True, label_encode=False):
"""loads and encodes train data"""
data = self._load_data(data_file)
if preprocess:
data = self._impute_missing_values(
data, self.cat_cols, self.num_cols, self.level_cols
)
data = self._feature_preprocessing(
data, self.cat_cols, self.num_cols, self.level_cols
)
if label_encode:
self._label_encode(data, self.label_col)
self._split_train_test(data)
return data
def _impute_missing_values(
self, df, categorical_features, numeric_features, level_features
):
"""Imputes the continious columns with median and categorical columns with the mode value"""
imputer_con = SimpleImputer(missing_values=np.nan, strategy="median")
imputer_cat = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
for col in categorical_features + numeric_features + level_features:
if df[col].isnull().sum() > 0:
if col in categorical_features + level_features:
df[col] = imputer_cat.fit_transform(df[col].values.reshape(-1, 1))
elif col in numeric_features:
df[col] = imputer_con.fit_transform(df[col].values.reshape(-1, 1))
return df
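    # Illustrative behaviour (example values are assumptions, not from the source): a numeric
    # column with values [1.0, NaN, 3.0] is filled with its median 2.0, while a categorical or
    # level column gets its most frequent value via SimpleImputer's "most_frequent" strategy.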
def _onehot_encoding(self, df, cat_features):
encoded_features = []
self.one_hot = {}
for feature in cat_features:
oh = OneHotEncoder()
encoded_feat = oh.fit_transform(df[feature].values.reshape(-1, 1)).toarray()
self.one_hot[feature] = oh
n = df[feature].nunique()
cols = ["{}_{}".format(feature, n) for n in range(1, n + 1)]
self.one_hot[str(feature) + "col"] = cols
encoded_df = | pd.DataFrame(encoded_feat, columns=cols) | pandas.DataFrame |
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Standard
from datetime import datetime
import json
from json.decoder import JSONDecodeError
import os
from os.path import isfile, join
import pandas as pd
import pickle
from typing import Dict, List, Any
# Local
from GenConfigs import *
from .ContactDB import GetActivationRecordsSince
from .Logger import ScriptLogger
from .PerfMonAnalyzer import *
from .TestDataframePlotting import *
logger = ScriptLogger(loggername='workload_analyzer',
filename=FAAS_ROOT+'/logs/WA.log')
def get_test_metadata():
"""
Returns the test start time from the output log of SWI.
"""
test_start_time = None
with open(FAAS_ROOT+"/synthetic_workload_invoker/test_metadata.out") as f:
lines = f.readlines()
test_start_time = lines[0]
config_file = lines[1]
invoked_actions = int(lines[2][:-1])
print('Invocations by Workload Invoker :' + str(invoked_actions))
try:
return int(test_start_time[:-1]), config_file[:-1]
except Exception as e:
logger.error("Error reading the test metadata!")
raise e
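# Assumed layout of test_metadata.out, inferred from the parsing above rather than from the
# file's producer: line 1 is the test start time in epoch milliseconds, line 2 is the path of
# the workload config JSON, and line 3 is the number of invoked actions, e.g.
#   1571234567890
#   /path/to/workload_config.json
#   120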
def extract_extra_annotations(json_annotations_data):
"""
Extracts deep information from activation json record.
"""
extra_data = {'waitTime': [], 'initTime': [], 'kind': []}
for item in json_annotations_data:
if item['key'] in extra_data.keys():
extra_data[item['key']] = item['value']
for key in extra_data.keys():
if extra_data[key] == []:
extra_data[key] = 0
return extra_data
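# The activation record's annotations field is assumed to look like
# [{"key": "waitTime", "value": 12}, {"key": "initTime", "value": 0}, {"key": "kind", "value": "python:3"}];
# any of the three tracked keys missing from the list is reported as 0 by the function above.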
def construct_config_dataframe(config_file):
"""
Returns a dataframe which describes the test in a standard format.
"""
workload = None
try:
with open(config_file) as f:
workload = json.load(f)
logger.info("Successfully read the specified workload")
except JSONDecodeError as e:
logger.error("The JSON config file cannot be read")
logger.error(e.msg)
raise e
    return [workload['test_name'], pd.DataFrame(workload['instances'])]
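# The workload JSON is therefore expected to provide at least a 'test_name' string and an
# 'instances' mapping that pandas can turn into a DataFrame; this is inferred from the
# accesses above rather than from any schema shipped with the repository.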
import os
import pprint
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
import common
def main():
train_df = common.load_data('train')
path = [common.OUTPUT_DIR]
for name in os.listdir(os.path.join(*path)):
if not os.path.isdir(os.path.join(*path, name)):
continue
path.append(name)
for random_seed in os.listdir(os.path.join(*path)):
if not os.path.isdir(os.path.join(*path, random_seed)):
continue
path.append(random_seed)
results = []
for params_str in os.listdir(os.path.join(*path)):
if not os.path.isdir(os.path.join(*path, params_str)):
continue
path.append(params_str)
model_results = OrderedDict({'name': name})
for param in sorted(params_str.split('_')):
try:
k, v = param.split('=')
k = k.replace('-', '_')
model_results[k] = v
except ValueError:
pass
scores = []
for fold_num in range(1, 11):
fold_csv = os.path.join(*path, f'fold{fold_num}_validation.csv')
if os.path.isfile(fold_csv):
output = pd.read_csv(fold_csv).sort_values('id')
target = train_df[train_df['id'].isin(output['id'])].sort_values('id')
assert (output['id'].values == target['id'].values).all()
output = output[common.LABELS].values
target = target[common.LABELS].values
score = roc_auc_score(target, output, average='macro')
model_results[f'fold{fold_num}'] = score
scores.append(score)
if scores:
model_results['mean'] = np.mean(scores)
model_results['std'] = np.std(scores)
results.append(model_results)
path.pop()
if results:
results = | pd.DataFrame(results) | pandas.DataFrame |
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.layers import Conv3D, Conv2D
from tensorflow.keras.layers import ConvLSTM2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import losses
import numpy as np
import pandas as pd
import random
import pandasql as ps
import pickle
from scipy.stats import entropy
from numpy import percentile
import tensorflow.keras as keras
import gc
########## Create ConvLSTM network ##############
from tensorflow.keras.layers import LayerNormalization
def create_model(pixel,filters,channel,hiddenlayers = 4):
seq = Sequential()
#seq.add(BatchNormalization(trainable=False))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
input_shape=(None, pixel, pixel, channel),
padding='same', return_sequences=True))#activation = 'tanh', recurrent_activation = 'tanh')),activation = 'elu'
#seq.add(BatchNormalization(trainable=False))
for layer in range(hiddenlayers-1):
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=True))# activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=False)) #activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(Conv2D(filters=1, kernel_size=(3, 3),
activation='elu',
padding='same', data_format='channels_last'))
#seq.add(BatchNormalization(trainable=False))
seq.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae'])
return seq
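# Rough usage sketch (shapes follow the layer definitions above; the sizes are assumptions):
# the network maps a sequence of frames to a single next frame.
#   model = create_model(pixel=64, filters=16, channel=5, hiddenlayers=4)
#   x = np.zeros((2, 10, 64, 64, 5))   # (batch, time, height, width, channels)
#   y = model.predict(x)               # -> shape (2, 64, 64, 1)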
import pandas as pd
import statsmodels.formula.api as sm
def get_localdist(trainX,spatialboundary,ST,boundmargin,span,channel):
trainx_dist = []
for day in range(span):
if day <= boundmargin:
_trainx_dist = trainX[0:ST,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
elif day >= span - boundmargin-1:
_trainx_dist = trainX[span-ST:span,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
else:
_trainx_dist = trainX[day-boundmargin:day+boundmargin+1,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
_trainx_dist = _trainx_dist.reshape(ST**3,channel)
_trainx_dist = np.std(_trainx_dist, axis = 0)
trainx_dist.append(_trainx_dist)
trainx_dist = np.array(trainx_dist)
return (trainx_dist)
def get_localranddist(trainx_dist,span,channel,spatial):
randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
for j in range(1,channel):
if j in spatial:
a = random.randint(-5,5)
_randomlist = np.array([a for i in range(10)])[::,np.newaxis]
else:
_randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
randomlist = np.concatenate((randomlist,_randomlist),axis = 1)
randomlist[randomlist == 0 ] =1
return (trainx_dist/randomlist)
import statsmodels.api as sm
def run_ST_lime_pixel(model,trainX,trainx_dist,samp,span,channel,spatial,ST,r,c,channellist,incubation):
trainx = []
trainy = []
#print(r,c)
incubation_span = span - incubation
for i in range(samp):
rand_trainx_dist = get_localranddist(trainx_dist,span,channel,spatial)
_trainx = pickle.loads(pickle.dumps(trainX , -1))
#if (r,c) == (5,6):
# print(_trainx[::,r,c,4])
temp = _trainx[::,r,c,::]+rand_trainx_dist
rand_trainx_dist[np.where((temp <0) | (temp >1) )] = rand_trainx_dist[np.where((temp <0) | (temp >1) )] * -1
_trainx[(incubation_span - ST):incubation_span,r,c,channellist] = _trainx[(incubation_span - ST):incubation_span,r,c,channellist]+rand_trainx_dist[(incubation_span - ST):incubation_span,channellist]
#print(_trainx[::,r,c,4])
for C in spatial:
_trainx[::,::,::,C] = _trainx[incubation_span-1,::,::,C]
_trainy = model.predict(_trainx[np.newaxis,::,::,::,::])
_trainy = _trainy[0,::,::,0]
trainx.append(_trainx)
trainy.append(_trainy)
trainx = np.array(trainx)[::,::,r,c,::]
#print(trainx[::,::,4].shape)
trainy = np.array(trainy)[::,r,c]
traindata = pd.DataFrame()
for C in channellist:
if C in spatial:
traindata['C'+str(C)] = trainx[::,span-1,C].flatten()
else:
for T in range(incubation+1,incubation+ST+1):
traindata['C'+str(C)+'_T'+str(T)] = trainx[::,span-T,C].flatten()
traindata['Y'] = trainy.flatten()
traindata = traindata[traindata.sum(axis=1)>0]
X=list(traindata.columns)
X.remove('Y')
#X.remove('index')
_traindata = pickle.loads(pickle.dumps(traindata,-1))
for x in X:
_traindata[x] = (_traindata[x] - _traindata[x].mean())/_traindata[x].std()
_traindata['Y'] = (_traindata['Y'] - _traindata['Y'].mean())/_traindata['Y'].std()
try:
res = sm.OLS(_traindata['Y'],_traindata[X]).fit()
except:
print(channellist)
print(traindata.iloc[0]) #trainx[::,span-4,4].flatten()) #trainx[::,span-1,2].flatten())
raise
return(res,traindata)
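# In short, run_ST_lime_pixel builds a LIME-style local surrogate around one pixel: it perturbs
# the recent ST time steps of that pixel, re-runs the ConvLSTM, standardises the perturbed
# features and predicted response, and fits an OLS model whose coefficients serve as local
# feature attributions (res), returned together with the raw perturbation table (traindata).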
import itertools
def run_regression(model,grid,train,train_gridday,frames_grid,exclude_channel = [0],spatial = [1],start=0,ST=3,margin = 4,samp= 500, incubation = 3,offset=10):
trainsamp = []
maxday = max(frames_grid['day'])
span = train.shape[1]
channel = train.shape[-1]
channellist = list(set(range(channel)) - set(exclude_channel))
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
_gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = _gridpix[margin:pix-margin,margin:pix-margin].flatten()
allowedgridpix = frames_grid[(frames_grid['no_pat']>10) & (frames_grid['grid'] == grid)].groupby(['grid','pixno'])['day'].count().reset_index()
allowedgridpix = allowedgridpix[allowedgridpix.day > 30 ][['grid','pixno']]
gridpix = np.intersect1d(gridpix,np.array(allowedgridpix['pixno']))
train_xplain = pd.DataFrame()
gridtraindata_xplain= pd.DataFrame()
for k,(_grid,T) in train_gridday.items():
if _grid == grid:
trainsamp.append(k)
for T_span in itertools.islice(trainsamp[0:span], None, None, ST):# trainsamp[start:start+ST]:
trainX = train[T_span,::,::,::,::]
g,day = train_gridday[T_span]
for pixno in gridpix:
(r,c) = np.array((np.where(_gridpix==pixno))).reshape(2)
_boundmargin = np.int((ST-1)/2)
spatialboundary = (r-_boundmargin,r+_boundmargin+1,c - _boundmargin, c+_boundmargin+1)
trainx_dist = get_localdist(trainX,spatialboundary,ST,_boundmargin,span,channel)
print("pixno",pixno,"Tspan",T_span)
res,traindata_explain = run_ST_lime_pixel(model,trainX,trainx_dist,samp,span,channel,spatial,ST,r,c, channellist,incubation)
traindata_explain['grid'] = grid; traindata_explain['pixno'] = pixno; traindata_explain['day'] = maxday - day;
gridtraindata_xplain = gridtraindata_xplain.append(traindata_explain, ignore_index = True)
#print(res.summary())
fnames = list(res.params.index.values); coef = list(res.params); pvalue = list(res.pvalues)
fnames.append('beta');coef.append(np.mean(trainX[span-ST:,r,c,0])); pvalue.append(0)
for C in channellist:
fnames.append('act_C_'+str(C))
coef.append(np.mean(trainX[span-ST:,r,c,C]))
pvalue.append(0)
temp_df = pd.DataFrame({'fnames':fnames,'coef':coef,'pvalue':pvalue})
temp_df['grid'] = grid; temp_df['pixno'] = pixno; temp_df['day'] = maxday - day;
train_xplain = train_xplain.append(temp_df, ignore_index = True)
return(train_xplain,gridtraindata_xplain)
# """Compute softmax values for each sets of scores in x."""
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
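# Note the non-standard branch above: when max(x) > 1 the scores are divided by their maximum
# before exponentiation instead of the usual max-subtraction, e.g.
#   softmax(np.array([1.0, 2.0, 4.0])) uses exp(x / 4.0), while
#   softmax(np.array([0.1, 0.2]))      uses exp(x - 0.2); both variants still sum to 1.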
########## Convert image pixel values to number of infection cases ########
def convert_image_to_data(image,margin,sus_pop):
frame = image
frame[frame<0.001] = 0
pix = frame.shape[0]
frame = frame[margin:pix-margin,margin:pix-margin]
_sus_pop = np.log(sus_pop +2)
frame = np.multiply(frame,_sus_pop)
popexists_size = len(sus_pop[sus_pop>0])
frame = np.exp(frame) -1
frame = np.round(frame,0)
return (frame,popexists_size)
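# The conversion inverts what appears to be a log(1 + cases) / log(population + 2) normalisation:
# each cropped pixel value is multiplied by log(sus_pop + 2), exponentiated, shifted by -1 and
# rounded, recovering an approximate integer case count per pixel.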
def forecast(model,input_sequence,frames_grid,test_gridday,span,qt,in_grid=-1,epsilon_T = -1,margin=4,spatial_channel=[],calculate_channel={},pixno=-1):
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
_gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = _gridpix[margin:pix-margin,margin:pix-margin]
forecastframe = pd.DataFrame()
channels = input_sequence.shape[-1]
_span = 10
#forecast_frames_grid = pickle.loads(pickle.dumps(frames_grid[frames_grid['day'] <= max(frames_grid['day'])-_span],-1))
forecast_frames_grid_array = []; colnames = frames_grid.columns
print(max(frames_grid['day'])-_span)
for k,(grid,_filler) in test_gridday.items():
if in_grid >-1 and in_grid != grid:
continue
grid_forecast_frames_grid = pickle.loads(pickle.dumps(frames_grid[(frames_grid.grid == grid) & (frames_grid['day'] <= max(frames_grid['day'])-_span)],-1))
track = input_sequence[k]
totpop = track[0,::,::,1]
pix = totpop.shape[0]
print(grid)
I_0 = np.log(np.array(grid_forecast_frames_grid[(grid_forecast_frames_grid.day == max(grid_forecast_frames_grid.day))].sort_values(['pixno'])['I'])+1)
I_0 = np.flip(I_0.reshape(pix,pix),0)
_forecast_frames_grid = pickle.loads(pickle.dumps(grid_forecast_frames_grid[grid_forecast_frames_grid['day']==max(grid_forecast_frames_grid['day'])],-1))
popexists = pickle.loads(pickle.dumps(totpop[::,::],-1))
popexists[popexists>0] = 1
######## for each prediction day
for i in range(span):
new_pos = model.predict(track[np.newaxis, ::, ::, ::, ::])
new = new_pos[::, ::, ::, ::]
new = np.multiply(new[0,::,::,0],popexists)[np.newaxis,::,::,np.newaxis]
I_0 = np.multiply(I_0,popexists)
new[new<0] = 0
new[new>1] = 1
if epsilon_T > 1 and i > 0:
#pass
sum_beta_gamma = grid_forecast_frames_grid[(grid_forecast_frames_grid.day >41 )][['pixno','beta','gamma']].groupby(['pixno']).sum()
sum_beta = np.flip(np.array(sum_beta_gamma.beta).reshape(pix,pix),0)
sum_gamma =np.flip(np.array(sum_beta_gamma.gamma).reshape(pix,pix),0);
Iperc = pickle.loads(pickle.dumps(track[-1,::,::,4],-1)); Iperc[Iperc==0]=1
gamma1 = I_0*(i+1)/epsilon_T+ new[0,::,::,0]*qt/Iperc + sum_beta -sum_gamma; gamma1[gamma1>0.2] = 0.2
else:
gamma = forecast_gamma(grid_forecast_frames_grid,grid,5)
if pixno > -1 and i > 0:
gamma = forecast_gamma(grid_forecast_frames_grid,grid,5)
pixcor = np.where(_gridpix == pixno)
gamma[pixcor] = gamma[pixcor]
elif i > 0 and epsilon_T>1:
gamma = gamma
_forecast_frames_grid = calculate_future_SIR(_forecast_frames_grid,grid,forecastbeta = new[0,::,::,0],forecastgamma = gamma,qt = qt)
if len(forecast_frames_grid_array) != 0:
forecast_frames_grid_array = np.concatenate((forecast_frames_grid_array,_forecast_frames_grid.values),axis = 0)
else:
forecast_frames_grid_array = _forecast_frames_grid.values
#print(span, max( forecast_frames_grid[(forecast_frames_grid.grid == grid)]['day']))
########### append channels
newtrack = new
for channel in range(1,channels):
if channel in spatial_channel:
channel_data = track[0,::,::,channel]
newtrack = np.concatenate((newtrack,channel_data[np.newaxis,::,::,np.newaxis]),axis = 3)
elif channel in calculate_channel:
channel2 = np.flip(np.array(_forecast_frames_grid[calculate_channel[channel]]).reshape(pix, pix), 0)
newtrack = np.concatenate((newtrack,channel2[np.newaxis,::,::,np.newaxis]),axis = 3)
track = np.concatenate((track, newtrack), axis=0)
predictframe = np.squeeze(new,0)[::,::,0][margin:pix-margin,margin:pix-margin]
#_forecastframe = pd.DataFrame({'pixno':gridpix[totpop[margin:pix-margin,margin:pix-margin]>0].flatten(),
#'predict':predictframe[totpop[margin:pix-margin,margin:pix-margin]>0].flatten()})
#_forecastframe['day'] = i
#_forecastframe['grid'] = grid
#forecastframe = forecastframe.append(_forecastframe)
forecast_frames_grid = pd.DataFrame(forecast_frames_grid_array)
forecast_frames_grid.columns = colnames
return (forecast_frames_grid)
from statsmodels.tsa.arima_model import ARIMA
def forecast_gamma_model(frames_grid,span):
gamma_model = {}
T = max(frames_grid['day']) - span
pix = max(frames_grid['pixno'])
for grid in frames_grid['grid'].unique():
for pixno in range(1,pix+1):
t_series = np.array(frames_grid[(frames_grid['grid'] == grid) & (frames_grid['pixno'] == pixno) & (frames_grid['no_pat'] >0)]['gamma'])
if len(t_series) > 10 :
gamma_model[(grid,pixno)] = ARIMA(t_series, (2,1,2))
gamma_model[(grid,pixno)].fit()
return gamma_model
def forecast_gamma(forecast_frames_grid,grid,span):
_forecast_frames_grid = forecast_frames_grid[forecast_frames_grid['grid'] == grid]
_forecast_frames_grid = _forecast_frames_grid[_forecast_frames_grid['day'] >= max(_forecast_frames_grid['day']) - span]
gamma = np.array(_forecast_frames_grid.groupby(['pixno'])['gamma'].mean())
pix = np.int(np.sqrt(max(_forecast_frames_grid['pixno'])))
gamma = np.flip(gamma.reshape(pix,pix),0)
return gamma
def validate(ensemble,test,testout,test_gridday,frames_grid,margin, qt, spatial_channel = [], forecast_channel=[], calculate_channel = {}):
errorsum = 0
averagetotalerror = 0
cnt = 1
channels = test.shape[-1]
predicttotal = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
modules of info class, including cashinfo, indexinfo and fundinfo class
"""
import os
import csv
import datetime as dt
import json
import re
import logging
from functools import lru_cache
import pandas as pd
from bs4 import BeautifulSoup
from sqlalchemy import exc
import xalpha.remain as rm
from xalpha.cons import (
convert_date,
droplist,
myround,
opendate,
yesterday,
yesterdaydash,
yesterdayobj,
today_obj,
rget,
rget_json,
_float,
)
from xalpha.exceptions import FundTypeError, TradeBehaviorError, ParserFailure
from xalpha.indicator import indicator
_warnmess = "Something weird on redem fee, please adjust self.segment by hand"
logger = logging.getLogger(__name__)
def _shengoucal(sg, sgf, value, label):
"""
Infer the share of buying fund by money input, the rate of fee in the unit of %,
and netvalue of fund
    :param sg: positive float, the purchase amount in cash
    :param sgf: positive float, the purchase fee in percent, e.g. 0.15 means 0.15%
    :param value: positive float, the unit net asset value of the product
    :param label: integer, 1 means the share is rounded normally (half up), 2 means the share is truncated after two decimal places; cash amounts are always rounded normally
    :returns: tuple of two positive floats, the net purchase amount and the purchased share
"""
jsg = myround(sg / (1 + sgf * 1e-2))
share = myround(jsg / value, label)
return (jsg, share)
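# Worked example (assuming myround keeps two decimal places): _shengoucal(10000, 0.15, 1.5, 1)
# nets 10000 / 1.0015 = 9985.02 after the 0.15% front-end fee and buys 9985.02 / 1.5 = 6656.68
# shares under the normal rounding scheme.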
def _nfloat(string):
"""
deal with comment column in fundinfo price table,
    positive value for fenhong (dividend) and negative value for chaifen (split),
    keep other unrecognized patterns as the original string
:param string: string of input from original data
:returns: make fenhong and songpei as float number
"""
result = 0
if string:
try:
result = float(string)
except ValueError:
if re.match(r'"分红\D*(\d*\.\d*)\D*"', string):
result = float(re.match(r'"分红\D*(\d*\.\d*)\D*"', string).group(1))
elif re.match(r".*现金(\d*\.\d*)\D*", string):
result = float(re.match(r".*现金(\d*\.\d*)\D*", string).group(1))
elif re.match(r".*折算(\d*\.\d*)\D*", string):
result = -float(re.match(r".*折算(\d*\.\d*)\D*", string).group(1))
elif re.match(r'"拆分\D*(\d*\.\d*)\D*"', string):
result = -float(re.match(r'"拆分\D*(\d*\.\d*)\D*"', string).group(1))
elif re.match(r"\D*分拆(\d*\.\d*)\D*", string):
result = -float(re.match(r"\D*分拆(\d*\.\d*)\D*", string).group(1))
else:
logger.warning("The comment col cannot be converted: %s" % string)
result = string
return result
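# Examples of the comment strings handled above (the exact wording follows the Eastmoney data
# and may vary): _nfloat('"分红每份派现金0.05元"') -> 0.05 for a dividend, while a split comment
# such as _nfloat('"拆分折算比例1.05"') -> -1.05; anything unmatched is passed through unchanged.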
class FundReport:
"""
    Interface for browsing the various official reports of a fund.
"""
def __init__(self, code):
self.code = code
r = rget(
"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}".format(
code=code, type_="3"
),
headers={
"Referer": "http://fundf10.eastmoney.com/jjgg_{code}_3.html".format(
code=code
)
},
)
self.report_list = r.json()["Data"]
self.report_detail = {}
def get_report(self, no=0, id_=None):
"""
:param no: int。在type_=3 中的第no个报告。
:param id_: id 可由 :meth:`show_report_list` 中条目的对应 ID 得到
:return:
"""
if id_:
report_url = "https://np-cnotice-fund.eastmoney.com/api/content/ann?client_source=web_fund&show_all=1&art_code={id_}".format(
id_=id_
)
if not self.report_detail.get(no):
report_url = "https://np-cnotice-fund.eastmoney.com/api/content/ann?client_source=web_fund&show_all=1&art_code={id_}".format(
id_=self.report_list[no]["ID"]
)
# report_url = "http://fund.eastmoney.com/gonggao/{code},{id_}.html".format(
# code=self.code, id_=self.report_list[no]["ID"]
# )
# r = rget(report_url)
# b = BeautifulSoup(r.text, "lxml")
# seasonr = b.find("pre")
# sr = [s.string.strip() for s in seasonr.findAll("p") if s.string]
r = rget_json(report_url)
sr = r["data"]["notice_content"]
sr = [s.strip() for s in sr.split("\n") if s.strip()]
self.report_detail[no] = sr
return sr
def show_report_list(self, type_=3):
"""
        :param type_: int. The report tab index (0, 1, ...); for the meaning of each tab, refer to the fund report page on the Eastmoney (tiantian fund) site.
:return:
"""
r = rget(
"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}".format(
code=self.code, type_=str(type_)
),
headers={
"Referer": "http://fundf10.eastmoney.com/jjgg_{code}_3.html".format(
code=self.code
)
},
)
return r.json()["Data"]
def analyse_report(self, no=0):
l = self.get_report(no)
d = {}
d["title"] = ""
for s in l[:5]:
if s.startswith("基金管理"):
break
d["title"] += s + " "
for i, s in enumerate(l):
if s.startswith("业绩比较基准"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
if l[i + 1][0] != "本":
d["benchmark"] = ss[-1] + l[i + 1]
else:
d["benchmark"] = ss[-1]
elif s.startswith("基金管理人"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["company"] = ss[-1]
elif s.startswith("基金托管人"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["bank"] = ss[-1]
elif s.startswith("场内简称"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["shortname"] = ss[-1]
elif s.startswith("基金主代码"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["code"] = ss[-1]
elif s.startswith("报告期末基金份额总额"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["share"] = ss[-1]
elif s.startswith("基金合同生效日"):
ss = [s for s in s.split(" ") if s.strip()]
if len(ss) == 2:
d["start_date"] = ss[-1]
return d
@lru_cache()
def get_fund_holdings(code, year="", season="", month="", category="jjcc"):
"""
    Fetch the detailed underlying holdings of a fund.
    :param code: str. The 6-digit fund code.
    :param year: int. e.g. 2019
    :param season: int, 1, 2, 3 or 4
    :param month: Optional[int]. Usually derived from season; normally there is no need to set it.
    :param category: str. "stock" for stock holdings, "bond" for bond holdings. Eastmoney cannot automatically provide overseas fund holdings, and domestic FOF holdings are not supported yet.
    :return: pd.DataFrame or None. Returns None when there is no corresponding holding data.
"""
if not month and season:
month = 3 * int(season)
if category in ["stock", "stocks", "jjcc", "", "gp", "s"]:
category = "jjcc"
elif category in ["bond", "bonds", "zq", "zqcc", "b"]:
category = "zqcc"
else:
raise ParserFailure("unrecognized category %s" % category)
if code.startswith("F"):
code = code[1:]
r = rget(
"http://fundf10.eastmoney.com/FundArchivesDatas.aspx?type={category}&code={code}&topline=10&\
year={year}&month={month}".format(
year=str(year), month=str(month), code=code, category=category
),
headers={
"Host": "fundf10.eastmoney.com",
"Referer": "http://fundf10.eastmoney.com/ccmx_{code}.html".format(
code=code
),
},
)
if len(r.text) < 50:
return
# raise ParserFailure(
# "This fund has no holdings on stock or bonds in this period"
# )
s = BeautifulSoup(
re.match("[\s\S]*apidata={ content:(.*),arryear:", r.text).groups()[0], "lxml"
)
if len(s.text) < 30:
return
# raise ParserFailure(
# "This fund has no holdings on stock or bonds in this period"
# )
timeline = [
i.string for i in s.findAll("font", class_="px12") if i.text.startswith("2")
]
ind = 0
if month:
for i, d in enumerate(timeline):
if d.split("-")[1][-1] == str(month)[-1]: # avoid 09 compare to 9
ind = i
break
else:
return # not update to this month
t1 = s.findAll("table")[ind]
main = [[j.text for j in i.contents] for i in t1.findAll("tr")[1:]]
cols = [j.text for j in t1.findAll("tr")[0].contents if j.text.strip()]
icode = 1
iname = 2
iratio = 4
ishare = 5
ivalue = 6
for j, col in enumerate(cols):
if col.endswith("代码"):
icode = j
elif col.endswith("名称"):
iname = j
elif col.endswith("比例"):
iratio = j
elif col.startswith("持股数"):
ishare = j
elif col.startswith("持仓市值"):
ivalue = j
if category == "jjcc":
result = {"code": [], "name": [], "ratio": [], "share": [], "value": []}
for l in main:
result["code"].append(l[icode])
result["name"].append(l[iname])
result["ratio"].append(float(l[iratio][:-1]))
result["share"].append(_float(l[ishare]))
result["value"].append(_float(l[ivalue]))
elif category == "zqcc":
result = {"code": [], "name": [], "ratio": [], "value": []}
for l in main:
result["code"].append(l[1])
result["name"].append(l[2])
result["ratio"].append(float(l[3][:-1]))
result["value"].append(_float(l[4]))
return pd.DataFrame(result)
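# Hypothetical usage (the fund code is only an illustration): get_fund_holdings("000001",
# year=2019, season=4) gives a DataFrame with code/name/ratio/share/value columns for the top
# stock holdings of that quarter, or None when Eastmoney reports no holdings for the period.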
class basicinfo(indicator):
"""
Base class for info of fund, index or even cash,
which cannot be directly instantiate, the basic implementation consider
redemption fee as zero when shuhui() function is implemented
:param code: string of code for specific product
:param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init
:param save: boolean, when open the save option, automatically save the class to files
:param path: string, the file path prefix of IO. Or in sql case, path is the engine from sqlalchemy.
:param form: string, the format of IO, options including: 'csv','sql'
    :param round_label: int, default 0 or 1, label to the different round scheme of shares, reserved for fundinfo class. 1 means shares are truncated rather than rounded half up.
    :param dividend_label: int, default 0 or 1. 0 means cash dividend by default, 1 means dividend reinvestment by default. Either default can be overridden for a single transaction by a 0.05 entry in the trading record.
    :param value_label: int, default 0 or 1. 1 means the redemption numbers in the trading record are amounts of money rather than shares; this is only fully supported for money funds. Other NAV-based funds cannot support this option exactly to the cent, so it is not opened for them.
"""
def __init__(
self,
code,
fetch=False,
save=False,
path="",
form="csv",
round_label=0,
dividend_label=0,
value_label=0,
):
        # the incremental IO logic is handled entirely by the basicinfo class; concrete subclasses only need to implement _save_form, _fetch_form and the update function
self.code = code
self.round_label = round_label
self.dividend_label = dividend_label
self.value_label = value_label
self.specialdate = []
self.fenhongdate = []
self.zhesuandate = []
# compatible with new ``xa.set_backend()`` API
import xalpha.universal as xu
if (xu.ioconf["backend"] in ["csv", "sql"]) and (not path):
fetch = True
save = True
form = xu.ioconf["backend"]
path = xu.ioconf["path"]
if xu.ioconf["backend"] == "csv":
path = os.path.join(path, xu.ioconf["prefix"] + "INFO-")
self.format = form
if fetch is False:
self._basic_init() # update self. name rate and price table
else:
try:
self.fetch(path, self.format)
df = self.update() # update the price table as well as the file
if (df is not None) and save is True:
self.save(path, self.format, option="a", delta=df)
except (FileNotFoundError, exc.ProgrammingError) as e:
logger.info("no saved copy of %s" % self.code)
fetch = False
self._basic_init()
if (save is True) and (fetch is False):
self.save(path, self.format)
def _basic_init(self):
"""
set self. name rate and price (dataframe) as well as other necessary attr of info()
"""
# below lines are just showcase, this function must be rewrite by child classes
# self.name = 'unknown'
# self.rate = 0
# self.price = pd.DataFrame(data={'date':[],'netvalue':[],'comment':[]})
raise NotImplementedError
def shengou(self, value, date, fee=None):
"""
give the realdate deltacash deltashare tuple based on purchase date and purchase amount
if the date is not a trade date, then the purchase would happen on the next trade day, if the date is
in the furture, then the trade date is taken as yesterday.
:param value: the money for purchase
:param date: string or object of date
:param fee: the rate for shengou, default None and info.rate will be used, ok for most cases
:returns: three elements tuple, the first is the actual dateobj of commit
the second is a negative float for cashin,
the third is a positive float for share increase
"""
if fee is None:
fee = self.rate
row = self.price[self.price["date"] >= date].iloc[0]
share = _shengoucal(value, fee, row.netvalue, label=self.round_label + 1)[1]
return (row.date, -myround(value), share)
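    # Illustrative call (the numbers are assumptions, not from the source): with self.rate = 0.15
    # and a NAV of 1.5 on the next trade day, shengou(10000, "2020-01-03") returns roughly
    # (Timestamp("2020-01-03"), -10000.0, 6656.68) -- the cash leg is the rounded gross amount,
    # while the share count is computed from the fee-adjusted net amount.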
def shuhui(self, share, date, rem, value_label=None, fee=None):
"""
give the cashout considering redemption rates as zero.
if the date is not a trade date, then the purchase would happen on the next trade day, if the date is
        in the future, then the trade date is taken as yesterday.
:param share: float or int, number of shares to be sold. if value_label=1, its cash to be sold.
:param date: string or object of date
:param rem: positions with time list
:param value_label: default None, value_label will be chosen by info.value_label, determining
whether shuhui by share 0 or value 1. value_label = 0 will rewrite self.value_label = 1
:param fee: default None, determined automatically, suggested for most of the cases.
Otherwise 0.015 means 1.5% in shuhui, this is different than fee in shengou, where 1.5 is for 1.5% fee
:returns: three elements tuple, the first is dateobj
the second is a positive float for cashout,
the third is a negative float for share decrease
"""
if self.value_label == 0 or value_label == 0:
return self._shuhui_by_share(share, date, rem)
        elif self.value_label == 1:  # redeem by amount of money; only money funds with no redemption fee are supported
partprice = self.price[self.price["date"] >= date]
if len(partprice) == 0:
row = self.price[self.price["date"] < date].iloc[-1]
else:
row = partprice.iloc[0]
share = share / row.netvalue
return self._shuhui_by_share(share, date, rem, fee=fee)
def _shuhui_by_share(self, share, date, rem, fee=None):
date = convert_date(date)
tots = sum([remitem[1] for remitem in rem if remitem[0] <= date])
if share > tots:
sh = tots
else:
sh = share
partprice = self.price[self.price["date"] >= date]
if len(partprice) == 0:
row = self.price[self.price["date"] < date].iloc[-1]
else:
row = partprice.iloc[0]
value = myround(sh * row.netvalue)
if fee is not None:
value = (1 - fee) * value
return (
row.date,
value,
-myround(sh),
        )  # TODO: whether myround here should also depend on round_label remains to be verified
def info(self):
"""
print basic info on the class
"""
print("fund name: %s" % self.name)
print("fund code: %s" % self.code)
print("fund purchase fee: %s%%" % self.rate)
def __repr__(self):
return self.name
def save(self, path, form=None, option="r", delta=None):
"""
        save info to files; this function only dispatches to more specific functions
        :param path: string of the folder path prefix, or an engine object from sqlalchemy
        :param form: string, options: 'csv' or 'sql'
        :param option: string, 'r' for replace and 'a' for append output
        :param delta: if option is 'a', you have to specify delta, the incremental part of the price table
"""
if form is None:
form = self.format
if form == "csv" and option == "r":
self._save_csv(path)
elif form == "csv" and option == "a":
self._save_csv_a(path, delta)
elif form == "sql" and option == "r":
self._save_sql(path)
elif form == "sql" and option == "a":
self._save_sql_a(path, delta)
def _save_csv_a(self, path, df):
df.sort_index(axis=1).to_csv(
path + self.code + ".csv",
mode="a",
header=None,
index=False,
date_format="%Y-%m-%d",
)
def _save_sql_a(self, path, df):
df.sort_index(axis=1).to_sql(
"xa" + self.code, path, if_exists="append", index=False
)
def fetch(self, path, form=None):
"""
fetch info from files
        :param path: string of the folder path prefix, ending with / in the csv case;
            engine from sqlalchemy.create_engine() in the sql case.
        :param form: string, options: 'csv' or 'sql'
"""
if form is None:
form = self.format
if form == "csv":
self._fetch_csv(path)
elif form == "sql":
self._fetch_sql(path)
def update(self):
"""
        incrementally update the price table of the object and store the increment; suited to objects opened with fetch
        :returns: the incremental part of the price table, or None if no incremental part exists
"""
raise NotImplementedError
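# A sketch of the intended fetch/update/save workflow (hypothetical paths, using a concrete child class):
#     info = fundinfo("000311", fetch=True, path="./data/")   # load the cached copy
#     delta = info.update()                                   # incremental price rows, or None
#     if delta is not None:
#         info.save("./data/", option="a", delta=delta)       # append only the new rows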
class fundinfo(basicinfo):
"""
    class for a specific fund with basic info and daily values.
    The net values obtained usually go up to yesterday. Note that for QDII funds the net values end even earlier,
    so functions that assume data up to yesterday may misbehave at certain times; extra care is needed when
    handling QDII funds.
    :param code: str, the six-digit fund code
    :param round_label: integer 0 or 1; 1 means that on purchase the share is truncated after two decimal places.
        When the fund is on the cons.droplist list, the label is always set to 1 automatically; funds not on the
        list can explicitly set round_label=1.
    :param dividend_label: int, default 0, or 1. 0 means cash dividend by default, 1 means dividend reinvestment.
        Either default can be overridden for a single transaction via 0.05 on the record sheet.
    :param fetch: boolean, when the fetch option is on, the class tries fetching from local files first during init
    :param save: boolean, when the save option is on, the class is automatically saved to files
    :param path: string, the file path prefix of IO
    :param form: string, the format of IO, options including: 'csv'
"""
def __init__(
self,
code,
round_label=0,
dividend_label=0,
fetch=False,
save=False,
path="",
form="csv",
priceonly=False,
):
if round_label == 1 or (code in droplist):
label = 1 # the scheme of round down on share purchase
else:
label = 0
if code.startswith("F") and code[1:].isdigit():
code = code[1:]
elif code.startswith("M") and code[1:].isdigit():
raise FundTypeError(
"This code seems to be a mfund, use ``mfundinfo`` instead"
)
code = code.zfill(6) # 1234 is the same as 001234
self._url = (
"http://fund.eastmoney.com/pingzhongdata/" + code + ".js"
) # js url api for info of certain fund
self._feeurl = (
"http://fund.eastmoney.com/f10/jjfl_" + code + ".html"
) # html url for trade fees info of certain fund
self.priceonly = priceonly
super().__init__(
code,
fetch=fetch,
save=save,
path=path,
form=form,
round_label=label,
dividend_label=dividend_label,
)
self.special = self.price[self.price["comment"] != 0]
self.specialdate = list(self.special["date"])
# date with nonvanishing comment, usually fenhong or zhesuan
try:
self.fenhongdate = list(self.price[self.price["comment"] > 0]["date"])
self.zhesuandate = list(self.price[self.price["comment"] < 0]["date"])
except TypeError:
print("There are still string comments for the fund!")
def _basic_init(self):
if self.code.startswith("96"):
            self._hkfund_init()  # handle Mainland-Hong Kong mutual recognition funds
return
self._page = rget(self._url)
if self._page.status_code == 404:
raise ParserFailure("Unrecognized fund, please check fund code you input.")
if self._page.text[:800].find("Data_millionCopiesIncome") >= 0:
raise FundTypeError("This code seems to be a mfund, use mfundinfo instead")
l = re.match(
r"[\s\S]*Data_netWorthTrend = ([^;]*);[\s\S]*", self._page.text
).groups()[0]
l = l.replace("null", "None") # 暂未发现基金净值有 null 的基金,若有,其他地方也很可能出问题!
l = eval(l)
ltot = re.match(
r"[\s\S]*Data_ACWorthTrend = ([^;]*);[\s\S]*", self._page.text
).groups()[
0
] # .* doesn't match \n
ltot = ltot.replace("null", "None") ## 096001 总值数据中有 null!
ltot = eval(ltot)
## timestamp transform tzinfo must be taken into consideration
tz_bj = dt.timezone(dt.timedelta(hours=8))
infodict = {
"date": [
dt.datetime.fromtimestamp(int(d["x"]) / 1e3, tz=tz_bj).replace(
tzinfo=None
)
for d in l
],
"netvalue": [float(d["y"]) for d in l],
"comment": [_nfloat(d["unitMoney"]) for d in l],
}
        if len(l) == len(ltot):  # guard against mismatched total-value and net-value lengths; known affected fund: 502010
infodict["totvalue"] = [d[1] for d in ltot]
try:
rate = float(
eval(
re.match(
r"[\s\S]*fund_Rate=([^;]*);[\s\S]*", self._page.text
).groups()[0]
)
)
except ValueError:
rate = 0
logger.info("warning: this fund has no data for rate") # know cases: ETF
name = eval(
re.match(r"[\s\S]*fS_name = ([^;]*);[\s\S]*", self._page.text).groups()[0]
)
self.rate = rate
        # shengou rate from tiantianjijin; the fee discount for da'e shengou (large-amount purchases) is not considered
self.name = name # the name of the fund
df = pd.DataFrame(data=infodict)
df = df[df["date"].isin(opendate)]
df = df.reset_index(drop=True)
if len(df) == 0:
raise ParserFailure("no price table found for this fund %s" % self.code)
self.price = df[df["date"] <= yesterdaydash()]
# deal with the redemption fee attrs finally
if not self.priceonly:
self._feepreprocess()
def _feepreprocess(self):
"""
Preprocess to add self.feeinfo and self.segment attr according to redemption fee info
"""
feepage = rget(self._feeurl)
soup = BeautifulSoup(
feepage.text, "lxml"
) # parse the redemption fee html page with beautiful soup
somethingwrong = False
if not soup.findAll("a", {"name": "shfl"}):
somethingwrong = True
logger.warning("%s 基金赎回信息为空,可能由于该基金已终止运作" % self.code)
self.feeinfo = []
else:
self.feeinfo = [
item.string
for item in soup.findAll("a", {"name": "shfl"})[
0
].parent.parent.next_sibling.next_sibling.find_all("td")
if item.string != "---"
]
# this could be [], known case 510030
if not self.feeinfo or len(self.feeinfo) % 2 != 0:
somethingwrong = True
else:
for item in self.feeinfo:
if "开放期" in item or "封闭" in item or "开放日期" in item or "运作期" in item:
                    # no plan yet to fully handle redemption fees of periodically-open funds
somethingwrong = True
if somethingwrong:
logger.warning(
"%s 赎回费信息异常,多是因为定开基金,封闭基金或场内 ETF: %s" % (self.code, self.feeinfo)
)
self.feeinfo = ["小于7天", "1.50%", "大于等于7天", "0.00%"]
# print(self.feeinfo)
try:
self.segment = fundinfo._piecewise(self.feeinfo)
except (ValueError, IndexError) as e:
logger.warning(
"%s 赎回费信息抓取异常,请手动设定 ``self.segment`` 和 ``self.feeinfo``: %s"
% (self.code, self.feeinfo)
)
# below is default one
self.feeinfo = ["小于7天", "1.50%", "大于等于7天", "0.00%"]
self.segment = fundinfo._piecewise(self.feeinfo)
@staticmethod
def _piecewise(a):
"""
        Transform the word list into a pure-number segment list for the redemption fee, e.g. [[0,7],[7,365],[365]]
"""
b = [
(
a[2 * i]
.replace("持有期限", "")
.replace("开放运作期时持有", "")
.replace("不少于", "")
.replace("小于", "")
.replace("大于", "")
.replace("等于", "")
.replace("个", "")
.replace("持有", "")
.replace("以上", "")
.replace("以内", "")
.replace("的", "")
.replace("(含7天)", "")
.replace("份额持有时间", "")
).split(",")
for i in range(int(len(a) / 2))
]
# ['赎回时份额持有7天以内的', '1.50%', '持有7天以上(含7天),30天以内的', '0.10%', '赎回时份额持有满30天以上(含30天)的', '0.00%']
# ['由于本基金最短持有期限为三年,赎回费率设置为零。', '0.00%', '对持续持有期少于7日的投资者收取不低于1.5%的赎回费。', '1.50%']
# ['对持续持有期少于7日的投资者收取1.5%的赎回费并全额计入基金财产', '1.50%', '对于持续持有期大于等于7日的投资者不收取赎回费用。', '0.00%']
# print(b)
for j, tem in enumerate(b):
for i, num in enumerate(tem):
if num[-1] == "天":
num = int(num[:-1])
elif num[-1] == "月":
num = int(num[:-1]) * 30
elif num == ".5年":
num = 183
else:
num = int(float(num[:-1]) * 365)
b[j][i] = num
        if len(b[0]) == 1:  # sometimes the redemption fee is written as "at least one day"
b[0].insert(0, 0)
elif len(b[0]) == 2:
b[0][0] = 0
else:
print(_warnmess)
        for i in range(len(b) - 1):  # sometimes both redemption-fee intervals are written as closed intervals
if b[i][1] - b[i + 1][0] == -1:
b[i][1] = b[i + 1][0]
elif b[i][1] == b[i + 1][0]:
pass
else:
print(_warnmess)
return b
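    # A worked example of the transformation, using the default fee strings from _feepreprocess:
    #     fundinfo._piecewise(["小于7天", "1.50%", "大于等于7天", "0.00%"])
    #     # -> [[0, 7], [7]] : one fee tier below 7 days, another from 7 days on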
def feedecision(self, day):
"""
        give the redemption rate in percent based on the day difference between purchase and redemption
        :param day: integer, number of natural days between redemption and purchase
        :returns: float, the redemption fee rate, in percent
"""
i = -1
for seg in self.segment:
i += 2
if day - seg[0] >= 0 and (len(seg) == 1 or day - seg[-1] < 0):
return float(self.feeinfo[i].strip("%"))
return 0 # error backup, in case there is sth wrong in segment
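    # Example with the default segment [[0, 7], [7]] and fee list (hypothetical holding periods):
    #     somefund.feedecision(3)   # -> 1.5, held fewer than 7 days
    #     somefund.feedecision(30)  # -> 0.0, held 7 days or more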
def set_feeinfo(self, feeinfo):
"""
        manually set the correct redemption fee info
        :param feeinfo: List[str]
"""
self.feeinfo = feeinfo
self.segment = self._piecewise(feeinfo)
def set_price(self, col, date, value):
"""
        set or correct the comment or price info of a single day in the price table
        :param col: str, "comment", "netvalue" or "totvalue"
        :param date: "%Y%m%d"
        :param value: the new value for that cell
"""
self.price.loc[self.price["date"] == date, col] = value
## update special in case new comment is added
self.special = self.price[self.price["comment"] != 0]
self.specialdate = list(self.special["date"])
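    # Example (hypothetical correction of a missing dividend comment):
    #     somefund.set_price("comment", "2020-07-10", 0.05)
    #     # self.special and self.specialdate are refreshed automatically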
def shuhui(self, share, date, rem, value_label=None, fee=None):
"""
        give the cashout based on the rem terms, taking redemption fee rates into account
:returns: three elements tuple, the first is dateobj
the second is a positive float for cashout,
the third is a negative float for share decrease
"""
# value = myround(share*self.price[self.price['date']==date].iloc[0].netvalue)
date = convert_date(date)
partprice = self.price[self.price["date"] >= date]
if len(partprice) == 0:
row = self.price[self.price["date"] < date].iloc[-1]
else:
row = partprice.iloc[0]
soldrem, _ = rm.sell(rem, share, row.date)
value = 0
sh = myround(sum([item[1] for item in soldrem]))
for d, s in soldrem:
if fee is None:
tmpfee = self.feedecision((row.date - d).days) * 1e-2
else:
tmpfee = fee
value += myround(
s * row.netvalue * (1 - tmpfee)
) # TODO: round_label whether play a role here?
return (row.date, value, -sh)
def info(self):
super().info()
print("fund redemption fee info: %s" % self.feeinfo)
def _save_csv(self, path):
"""
        save the information and price table into path+code.csv; not recommended for manual use,
        just set the save flag to True when initializing the object
:param path: string of folder path
"""
s = json.dumps(
{
"feeinfo": self.feeinfo,
"name": self.name,
"rate": self.rate,
"segment": self.segment,
}
)
df = pd.DataFrame(
[[s, 0, 0, 0]], columns=["date", "netvalue", "comment", "totvalue"]
)
df = df.append(self.price, ignore_index=True, sort=True)
df.sort_index(axis=1).to_csv(
path + self.code + ".csv", index=False, date_format="%Y-%m-%d"
)
def _fetch_csv(self, path):
"""
        fetch the information and price table from path+code.csv; not recommended for manual use,
        just set the fetch flag to True when initializing the object
:param path: string of folder path
"""
try:
content = pd.read_csv(path + self.code + ".csv")
pricetable = content.iloc[1:]
datel = list(pd.to_datetime(pricetable.date))
self.price = pricetable[["netvalue", "totvalue", "comment"]]
self.price["date"] = datel
saveinfo = json.loads(content.iloc[0].date)
if not isinstance(saveinfo, dict):
raise FundTypeError("This csv doesn't looks like from fundinfo")
self.segment = saveinfo["segment"]
self.feeinfo = saveinfo["feeinfo"]
self.name = saveinfo["name"]
self.rate = saveinfo["rate"]
except FileNotFoundError as e:
# print('no saved copy of fund %s' % self.code)
raise e
def _save_sql(self, path):
"""
        save the information and price table into sql; not recommended for manual use,
        just set the save flag to True when initializing the object
:param path: engine object from sqlalchemy
"""
s = json.dumps(
{
"feeinfo": self.feeinfo,
"name": self.name,
"rate": self.rate,
"segment": self.segment,
}
)
df = pd.DataFrame(
[[pd.Timestamp("1990-01-01"), 0, s, 0]],
columns=["date", "netvalue", "comment", "totvalue"],
)
df = df.append(self.price, ignore_index=True, sort=True)
df.sort_index(axis=1).to_sql(
"xa" + self.code, con=path, if_exists="replace", index=False
)
def _fetch_sql(self, path):
"""
        fetch the information and price table from sql; not recommended for manual use,
        just set the fetch flag to True when initializing the object
:param path: engine object from sqlalchemy
"""
try:
content = pd.read_sql("xa" + self.code, path)
pricetable = content.iloc[1:]
commentl = [float(com) for com in pricetable.comment]
self.price = pricetable[["date", "netvalue", "totvalue"]]
self.price["comment"] = commentl
saveinfo = json.loads(content.iloc[0].comment)
if not isinstance(saveinfo, dict):
raise FundTypeError("This csv doesn't looks like from fundinfo")
self.segment = saveinfo["segment"]
self.feeinfo = saveinfo["feeinfo"]
self.name = saveinfo["name"]
self.rate = saveinfo["rate"]
except exc.ProgrammingError as e:
# print('no saved copy of %s' % self.code)
raise e
def _hk_update(self):
        # not yet certain the incremental update logic is bug-free; needs time to verify
        # note: dividends must be kept in sync during incremental updates
lastdate = self.price.iloc[-1].date
diffdays = (yesterdayobj() - lastdate).days
if diffdays == 0:
return None
import xalpha.universal as xu
df = xu.get_daily("F" + self.code, start=lastdate.strftime("%Y%m%d"))
df = df[df["date"].isin(opendate)]
df = df.reset_index(drop=True)
df = df[df["date"] <= yesterdayobj()]
df = df[df["date"] > lastdate]
if len(df) != 0:
r = self._hk_bonus(start=lastdate.strftime("%Y-%m-%d"))
df["comment"] = [0 for _ in range(len(df))]
df["netvalue"] = df["close"]
df = df.drop("close", axis=1)
df = df[df["date"].isin(opendate)] # ? 是否会过滤掉分红日
for d in r:
df.loc[df["date"] == d["EXDDATE"], "comment"] = d["BONUS"]
self.price = self.price.append(df, ignore_index=True, sort=True)
return df
def update(self):
"""
        function to incrementally update the price table after fetching the old one
"""
if self.code.startswith("96"):
return self._hk_update()
lastdate = self.price.iloc[-1].date
diffdays = (yesterdayobj() - lastdate).days
if (
diffdays == 0
): ## for some QDII, this value is 1, anyways, trying update is compatible (d+2 update)
return None
self._updateurl = (
"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code="
+ self.code
+ "&page=1&per=1"
)
con = rget(self._updateurl)
soup = BeautifulSoup(con.text, "lxml")
items = soup.findAll("td")
if dt.datetime.strptime(str(items[0].string), "%Y-%m-%d") == today_obj():
diffdays += 1
if diffdays <= 10:
self._updateurl = (
"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code="
+ self.code
+ "&page=1&per="
+ str(diffdays)
)
con = rget(self._updateurl)
soup = BeautifulSoup(con.text, "lxml")
items = soup.findAll("td")
elif (
diffdays > 10
): ## there is a 20 item per page limit in the API, so to be safe, we query each page by 10 items only
items = []
for pg in range(1, int(diffdays / 10) + 2):
self._updateurl = (
"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code="
+ self.code
+ "&page="
+ str(pg)
+ "&per=10"
)
con = rget(self._updateurl)
soup = BeautifulSoup(con.text, "lxml")
items.extend(soup.findAll("td"))
else:
raise TradeBehaviorError(
"Weird incremental update: the saved copy has future records"
)
date = []
netvalue = []
totvalue = []
comment = []
for i in range(int(len(items) / 7)):
ts = pd.Timestamp(str(items[7 * i].string))
if (ts - lastdate).days > 0:
date.append(ts)
netvalue.append(_float(items[7 * i + 1].string))
totvalue.append(_float(items[7 * i + 2].string))
comment.append(_nfloat(items[7 * i + 6].string))
else:
break
df = pd.DataFrame(
{
"date": date,
"netvalue": netvalue,
"totvalue": totvalue,
"comment": comment,
}
)
df = df.iloc[::-1] ## reverse the time order
df = df[df["date"].isin(opendate)]
df = df.reset_index(drop=True)
df = df[df["date"] <= yesterdayobj()]
if len(df) != 0:
self.price = self.price.append(df, ignore_index=True, sort=True)
return df
def get_holdings(self, year="", season="", month="", category="stock"):
return get_fund_holdings(
self.code, year, season=season, month=month, category=category
)
def get_stock_holdings(self, year="", season="", month=""):
"""
        details of individual stock holdings
:param year:
:param season:
:param month:
:return: pd.DataFrame
"""
return get_fund_holdings(
self.code, year, season=season, month=month, category="stock"
)
def get_bond_holdings(self, year="", season="", month=""):
"""
        details of bond holdings
:param year:
:param season:
:param month:
:return: pd.DataFrame
"""
return get_fund_holdings(
self.code, year, season=season, month=month, category="bond"
)
def get_portfolio_holdings(self, date=None):
"""
        portfolio weights of stocks, bonds and cash
:param date:
:return: Dict
"""
if date is None:
date = dt.datetime.now().strftime("%Y-%m-%d")
import xalpha.universal as xu
df = xu.get_daily("pt-F" + self.code, end=date)
if df is not None:
d = dict(df.iloc[-1])
del d["assets"], d["date"]
return d
else:
logger.warning("no portfolio information before %s" % date)
return
def get_industry_holdings(self, year="", season="", month="", threhold=0.5):
"""
        industry weights of the holdings
:param year:
:param season:
:param month:
        :param threhold: float, stocks whose holding percentage is below this value are skipped when aggregating industries, to speed things up
:return: Dict
"""
        # note: this API does not directly use the industry data from tiantianjijin, whose classification is odd:
        # a large share of industries are lumped into a generic "manufacturing" bucket, which is not very
        # representative for analysis, and there is not even a split for sectors such as consumer or healthcare
from xalpha.universal import ttjjcode, get_industry_fromxq
df = self.get_stock_holdings(year=year, season=season, month=month)
if df is None:
logger.warning(
"%s has no stock holdings in %s y %s s. (Possible reason: 链接基金,债券基金)"
% (self.code, year, season)
)
return
d = {}
for i, row in df.iterrows():
if row["ratio"] < threhold:
continue
code = ttjjcode(row["code"])
industry = get_industry_fromxq(code)["industryname"]
if not industry.strip():
logger.warning(
"%s has no industry information, cannot be classfied" % code
)
else:
if industry not in d:
d[industry] = 0
d[industry] += row["ratio"]
return d
def which_industry(self, threhold=1.0):
"""
Experimental API
        When a single industry weighs more than threhold times all the other industries combined, the fund is
        automatically judged to be a sector fund for that industry.
        Note that the industries here can be rather fine-grained, so holdings spread over several sub-industries
        of the same broad sector may be misjudged as a broad-based fund.
:param threhold: float
:return: str
"""
d = self.get_industry_holdings()
l = sorted([(k, v) for k, v in d.items()], key=lambda s: -s[1])
s0 = 0
if l and l[0] and l[0][1]:
s0 = l[0][1]
s1 = sum([l[i][1] for i in range(1, len(l))])
if s0 > threhold * s1:
return "行业基金: " + l[0][0]
else:
return "宽基基金"
def _hkfund_init(self):
import xalpha.universal as xu
        # mutual recognition funds also publish net values on mainland market holidays; filter these out for now, unsure whether this causes compatibility issues
self.meta = xu.get_rt("F" + self.code)
self.start = self.meta["startdate"]
self.name = self.meta["name"]
self.price = xu.get_daily("F" + self.code, start=self.start)
self.feeinfo = ["小于7天", "0.00%", "大于等于7天", "0.00%"] # 似乎该类型基金都不收取赎回费
self.segment = fundinfo._piecewise(self.feeinfo)
r = rget("http://overseas.1234567.com.cn/f10/FundSaleInfo/968012#SaleInfo")
b = BeautifulSoup(r.text, "lxml")
self.rate = _float(
[
c.strip()
for c in b.select(".HK_Fund_Table.BigText")[5].text.split("\n")
if c.strip()
][-1]
.split("|")[-1]
.strip()[:-1]
)
r = self._hk_bonus()
df = self.price
df["comment"] = [0 for _ in range(len(df))]
df["netvalue"] = df["close"]
df["date"] = pd.to_datetime(df["date"])
df = df[df["date"].isin(opendate)] # ? 是否会过滤掉分红日
for d in r:
df.loc[df["date"] == d["EXDDATE"], "comment"] = d["BONUS"]
df = df.drop("close", axis=1)
self.price = df
def _hk_bonus(self, start=None):
"""
        fetch the dividend records of a Mainland-Hong Kong mutual recognition fund
        :param start: "%Y-%m-%d" string, defaults to None, in which case the first date of the price table is used
        :type start: str, optional
"""
import xalpha.universal as xu
todaydash = today_obj().strftime("%Y-%m-%d")
if not start:
start = self.price.iloc[0]["date"].strftime("%Y-%m-%d")
pagesize = int(
(today_obj() - dt.datetime.strptime(start, "%Y-%m-%d")).days / 5
        )  # assumes no fund pays dividends more than once a week
self.hkfcode = xu.get_hkfcode(self.code)
r = rget_json(
"http://overseas.1234567.com.cn/overseasapi/OpenApiHander.ashx?\
api=HKFDApi&m=MethodJZ&hkfcode={hkfcode}&action=3&pageindex=0&pagesize={pagesize}&date1={startdash}&date2={enddash}&callback=".format(
hkfcode=self.hkfcode,
pagesize=pagesize,
startdash=start,
enddash=todaydash,
)
)
return r["Data"]
class indexinfo(basicinfo):
"""
    Get the everyday close price of a specific index.
    In the self.price table, the totvalue column is the real index
    while the netvalue column is normalized to 1 at the start date.
    In principle, this class can also be used to store stock prices, but the prices are not adjusted.
    :param code: string with seven digits! Note the code here has an extra digit at the beginning,
        0 for Shanghai (sh) and 1 for Shenzhen (sz).
    :param value_label: int, default 0, or 1. If set to 1, the numbers on the record sheet are redeemed by value.
:param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init
:param save: boolean, when open the save option, automatically save the class to files
:param path: string, the file path prefix of IO
:param form: string, the format of IO, options including: 'csv'
"""
def __init__(
self, code, value_label=0, fetch=False, save=False, path="", form="csv"
):
date = yesterday()
if code.startswith("SH") and code[2:].isdigit():
code = "0" + code[2:]
elif code.startswith("SZ") and code[2:].isdigit():
code = "1" + code[2:]
self.rate = 0
self._url = (
"http://quotes.money.163.com/service/chddata.html?code="
+ code
+ "&start=19901219&end="
+ date
+ "&fields=TCLOSE"
)
super().__init__(
code, value_label=value_label, fetch=fetch, save=save, path=path, form=form
)
def _basic_init(self):
raw = rget(self._url)
cr = csv.reader(raw.text.splitlines(), delimiter=",")
my_list = list(cr)
factor = float(my_list[-1][3])
dd = {
"date": [
dt.datetime.strptime(my_list[i + 1][0], "%Y-%m-%d")
for i in range(len(my_list) - 1)
],
"netvalue": [
float(my_list[i + 1][3]) / factor for i in range(len(my_list) - 1)
],
"totvalue": [float(my_list[i + 1][3]) for i in range(len(my_list) - 1)],
"comment": [0 for _ in range(len(my_list) - 1)],
}
index = | pd.DataFrame(data=dd) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import unittest
import pkgutil
import io
from datetime import timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.prophet_detector import (
ProphetDetectorModel,
ProphetScoreFunction,
)
from kats.utils.simulator import Simulator
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
def load_data(file_name):
ROOT="kats"
if "kats" in os.getcwd().lower():
path = 'data/'
else:
path = 'kats/data/'
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding='utf8')
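# A hypothetical usage sketch (the file name is illustrative; it must exist under the kats data folder):
#     df = load_data("air_passengers.csv")
#     print(df.head())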
class TestProphetDetector(TestCase):
def create_random_ts(self, seed, length, magnitude, slope_factor):
np.random.seed(seed)
sim = Simulator(n=length, freq="1D", start=pd.to_datetime("2020-01-01"))
sim.add_trend(magnitude=magnitude * np.random.rand() * slope_factor)
sim.add_seasonality(
magnitude * np.random.rand(),
period=timedelta(days=7),
)
sim.add_noise(magnitude=0.1 * magnitude * np.random.rand())
return sim.stl_sim()
def create_ts(
self, seed=0, length=100, magnitude=10, signal_to_noise_ratio=0.1, freq="1D"
):
np.random.seed(seed)
sim = Simulator(n=length, freq=freq, start=pd.to_datetime("2020-01-01"))
sim.add_seasonality(magnitude, period=timedelta(days=7))
sim.add_noise(magnitude=signal_to_noise_ratio * magnitude)
return sim.stl_sim()
def create_multi_seasonality_ts(
self, seed, length, freq, min_val, max_val, signal_to_noise_ratio
):
np.random.seed(seed)
sim = Simulator(n=length, freq=freq, start=pd.to_datetime("2020-01-01"))
magnitude = (max_val - min_val) / 2
sim.add_trend(-0.2 * magnitude)
sim.add_seasonality(
magnitude * (2 / 3) * np.random.rand() * 2,
period=timedelta(days=1),
)
sim.add_seasonality(
magnitude * (1 / 3) * np.random.rand(),
period=timedelta(days=0.5),
)
sim.add_seasonality(
magnitude * 0.2 * np.random.rand(),
period=timedelta(days=7),
)
sim.add_noise(magnitude=signal_to_noise_ratio * magnitude)
sim_ts = sim.stl_sim()
self.add_trend_shift(sim_ts, length, freq, min_val + magnitude)
return sim_ts
def add_smooth_anomaly(self, ts, seed, start_index, length, magnitude):
# Add an anomaly that is half of a sine wave
# start time and freq don't matter, since we only care about the values
np.random.seed(seed)
anomaly_sim = Simulator(n=length, freq="1D", start=pd.to_datetime("2020-01-01"))
anomaly_sim.add_seasonality(magnitude, period=timedelta(days=2 * length))
# anomaly_sim.add_noise(magnitude=0.3 * magnitude * np.random.rand())
anomaly_ts = anomaly_sim.stl_sim()
for i in range(0, length):
ts.value.iloc[start_index + i] += anomaly_ts.value[i]
def truncate(self, ts, start_index, end_index):
# Set all values outside the range [start_index, end_index) to 0
ts.value.iloc[:start_index] *= 0
ts.value.iloc[end_index:] *= 0
def add_trend_shift(self, ts, length, freq, magnitude):
ts_df = ts.to_dataframe()
sim = Simulator(n=length, freq=freq, start=pd.to_datetime("2020-01-01"))
elevation = sim.trend_shift_sim(
cp_arr=[0, 1],
trend_arr=[0, 0, 0],
noise=0,
seasonal_period=1,
seasonal_magnitude=0,
intercept=magnitude,
)
elevation_df = elevation.to_dataframe()
ts_df_elevated = (
ts_df.set_index("time") + elevation_df.set_index("time")
).reset_index()
elevated_ts = TimeSeriesData(df=ts_df_elevated)
ts.value = elevated_ts.value
def horiz_translate(self, ts, periods):
ts.value = ts.value.shift(periods=periods, fill_value=0)
def add_multiplicative_noise(self, ts, magnitude):
# Multiply all the values in ts by a number in the range [1-magnitude, 1+magnitude]
ts.value *= np.random.rand(len(ts)) * magnitude * 2 + 1 - magnitude
def merge_ts(self, ts1, ts2):
ts1_df, ts2_df = ts1.to_dataframe(), ts2.to_dataframe()
merged_df = (ts1_df.set_index("time") + ts2_df.set_index("time")).reset_index()
merged_ts = TimeSeriesData(df=merged_df)
return merged_ts
def add_multi_event(
self,
baseline_ts,
seed,
length,
freq,
min_val,
max_val,
signal_to_noise_ratio,
event_start_ratio,
event_end_ratio,
event_relative_magnitude,
):
np.random.seed(seed)
sim = Simulator(n=length, freq=freq, start= | pd.to_datetime("2020-01-01") | pandas.to_datetime |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
( | pd.Series(dtype="float") | pandas.Series |
from pathlib import Path
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import us
def create_prevalence_df(file_path, population_group):
"""
Creates a data frame that includes the prevalences and the demographic data
Parameters:
file_path: A folder with pq outputs to compare
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A DataFrame where the rows are distinct demographic and prevalence numbers."""
    # create a list of all the csvs in path
all_files = list(file_path.glob("**/*"))
# import census location data
    # define an empty list to build the df from
all_df = []
# import files
if population_group == "Pediatric":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
# read in sex as outputed from pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
# read in race as outputed from pq
race = (
df[df["Order"] == 7]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
# read in location code as outputed from pq
location_code = (
df[df["Order"] == 10]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
# read in age as outputed from pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 11]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
if population_group == "Adult":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
# read in sex as outputed from pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
# read in race as outputed from pq
race = (
df[df["Order"] == 8]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
# read in location code as outputed from pq
location_code = (
df[df["Order"] == 11]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
# read in age as outputed from pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 12]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
all_df = pd.concat(all_df, axis=0, ignore_index=True, sort=True)
all_data = all_df[all_df["Order"] == 1].drop(columns="Order")
std_data = all_data.drop(
columns=[
"Crude Prevalence",
"Weighted Prevalence",
"Age-Adjusted Prevalence",
"Sample",
"Population",
]
)
prev_data = all_data.drop(
columns=[
"Crude Prevalence Standard Error",
"Weighted Prevalence Standard Error",
"Age-Adjusted Prevalence Standard Error",
"Sample",
"Population",
]
)
prev_data_melt = prev_data.melt(
id_vars=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
],
value_name="Prevalence",
var_name="Prevalence type",
)
std_melt = std_data.melt(
id_vars=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
],
value_name="Standard Error",
var_name="Prevalence type",
)
prev_data_melt["Prevalence type"] = prev_data_melt["Prevalence type"].str.split(
expand=True
)[0]
std_melt["Prevalence type"] = std_melt["Prevalence type"].str.split(expand=True)[0]
output_name = prev_data_melt.merge(
std_melt,
on=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
"Prevalence type",
],
how="left",
)
output_name["Prevalence"] = output_name["Prevalence"].replace({".": np.NAN})
output_name["Standard Error"] = output_name["Standard Error"].replace({".": np.NAN})
return output_name
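# A hypothetical usage sketch (the folder path is illustrative):
#     from pathlib import Path
#     prev_df = create_prevalence_df(Path("./pq_outputs/pediatric/"), "Pediatric")
#     prev_df[["Weight Category", "Prevalence type", "Prevalence", "Standard Error"]].head()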
def create_population_df(file_path, population_group):
"""creates a data frame that includes the population numbers and the demographic data.
Population numbers come from American Community Survey
Parameters:
file_path: A folder with pq outputs to compare
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A DataFrame where the rows are distinct demographic and prevalence numbers."""
    # create a list of all the csvs in path
all_files = list(file_path.glob("**/*"))
    # define an empty list to build the df from
all_df = []
# import files
if population_group == "Pediatric":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
# read in sex as outputed from pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
# read in race as outputed from pq
race = (
df[df["Order"] == 7]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
# read in location code as outputed from pq
location_code = (
df[df["Order"] == 10]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
# read in age as outputed from pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 11]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
if population_group == "Adult":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
# read in sex as outputed from pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
# read in race as outputed from pq
race = (
df[df["Order"] == 8]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
# read in location code as outputed from pq
location_code = (
df[df["Order"] == 11]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
# read in age as outputed from pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 12]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
all_df = | pd.concat(all_df, axis=0, ignore_index=True, sort=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
"""
# last version = plot_corr_mx_concate_time_linux_v1.6.0.py
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import librosa
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
#################################
f_dB = lambda x : 20 * np.log10(np.abs(x))
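# Quick sanity check of the dB conversion (values are illustrative):
#     f_dB(1.0)   # -> 0.0 dB
#     f_dB(0.1)   # -> -20.0 dB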
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_win = pd.DataFrame()
# implement the window function
for i in range(1408):
temp_EFR_window = pd.DataFrame((df_EFR.iloc[i,:1024] * win_hamming).values.reshape(1,1024))
temp_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_win = df_EFR_win.append(pd.concat([temp_EFR_window, temp_EFR_label], axis=1, ignore_index=True))
# set the title of columns
# df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
# df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_win = df_EFR_win.sort_values(by=["Condition", "Subject"])
df_EFR_win = df_EFR_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_sorted = df_EFR.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_sorted = df_EFR_sorted.reset_index(drop=True)
df_EFR_win_sorted = df_EFR_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_win_sorted = df_EFR_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
df_EFR_85 = pd.DataFrame(df_EFR_sorted.iloc[1056:, :])
df_EFR_85 = df_EFR_85.reset_index(drop=True)
df_EFR_win_85 = pd.DataFrame(df_EFR_win_sorted.iloc[1056:, :])
df_EFR_win_85 = df_EFR_win_85.reset_index(drop=True)
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 96060
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
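# Frequency-resolution note: with sampling_rate = 9606 Hz and n = 1024 samples the FFT bin width is
# sampling_rate / n ≈ 9.38 Hz; zero-padding to n2 = 96060 samples interpolates the spectrum onto a 0.1 Hz grid.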
# zero padding
# for df_EFR
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
df_EFR_85_data = df_EFR_85.iloc[:, :1024]
df_EFR_85_label = df_EFR_85.iloc[:, 1024:]
df_EFR_85_mid = pd.DataFrame(np.zeros((352, 18698)))
df_EFR_85_withzero = pd.concat([df_EFR_85_data, df_EFR_85_mid, df_EFR_85_label], axis=1)
df_EFR_85_withzero.columns = np.append(np.arange(19722), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# normalization
# normalize the dataframe by standard deviation
df_EFR_85_data_std = df_EFR_85_data.std(axis=1)
df_EFR_85_data_norm_std = (df_EFR_85_data.iloc[:, :1024]).div(df_EFR_85_data_std, axis=0)
df_EFR_85_vsc = pd.concat([df_EFR_85_data, df_EFR_85_label], axis=1).sort_values(by=["Vowel", "Subject", "Condition"]).reset_index(drop=True)
df_EFR_85_vsc_norm_std = pd.concat([df_EFR_85_data_norm_std, df_EFR_85_label], axis=1).sort_values(by=["Vowel", "Subject", "Condition"]).reset_index(drop=True)
df_EFR_85_withzero_vsc = df_EFR_85_withzero.sort_values(by=["Vowel", "Subject", "Condition"]).reset_index(drop=True)
df_EFR_85_withzero_cvs = df_EFR_85_withzero.sort_values(by=["Condition", "Vowel", "Subject"]).reset_index(drop=True)
df_EFR_85_withzero_cvs_r = df_EFR_85_withzero_cvs.iloc[0:176, :].reset_index(drop=True)
df_EFR_85_withzero_cvs_t = df_EFR_85_withzero_cvs.iloc[176:352, :].reset_index(drop=True)
df_EFR_85_vsc_a = df_EFR_85_vsc.iloc[0:88, :1024]
df_EFR_85_vsc_e = df_EFR_85_vsc.iloc[88:176, :1024].reset_index(drop=True)
df_EFR_85_vsc_n = df_EFR_85_vsc.iloc[176:264, :1024].reset_index(drop=True)
df_EFR_85_vsc_u = df_EFR_85_vsc.iloc[264:352, :1024].reset_index(drop=True)
df_EFR_85_vsc_norm_std_a = df_EFR_85_vsc_norm_std.iloc[0:88, :1024]
df_EFR_85_vsc_norm_std_e = df_EFR_85_vsc_norm_std.iloc[88:176, :1024]
df_EFR_85_vsc_norm_std_n = df_EFR_85_vsc_norm_std.iloc[176:264, :1024]
df_EFR_85_vsc_norm_std_u = df_EFR_85_vsc_norm_std.iloc[264:352, :1024]
df_EFR_85_withzero_vsc_a = df_EFR_85_withzero_vsc.iloc[0:88, :19722]
df_EFR_85_withzero_vsc_e = df_EFR_85_withzero_vsc.iloc[88:176, :19722]
df_EFR_85_withzero_vsc_n = df_EFR_85_withzero_vsc.iloc[176:264, :19722]
df_EFR_85_withzero_vsc_u = df_EFR_85_withzero_vsc.iloc[264:352, :19722]
df_EFR_85_withzero_cvs_r_a = df_EFR_85_withzero_cvs_r.iloc[0:44, :19722]
df_EFR_85_withzero_cvs_r_a_label = df_EFR_85_withzero_cvs_r.iloc[0:44, 19722:]
df_EFR_85_withzero_cvs_r_e = df_EFR_85_withzero_cvs_r.iloc[44:88, :19722]
df_EFR_85_withzero_cvs_r_n = df_EFR_85_withzero_cvs_r.iloc[88:132, :19722]
df_EFR_85_withzero_cvs_r_u = df_EFR_85_withzero_cvs_r.iloc[132:176, :19722]
df_EFR_85_withzero_cvs_t_a = df_EFR_85_withzero_cvs_t.iloc[0:44, :19722]
df_EFR_85_withzero_cvs_t_e = df_EFR_85_withzero_cvs_t.iloc[44:88, :19722]
df_EFR_85_withzero_cvs_t_n = df_EFR_85_withzero_cvs_t.iloc[88:132, :19722]
df_EFR_85_withzero_cvs_t_u = df_EFR_85_withzero_cvs_t.iloc[132:176, :19722]
# concatenate AENU
temp1 = pd.concat([df_EFR_85_vsc_a, df_EFR_85_vsc_e], axis=1)
from numpy.core.fromnumeric import shape
import pytest
import pandas as pd
import datetime
from fast_trade.build_data_frame import (
build_data_frame,
detect_time_unit,
load_basic_df_from_csv,
apply_transformers_to_dataframe,
apply_charting_to_df,
prepare_df,
process_res_df,
)
def test_detect_time_unit_ms():
mock_timestring = 1595115901734
result = detect_time_unit(mock_timestring)
assert result == "ms"
def test_detect_time_unit_s():
mock_timestring = 1595115901
result = detect_time_unit(mock_timestring)
assert result == "s"
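# The two tests above imply a simple digit-count heuristic for detect_time_unit:
# 13-digit epoch values are treated as milliseconds and 10-digit values as seconds.
# A minimal illustrative sketch (an assumption, not fast_trade's actual implementation):
# def detect_time_unit_sketch(timestamp):
#     return "ms" if len(str(int(timestamp))) >= 13 else "s"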
def test_load_basic_df_from_csv_str_1():
mock_data_path = "./test/ohlcv_data.csv.txt"
result_df = load_basic_df_from_csv(mock_data_path)
header = list(result_df.head())
assert "close" in header
assert "open" in header
assert "high" in header
assert "low" in header
assert "volume" in header
assert result_df.index.name == "date"
def test_load_basic_df_from_csv_list_1():
mock_data_path = "./test/ohlcv_data.csv.txt"
result_df = load_basic_df_from_csv(mock_data_path)
expected_line = [0.01404, 0.01, 0.025, 0.01, 3117.0]
assert list(result_df.iloc[1]) == expected_line
def test_load_basic_df_from_csv_str_error_1():
mock_data_path = "./test/SomeFakeNews.csv.txt"
with pytest.raises(Exception, match=r"File not found:*"):
load_basic_df_from_csv(mock_data_path)
def test_apply_transformers_to_dataframe_1_ind():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [
{"transformer": "sma", "name": "example_transformer_name", "args": [3]}
]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
header = list(result_df.head())
assert "example_transformer_name" in header
assert "FAKE_transformer_name" not in header
def test_apply_transformers_to_dataframe_no_args():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [{"transformer": "rsi", "name": "rsi", "args": []}]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
assert "rsi" in list(result_df.columns)
def test_apply_transformers_to_dataframe_no_args_multi_col():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt")
mock_transformers = [{"transformer": "wto", "name": "wto", "args": []}]
result_df = apply_transformers_to_dataframe(mock_df, mock_transformers)
assert "wto_wt1" in list(result_df.columns)
assert "wto_wt2" in list(result_df.columns)
def test_apply_charting_to_df_1():
mock_df = pd.read_csv("./test/ohlcv_data.csv.txt", index_col="date")
# mock_df.set_index(["date"], inplace=True)
mock_df.index = pd.to_datetime(mock_df.index, unit="s")
import pandas as pd
import xml.etree.ElementTree as ET
import lxml.etree as etree
most_serious_problem = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/most_serious_problem/special_eb_most_serious_problem_final.csv")
personally_taken_action = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/personally_taken_action/special_eb_personally_taken_action_final.csv")
severity_of_problem = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/severity_of_problem/special_eb_severity_of_problem_final.csv")
who_is_responsible = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/who_is_responsible/special_eb_who_is_responsible_final.csv")
share_renewable = pd.read_csv("../data/original_data/statistical_data/share_renewable/share_renewable.tsv", sep="\t|,")
share_renewable.drop(share_renewable.columns[[0,1]], axis=1, inplace=True)
share_renewable['source'] = 'share_renewable'
ghg_emissions = pd.read_csv("../data/original_data/statistical_data/ghg_emissions/ghg_emissions.tsv", sep="\t")
ghg_emissions['source'] = 'ghg_emissions'
data = [share_renewable, ghg_emissions]
data_annuals = pd.concat(data)
"""
Functions used to compile water quality data from files that have already undergone basic formatting to have the same
column headers and units. List of data sources is available in readme.md file.
Functions:
* format_lake_data: Create additional columns for date and sampling frequency and round to daily means
* calc_growth_window: Detects the growth window for each lake in each year it's sampled using the daily mean dataframe,
and sifts for the data within the growth window and during the pre-growth window period
* growth_window_means: Calculates rates and mean values for environmental variables during each growth window and during
the pre-growth window period
* gw_summary: prints a summary of statistics for bloom type and lake trophic status in the dataset
* select_daily_mean:
* get_tsi: calculate the trophic status index (TSI) for each lake and create a dataframe with columns for lake, TSI, and
trophic status
* get_coords_ts: assign coordinates and trophic status to each lake
* lake_summary:
<NAME>
"""
import pandas as pd
from dplython import DplyFrame, X, sift, select, arrange, mutate
import numpy as np
from scipy.signal import find_peaks
from scipy.signal import savgol_filter
def format_lake_data(all_lakes):
"""
General formatting for lake data. Adds columns for date (year, month, day, and day of year) and calculates the
number of samples collected each year. Creates a separate dataframe rounded to the daily mean and sifted for at
least 6 samples collected per year.
input:
all_lakes: Compiled DplyFrame containing in situ data for all lakes to be analyzed
output:
all_lakes: Compiled data with additional columns (not rounded to daily mean)
daily_mean: additional data frame containing the daily mean values for all numerical parameters
"""
# convert columns to appropriate data type
all_lakes.loc[:, 'chla'] = pd.to_numeric(all_lakes.loc[:, 'chla'])
all_lakes.loc[:, 'temp'] = pd.to_numeric(all_lakes.loc[:, 'temp'])
# convert date to datetime and create additional columns
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
all_lakes.loc[:, 'year'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).year
all_lakes.loc[:, 'month'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).month
all_lakes.loc[:, 'day'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).day
all_lakes.loc[:, 'day_of_year'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D').dayofyear
# round to the nearest day and convert back to datetime
all_lakes.loc[:, 'date'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D')
all_lakes.loc[:, 'date'] = all_lakes.loc[:, 'date'].astype(str)
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
# calculate daily mean
daily_mean = DplyFrame(all_lakes.groupby(['lake', 'date'], as_index=False).mean())
# arrange by date and drop rows where chlorophyll-a is not a number (nan)
daily_mean = daily_mean >> arrange(X.date)
daily_mean.dropna(subset=['chla'], inplace=True)
# add column for number of samples
master_mean_df = pd.DataFrame()
for name, group in daily_mean.groupby(['lake', 'year']):
group.loc[:, 'num_samples'] = len(group['chla'])
master_mean_df = DplyFrame(pd.concat([master_mean_df, group], axis=0))
daily_mean = DplyFrame(master_mean_df) >> sift(X.num_samples >= 6)
return all_lakes, daily_mean
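# Minimal usage sketch for the function above; "in_situ_df" is a hypothetical compiled
# dataframe holding the columns referenced in the docstring (lake, date, chla, temp, ...):
# all_lakes, daily_mean = format_lake_data(DplyFrame(in_situ_df))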
def calc_growth_window(df, threshold_inc, num_sample_threshold):
"""
    Detects the growth window period based on the rate of change in chlorophyll-a concentration that has been
    smoothed with the Savitzky-Golay filter. First, optima are flagged in the data using the find_peaks function,
    indicating the end of a growth window. The growth window begins at the preceding minimum or where the rate
    first rises above the threshold_inc rate threshold (and if it never exceeds that threshold, it begins where the
    rate increases above zero). Daily mean data is sifted for samples collected both within the growth window and
during the 1 and 2 weeks leading up to it (the pre-growth window), to be analyzed by the growth_window_means
function. See associated manuscript for full explanation of methods and rationale.
input:
df: DplyFrame containing daily mean in situ data for all lakes to be analyzed (from format_lake_data)
threshold_inc: minimum chlorophyll-a rate of change to constitute the start of the growth window when there
is no minimum flagged in the data.
num_sample_threshold: Minimum number of samples per year that will be retained in the growth window dataset.
output:
master_gw_df: Water quality data for all detected growth windows, compiled into one DplyFrame
springsummer_gw_doy: Dataframe containing the day of year for the start and end of each growth window
master_prev_2weeks_gw_df: Compiled water quality data for each 2 week pre-growth window
"""
# make empty dataframes (will be appended to later)
master_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day', 'chla_increase', 'chla_roc',
'chla', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
master_prev_2weeks_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day',
'chla', 'chla_roc', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
# sift data for minimum sampling frequency
df = df >> sift(X.num_samples >= num_sample_threshold)
for name, group in df.groupby(['lake', 'year']): # group by lake and year to detect growth windows
group.reset_index(inplace=True)
# determine savgol_filter window length (smaller window for fewer samples)
if group.loc[0, 'num_samples'] <= 15:
window_len = 3
else:
window_len = 5
# 1) smooth the data and find location of the optima along the smoothed line
savgol = savgol_filter(group['chla'], window_length=window_len, polyorder=1)
group.loc[:, 'savgol_chla'] = savgol
# calculate chlorophyll rate of change and flag all days above the threshold as true
group.loc[:, 'chla_roc'] = group.loc[:, 'savgol_chla'].diff() / group.loc[:, 'day_of_year'].diff()
group.loc[:, 'chla_increase'] = group.loc[:, 'chla_roc'].gt(threshold_inc)
# find peaks and minima
y = group['savgol_chla']
peaks, properties = find_peaks(y, prominence=2)
y2 = y * -1 # use -y to find the minima
minima, min_properties = find_peaks(y2, prominence=0.5)
# flag peaks in the dataframe
peaks = DplyFrame(peaks)
peak_df = group.loc[group.index.intersection(peaks[0])]
peak_df['max_flag'] = True
group = pd.merge(group, (peak_df >> select(X.day_of_year, X.max_flag)), how='left', left_on='day_of_year',
right_on='day_of_year')
# flag minima in the dataframe
minima = DplyFrame(minima)
trough_df = group.loc[group.index.intersection(minima[0])]
trough_df['min_flag'] = True
group = pd.merge(group, (trough_df >> select(X.day_of_year, X.min_flag)), how='left',
left_on='day_of_year', right_on='day_of_year')
# 2) find spring and summer or single growth windows for lakes with 2 or 1 defined peaks, respectively
num_peaks = len(group['max_flag'].dropna()) # count the number of optima in the data
if num_peaks == 2: # spring and summer growth windows occur
# find end date of growth window
spring_end_index = group.where(group.max_flag == True).first_valid_index()
spring_end_day = group.loc[spring_end_index, 'day_of_year']
# find start date of growth window
spring_group = group >> sift(X.day_of_year < spring_end_day)
num_minima = len(spring_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
spring_start_index = spring_group.where(spring_group.chla_increase == True).first_valid_index()
if spring_start_index is None: # if there is no valid increase beforehand
spring_start_index = spring_group.where(spring_group.chla_roc > 0).first_valid_index() # find first day with a rate above zero
if spring_start_index is None:
spring_start_day = spring_group.loc[spring_group.first_valid_index(), 'day_of_year'] # select first sampling day
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > 0
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > threshold_inc
if num_minima > 0: # a previous minimum is present
spring_start_index = spring_group.where(spring_group.min_flag == True).last_valid_index() # select day with minimum closest to the max
spring_start_day = spring_group.loc[spring_start_index, 'day_of_year']
# sift growth window data based on start and end dates
spring_gw = group >> sift(X.day_of_year <= spring_end_day) >> sift(X.day_of_year >= spring_start_day)
spring_gw.loc[:, 'season'] = 'spring'
spring_gw.loc[:, 'start_day'] = spring_start_day
spring_gw.loc[:, 'end_day'] = spring_end_day
# sift out 1 and 2 week pre-growth window data
spring_prev_2weeks_start_day = spring_start_day - 15
prev_2weeks_spring_df = group >> sift(X.day_of_year >= spring_prev_2weeks_start_day) >> sift(
X.day_of_year <= spring_start_day)
prev_2weeks_spring_df.loc[:, 'season'] = 'spring'
prev_2weeks_spring_df.loc[:, 'start_day'] = spring_prev_2weeks_start_day
prev_2weeks_spring_df.loc[:, 'end_day'] = spring_start_day
# append spring gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, spring_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_spring_df], axis=0)
# sift out spring data and repeat for summer
summer_df = group >> sift(X.day_of_year > spring_end_day)
# find end date of growth window
summer_end_index = summer_df.where(summer_df.max_flag == True).first_valid_index()
summer_end_day = summer_df.loc[summer_end_index, 'day_of_year']
# find start date of growth window
summer_group = summer_df >> sift(X.day_of_year < summer_end_day)
num_minima = len(summer_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
summer_start_index = summer_group.where(summer_group.chla_increase == True).first_valid_index()
if summer_start_index is None:
summer_start_index = summer_group.where(summer_group.chla_roc > 0).first_valid_index()
if summer_start_index is None:
summer_start_day = summer_group.loc[summer_group.first_valid_index(), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index-1), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index - 1), 'day_of_year']
if num_minima > 0: # a previous min is present
summer_start_index = summer_group.where(summer_group.min_flag == True).first_valid_index()
summer_start_day = summer_group.loc[summer_start_index, 'day_of_year']
# sift summer growth window data based on start and end dates
summer_gw = summer_df >> sift(X.day_of_year <= summer_end_day) >> sift(X.day_of_year >= summer_start_day)
summer_gw.loc[:, 'season'] = 'summer'
summer_gw.loc[:, 'start_day'] = summer_start_day
summer_gw.loc[:, 'end_day'] = summer_end_day
# sift out 1 and 2 week pre-growth window data
summer_prev_2weeks_start_day = summer_start_day - 15
prev_2weeks_summer_df = group >> sift(X.day_of_year >= summer_prev_2weeks_start_day) >> sift(
X.day_of_year <= summer_start_day)
prev_2weeks_summer_df.loc[:, 'season'] = 'summer'
prev_2weeks_summer_df.loc[:, 'start_day'] = summer_prev_2weeks_start_day
prev_2weeks_summer_df.loc[:, 'end_day'] = summer_start_day
# append summer gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, summer_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_summer_df], axis=0)
if num_peaks == 1: # single growth window
# find end date of growth window
single_gw_end_index = group.where(group.max_flag == True).first_valid_index()
single_gw_end_day = group.loc[single_gw_end_index, 'day_of_year']
# find start date of growth window
single_group = group >> sift(X.day_of_year < single_gw_end_day)
num_minima = len(single_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
single_gw_start_index = single_group.where(single_group.chla_increase == True).first_valid_index()
if single_gw_start_index is None:
single_gw_start_index = single_group.where(single_group.chla_roc > 0).first_valid_index()
if single_gw_start_index is None:
single_gw_start_day = single_group.loc[single_group.first_valid_index(), 'day_of_year']
else:
single_gw_start_day = single_group.loc[(single_gw_start_index-1), 'day_of_year']
else:
single_gw_start_day = single_group.loc[(single_gw_start_index - 1), 'day_of_year']
if num_minima > 0: # a previous min is present
single_gw_start_index = single_group.where(single_group.min_flag == True).last_valid_index()
single_gw_start_day = single_group.loc[single_gw_start_index, 'day_of_year']
# sift single growth window data based on start and end dates
single_gw_gw = single_group >> sift(X.day_of_year <= single_gw_end_day) >> sift(X.day_of_year >= single_gw_start_day)
single_gw_gw.loc[:, 'season'] = 'single'
single_gw_gw.loc[:, 'start_day'] = single_gw_start_day
single_gw_gw.loc[:, 'end_day'] = single_gw_end_day
# sift out 1 and 2 week pre-growth window data
single_gw_prev_2weeks_start_day = single_gw_start_day - 15
prev_2weeks_single_gw_df = group >> sift(X.day_of_year >= single_gw_prev_2weeks_start_day) >> sift(
X.day_of_year <= single_gw_start_day)
prev_2weeks_single_gw_df.loc[:, 'season'] = 'single'
prev_2weeks_single_gw_df.loc[:, 'start_day'] = single_gw_prev_2weeks_start_day
prev_2weeks_single_gw_df.loc[:, 'end_day'] = single_gw_start_day
# append single gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, single_gw_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_single_gw_df], axis=0)
# create a separate doy file
springsummer_gw_doy = DplyFrame(master_gw_df) >> select(X.lake, X.year, X.season, X.start_day, X.end_day)
springsummer_gw_doy.drop_duplicates(inplace=True)
return master_gw_df, springsummer_gw_doy, master_prev_2weeks_gw_df
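# Usage sketch for the detector above; the threshold values here are illustrative
# assumptions, not values prescribed by this module:
# gw_data, gw_doy, pre_gw_data = calc_growth_window(df=daily_mean, threshold_inc=0.4,
#                                                   num_sample_threshold=6)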
def growth_window_means(spring_and_summer_doy, spring_and_summer_selected, prev_2weeks_springsummer_data, min_gw_length, t_max, t_min, t_opt):
"""
This function calculates chlorophyll-a rate, maximum chlorophyll-a concentration, accumulated chlorophyll-a,and mean
values for environmental variables during each growth window. Mean water temperature, solar radiation, and total
phosphorus is calculated for the pre-growth window period. The chlorophyll-a rate of increase is corrected for
temperature using the f_temp calculation (Rosso et al., 1995).
input:
spring_and_summer_doy: dataframe with the start and end day of year for each growth window
spring_and_summer_selected: dataframe with the chlorophyll concentration and temperature for each sampling
day within each growth window
prev_2weeks_springsummer_data: dataframe containing all lake data for the 2 weeks leading up to the spring and summer growth windows
min_gw_length: minimum length for the growth window (set to 5 for now)
t_max: maximum temperature for the f_temp function
t_min: minimum temperature for the f_temp function
t_opt: optimum temperature for the f_temp function
output:
springsummer_gw_data: dataframe with a row for each lake/year/season with the chlorophyll rate of increase and
mean temperature during the growth window and pre-growth window period
"""
print('calculating means')
# calculate growth window length in "spring and summer doy" file and merge with "spring and summer selected"
spring_and_summer_doy = spring_and_summer_doy >> mutate(growth_window_length=X.end_day - X.start_day)
springsummer_data = pd.merge(spring_and_summer_selected, spring_and_summer_doy, how='left',
left_on=['lake', 'year', 'season', 'start_day', 'end_day'],
right_on=['lake', 'year', 'season', 'start_day', 'end_day'])
# make an empty dataframe
springsummer_gw_data = pd.DataFrame(columns=['lake', 'year', 'season', 'chla_rate', 'max_chla', 'poc_rate', 'chla_to_poc',
'gw_temp', 'gw_tp', 'gw_srp', 'gw_secchi', 'gw_ph',
'gw_tkn', 'gw_tdn', 'gw_length',
'start_day', 'end_day', 'specific_chla_rate', 'f_temp',
'temp_corrected_specific_chla_rate'])
for name, group in springsummer_data.groupby(['lake', 'year', 'season']):
first_index = group.first_valid_index() # first index in the group
last_index = group.last_valid_index() # last index in the group
group.loc[:, 'gw_length'] = group.loc[last_index, 'day_of_year'] - group.loc[first_index, 'day_of_year'] # growth window length (days)
# calculate the chlorophyll-a rate, specific rate, and max concentration
group.loc[:, 'chla_max-min'] = group.loc[last_index, 'chla'] - group.loc[first_index, 'chla']
group.loc[:, 'chla_rate'] = group.loc[:, 'chla_max-min'] / group.loc[:, 'gw_length']
group.loc[:, 'specific_chla_rate'] = group.loc[:, 'chla_rate'] / group.loc[first_index, 'chla']
group.loc[:, 'max_chla'] = group.loc[:, 'chla'].max()
# Calculate accumulated chlorophyll-a as the area under the curve during the growth window
group.loc[:, 'acc_chla'] = np.trapz(group.loc[:, 'savgol_chla'], x=group.loc[:, 'day_of_year'])
# calculate the rate of change in poc concentration (mg/L)
group.loc[:, 'poc_max-min'] = group.loc[last_index, 'poc'] - group.loc[first_index, 'poc']
group.loc[:, 'poc_rate'] = group.loc[:, 'poc_max-min'] / group.loc[:, 'gw_length']
# calculate chla:poc ratio after converting chlorophyll-a to mg/L
group.loc[:, 'chla_to_poc'] = (group.loc[:, 'chla']/1000) /group.loc[:, 'poc']
# calculate mean environmental variables during the window
group.loc[:, 'gw_temp'] = group.loc[:, 'temp'].mean()
mean_temp = group.loc[:, 'temp'].mean() # save mean temperature as an object for f_temp calculation
group.loc[:, 'gw_tp'] = group.loc[:, 'tp'].mean()
group.loc[:, 'gw_secchi'] = group.loc[:, 'secchi'].mean()
group.loc[:, 'gw_poc'] = group.loc[:, 'poc'].mean()
group.loc[:, 'gw_ph'] = group.loc[:, 'ph'].mean()
group.loc[:, 'gw_tkn'] = group.loc[:, 'tkn'].mean()
group.loc[:, 'gw_srp'] = group.loc[:, 'srp'].mean()
# calculate f_temp
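# (Rosso et al. 1995 CTMI form: f(T) = (T - t_max)(T - t_min)^2 /
#  [(t_opt - t_min)((t_opt - t_min)(T - t_opt) - (t_opt - t_max)(t_opt + t_min - 2T))],
#  which equals 1 at T = t_opt and falls to 0 at t_min and t_max)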
group.loc[:, 'f_temp'] = (mean_temp - t_max) * (mean_temp - t_min) ** 2 / (
(t_opt - t_min) * ((t_opt - t_min) * (mean_temp - t_opt) - (t_opt - t_max) * (
t_opt + t_min - 2 * mean_temp)))
# divide specific growth rate by f_temp
group.loc[:, 'temp_corrected_specific_chla_rate'] = group.loc[:, 'specific_chla_rate'] / group.loc[:, 'f_temp']
# keep one row for each lake/year/season and append each group to the output dataframe
chla_temp = group.head(1)
springsummer_gw_data = pd.concat([springsummer_gw_data, chla_temp], axis=0)
# -*- coding: utf-8 -*-
# @author: <NAME>
# @date: 2020-11
'''
This file will help you get the information you need from Baidu Map or Amap via their official APIs
- Baidu: http://lbsyun.baidu.com/index.php?title=webapi
- Amap: https://lbs.amap.com/api/webservice/summary
Continuously being updated...
'''
import osmnx as ox
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import argparse
import requests
import math
import json
import time
import numpy as np
import re
import seaborn as sns
import os
import matplotlib.pyplot as plt
from logging import warning
pd.set_option('display.max_columns', None)
plt.rcParams['font.sans-serif']=['Arial Unicode MS']
plt.rcParams['axes.unicode_minus']=False
import warnings
warnings.filterwarnings("ignore")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--map_type', type=str, default='baidu') # amap
parser.add_argument('--baidu_ak', type=str, default='<KEY>')
parser.add_argument('--amap_ak', type=str, default='a2e1307eb761e7ac6f3a87b7e95f234c')
parser.add_argument('--location', type=tuple, default=(31.221613,121.419054)) # area around which POIs are crawled
parser.add_argument('--poi_names', type=tuple, default=('酒店','学校','美食','银行','电影院','KTV'))
parser.add_argument('--capitals', type=tuple, default=('成都市','哈尔滨市','重庆市','长春市','北京市','天津市','石家庄市'
,'济南市','沈阳市','上海市','呼和浩特市','南京市','杭州市','广州市',
'长沙市','昆明市','南宁市','太原市','南昌市','郑州市','兰州市',
'合肥市','武汉市','贵阳市','西宁市','乌鲁木齐市','银川市','福州市',
'海口市','拉萨市','台北市'))
args = parser.parse_known_args()[0]
return args
class CrawlBase(object):
'''Base class'''
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def __init__(self,args=get_args()):
self.map_type=args.map_type
self.location=args.location
self.baidu_ak=args.baidu_ak
self.amap_ak = args.amap_ak
self.poi_names = args.poi_names
@classmethod
def _lat(cls,lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * cls.pi) + 20.0 *
math.sin(2.0 * lng * cls.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * cls.pi) + 40.0 *
math.sin(lat / 3.0 * cls.pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * cls.pi) + 320 *
math.sin(lat * cls.pi / 30.0)) * 2.0 / 3.0
return ret
@classmethod
def _lng(cls, lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * cls.pi) + 20.0 *
math.sin(2.0 * lng * cls.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * cls.pi) + 40.0 *
math.sin(lng / 3.0 * cls.pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * cls.pi) + 300.0 *
math.sin(lng / 30.0 * cls.pi)) * 2.0 / 3.0
return ret
@classmethod
def out_of_china(cls,lng, lat):
return not (lng > 73.66 and lng < 135.05 and lat > 3.86 and lat < 53.55)
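# e.g. out_of_china(121.42, 31.22) -> False (a point in Shanghai falls inside the bounding box)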
@classmethod
def bd09_to_gcj02(cls,bd_lon, bd_lat):
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * cls.x_pi)
theta = math.atan2(y, x) - 0.000003 * math.cos(x * cls.x_pi)
gg_lng = z * math.cos(theta)
gg_lat = z * math.sin(theta)
return [gg_lng, gg_lat]
@classmethod
def gcj02_to_wgs84(cls,lng, lat):
dlat = CrawlBase._lat(lng - 105.0, lat - 35.0)
dlng = CrawlBase._lng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * cls.pi
magic = math.sin(radlat)
magic = 1 - cls.ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((cls.a * (1 - cls.ee)) / (magic * sqrtmagic) * cls.pi)
dlng = (dlng * 180.0) / (cls.a / sqrtmagic * math.cos(radlat) * cls.pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
@classmethod
def bd09_to_wgs84(cls,bd_lon, bd_lat):
lon, lat = CrawlBase.bd09_to_gcj02(bd_lon, bd_lat)
return CrawlBase.gcj02_to_wgs84(lon, lat)
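# Usage sketch: convert a BD-09 (Baidu) point to WGS-84 before plotting with osmnx/geopandas;
# the coordinates below are illustrative only:
# wgs_lng, wgs_lat = CrawlBase.bd09_to_wgs84(121.419054, 31.221613)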
@classmethod
def point(cls,arr):
return Point(arr['wgs_lng'],arr['wgs_lat'])
def target_map(self,target_poi):
place = gpd.GeoDataFrame([Point(self.location[1], self.location[0])])
place.columns = ['geometry']
G = ox.graph_from_point(center_point=(self.location[0], self.location[1]), dist=2000, network_type='drive') # dist
G_gdf = ox.graph_to_gdfs(G)
###### load the crawled POI data ######
df=pd.read_csv(os.path.join('.',self.map_type,'POI',target_poi+'.csv'),encoding='utf-8',engine='python')
## convert Baidu BD-09 coordinates to WGS-84 (WGS1984)
for i in df.index:
df.loc[i, 'wgs_lng'] = CrawlBase.bd09_to_wgs84(df.loc[i, 'lng'], df.loc[i, 'lat'])[0]
df.loc[i, 'wgs_lat'] = CrawlBase.bd09_to_wgs84(df.loc[i, 'lng'], df.loc[i, 'lat'])[1]
df['geometry'] = df.apply(CrawlBase.point, axis=1)
df = gpd.GeoDataFrame(df)
#############################
base = G_gdf[1].plot(figsize=(10, 10), edgecolor='grey') # road edges
west, east = base.get_xlim()
south, north = base.get_ylim()
G_gdf[0].plot(ax=base, color='blue') # nodes
plt.scatter(df['wgs_lng'],df['wgs_lat'],s=8,c='r',label=target_poi) # cmap='Paired'
plt.legend()
#############################
base = G_gdf[1].plot(figsize=(10, 10), edgecolor='blue', alpha=0.3) # road edges
sns.kdeplot(df['wgs_lng'], df['wgs_lat'], shade=True, shade_lowest=False, cmap='Greys', n_levels=5,
alpha=0.8, legend=False)
df.plot(ax=base, color='red', markersize=5)
plt.xlim(west, east)
plt.ylim(south, north)
plt.title('{}POI空间分布核密度图'.format(target_poi), fontsize=20)
plt.show()
def get_area_poi_infos(self):
pass
def get_route_infos(self):
pass
def get_weather_infos(self):
pass
def get_road_infos(self):
pass
def get_migrat_index(self):
pass
class CrawlBaidu(CrawlBase):
'''
Crawl relevant information by calling the Baidu Map web service API
url:http://lbsyun.baidu.com/index.php?title=webapi/guide/webservice-placeapi
'''
def __init__(self, *args, **kwargs):
super(CrawlBaidu, self).__init__()
self.save_dir = './baidu/POI'
def __getattr__(self, item):
pass
def get_area_poi_infos(self,*args):
'''
:param args: if no POI names are passed, all default POIs are crawled; otherwise only the specified POIs are crawled
:return:
'''
if args:
names=args
else:
names=self.poi_names
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
for name in names:
print('正在获取{}信息...'.format(name))
place_names, lats, lons, address = [], [], [], []
for i in range(0,20):
_url = 'http://api.map.baidu.com/place/v2/search?query={}&' \
'location={},{}&coord_type=1&radius=5000&' \
'page_size=20&page_num={}&output=json&ak={}'.format(name,self.location[0],self.location[1],i,self.baidu_ak)
data = requests.get(_url).json()
items = data['results']
for item in items:
place_names.append(item['name'])
lats.append(item['location']['lat'])
lons.append(item['location']['lng'])
address.append(item['address'])
time.sleep(2) # sleep for 2 s between requests
data = pd.DataFrame({'name': place_names, 'address': address, 'lat': lats, 'lng': lons})
data.to_csv(os.path.join(self.save_dir,'{}.csv'.format(name)))
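# Usage sketch (assumes a valid Baidu AK is configured in get_args; the POI name is one of
# the defaults declared above):
# crawler = CrawlBaidu()
# crawler.get_area_poi_infos('酒店')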
def get_route_infos(self):
pass
def traj_revise(self,filedir):
'''
Trajectory rectification
Corrects drift in one or more trajectories by removing noise points, snapping to roads,
supplementing road shape points, and thinning, in order to recover the true track
Limitations:
- A single request supports batch rectification of up to 2000 track points (trajectory length up to 500 km)
- Applies rectification strategies specific to driving, cycling, and walking modes, and identifies and handles stay-point drift separately
- Can return the road class and speed-limit information of each track point, which developers can use for alerts and driving-behaviour monitoring
'''
_url='http://api.map.baidu.com/rectify/v1/track?point_list={}&' \
'rectify_option={}&supplement_mode={}&extensions={}&ak={}'
df_tra = pd.read_csv(filedir, engine='python')
import pandas as pd
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import mapping_career_causeways.text_cleaning_utils as text_cleaning_utils
def tfidf_keywords(p, dataframe, text_field, stopwords, N=10):
"""
Fast method to generate keywords characterising each cluster
Parameters
----------
p (list or nd.array):
Cluster integer labels
dataframe (pandas.DataFrame):
Dataframe with information about the clustered nodes.
text_field (string):
Column name of the 'dataframe' that contains the text corpus
to be used for keyword extraction.
stopwords (list of strings):
Specific words which should be excluded from the text corpus.
N (int)
Number of keywords to use
Returns
-------
tfidf_keywords (list of strings):
Strings containing cluster keywords
tfidf_keywords_ (list of strings):
Strings containing the cluster number and cluster keywords
"""
# Collect text for each cluster & remove custom stopwords
cluster_text = []
for c in range(len(np.unique(p))):
t=" ".join(dataframe.loc[p==c][text_field].to_list())
for stopword in stopwords:
t=re.sub(stopword,'',t)
cluster_text.append(t)
# Further clean the text (see 'text_cleaning_utils' for more details)
clust_descriptions_clean = []
for descr in cluster_text:
text = text_cleaning_utils.clean_text(descr)
text = text_cleaning_utils.remove_stopwords(text)
text = text_cleaning_utils.lemmatise(text)
clust_descriptions_clean.append(text)
# Find keywords using tf-idf vectors
vectorizer = TfidfVectorizer(ngram_range=(1, 2))
vectors = vectorizer.fit_transform(clust_descriptions_clean)
names = vectorizer.get_feature_names()
Data = vectors.todense().tolist()
# Create a dataframe with the results
df = pd.DataFrame(Data, columns=names)
#june 2014
#determine genes in copy number variants for TNBC
#genes lost at this stage are not relevant to triple negative
import csv
import math
import numpy as np
import scipy
from scipy import stats
from scipy import misc
import matplotlib.pyplot as plt
import math
import itertools
from itertools import zip_longest
import pandas as pd
#in order to create a candidate CNV file for a large number of genes,
#I need to automatically pull out the genomic coordinates for build hg19 for each gene
#function to transpose
def transpose(mylist):
return [list(i) for i in zip(*mylist)]
#function for significant digits
from math import log10, floor
def round_to_2(x):
digits = -int(floor(log10(x))-1)
digit_str = '.' + str(digits) + 'f'
return float(format(x, digit_str))
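# e.g. round_to_2(0.012345) -> 0.012 (keeps two significant figures)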
#function for testing if a string is a number
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
#function for finding binomial probability density with equal probabilities
def BinProb(n,k):
p = 0.5
return misc.comb(n,k)*(p**k)*((1-p)**(n-k))
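# worked example: BinProb(10, 5) = C(10,5) * 0.5**10 = 252/1024 ~= 0.246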
#set amp and del cutoffs
ampcutoff = 2.**0.3
delcutoff = 2.**-0.3
#CNV data for candidate genes and pathological information by TCGA barcode
with open('BRCA_CNVs_genes_foldchange_processed.csv', 'r') as CNV_cand:
cand = csv.reader(CNV_cand)
cand_genes = next(cand)
cand_genes = list(cand_genes)[1:-2]
print('Initial Gene List:')
print(len(cand_genes), 'genes') #12029
path = pd.read_csv('../BRCA_pathology_2014.csv',header=0,names=['TCGA_ID','ER_Status','PR_Status','HER2_Status'])
path.set_index('TCGA_ID',drop=False,inplace=True)
#print(path.head())
CNVs = pd.read_csv('BRCA_CNVs_genes_foldchange_processed.csv')
CNVs.set_index('TCGA_ID',drop=True,inplace=True)
allinfo = pd.concat([path, CNVs], axis=1, join='inner')
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
import re
import datetime as dt
from ftplib import FTP
import gzip
from zipfile import ZipFile
from pandas.compat import StringIO
from pandas import read_csv, DataFrame, to_datetime
from pandas_datareader.base import _BaseReader
from pandas_datareader._utils import RemoteDataError
from pandas_datareader.compat import BytesIO, is_number
_URL_FULL = 'edgar/full-index/master.zip'
_URL_DAILY = 'ftp://ftp.sec.gov/'
_SEC_FTP = 'ftp.sec.gov'
_COLUMNS = ['cik', 'company_name', 'form_type', 'date_filed', 'filename']
_COLUMN_TYPES = {'cik': str, 'company_name': str, 'form_type': str,
'date_filed': str, 'filename': str}
_DIVIDER = re.compile('--------------')
_EDGAR = 'edgar/'
_EDGAR_DAILY = 'edgar/daily-index'
_EDGAR_RE = re.compile(_EDGAR)
_EDGAR_MIN_DATE = dt.datetime(1994, 7, 1)
_ZIP_RE = re.compile('\.zip$')
_GZ_RE = re.compile('\.gz$')
_MLSD_VALUES_RE = re.compile('modify=(?P<modify>.*?);.*'
'type=(?P<type>.*?);.*'
'; (?P<name>.*)$')
_FILENAME_DATE_RE = re.compile('\w*?\.(\d*)\.idx')
_FILENAME_MASTER_RE = re.compile('master\.\d*\.idx')
_EDGAR_MAX_6_DIGIT_DATE = dt.datetime(1998, 5, 15)
class EdgarIndexReader(_BaseReader):
"""
Get master index from the SEC's EDGAR database.
Returns
-------
edgar_index : pandas.DataFrame.
DataFrame of EDGAR index.
"""
@property
def url(self):
if self.symbols == 'full':
return _URL_FULL
elif self.symbols == 'daily':
return _URL_DAILY
else:
return _URL_FULL # Should probably raise or use full unless daily.
def _read_zipfile(self, ftppath):
zipf = BytesIO()
try:
self._sec_ftp_session.retrbinary('RETR ' + ftppath, zipf.write)
except EOFError:
raise RemoteDataError('FTP server has closed the connection.')
zipf.seek(0)
with ZipFile(zipf, 'r') as zf:
data = zf.open(zf.namelist()[0]).read().decode()
return StringIO(data)
def _read_gzfile(self, ftppath):
zipf = BytesIO()
try:
self._sec_ftp_session.retrbinary('RETR ' + ftppath, zipf.write)
except EOFError:
raise RemoteDataError('FTP server has closed the connection.')
zipf.seek(0)
zf = gzip.GzipFile(fileobj=zipf, mode='rb')
try:
data = zf.read().decode('iso-8859-1')
finally:
zf.close()
return StringIO(data)
def _read_one_data(self, ftppath, params):
if re.search(_ZIP_RE, ftppath) is not None:
index_file = self._read_zipfile(ftppath)
elif re.search(_GZ_RE, ftppath) is not None:
index_file = self._read_gzfile(ftppath)
else:
index_file = StringIO()
index_list = []
try:
self._sec_ftp_session.retrlines('RETR ' + ftppath,
index_list.append)
except EOFError:
raise RemoteDataError('FTP server has closed the connection.')
for line in index_list:
index_file.write(line + '\n')
index_file.seek(0)
index_file = self._remove_header(index_file)
index = read_csv(index_file, delimiter='|', header=None,
index_col=False, names=_COLUMNS,
low_memory=False, dtype=_COLUMN_TYPES)
index['filename'] = index['filename'].map(self._fix_old_file_paths)
return index
def _read_daily_data(self, url, params):
doc_index = DataFrame()
file_index = self._get_dir_lists()
for idx_entry in file_index:
if self._check_idx(idx_entry):
daily_idx_path = (idx_entry['path'] + '/' + idx_entry['name'])
daily_idx = self._read_one_data(daily_idx_path, params)
doc_index = doc_index.append(daily_idx)
doc_index['date_filed'] = to_datetime(doc_index['date_filed'],
format='%Y%m%d')
doc_index.set_index(['date_filed', 'cik'], inplace=True)
return doc_index
def _check_idx(self, idx_entry):
if re.match(_FILENAME_MASTER_RE, idx_entry['name']):
if idx_entry['date'] is not None:
if (self.start <= idx_entry['date'] <= self.end):
return True
else:
return False
def _remove_header(self, data):
header = True
cleaned_datafile = StringIO()
for line in data:
if header is False:
cleaned_datafile.write(line + '\n')
elif re.search(_DIVIDER, line) is not None:
header = False
cleaned_datafile.seek(0)
return cleaned_datafile
def _fix_old_file_paths(self, path):
if type(path) == float: # pd.read_csv turns blank into np.nan
return path
if re.match(_EDGAR_RE, path) is None:
path = _EDGAR + path
return path
def read(self):
try:
return self._read()
finally:
self.close()
def _read(self):
try:
self._sec_ftp_session = FTP(_SEC_FTP, timeout=self.timeout)
self._sec_ftp_session.login()
except EOFError:
raise RemoteDataError('FTP server has closed the connection.')
try:
if self.symbols == 'full':
return self._read_one_data(self.url, self.params)
elif self.symbols == 'daily':
return self._read_daily_data(self.url, self.params)
finally:
self._sec_ftp_session.close()
def _sanitize_dates(self, start, end):
if is_number(start):
start = dt.datetime(start, 1, 1)
start = to_datetime(start)
if is_number(end):
end = dt.datetime(end, 1, 1)
end = to_datetime(end)
##### file path
### input
# data_set keys and lebels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
### out file
### intermediate file
# data partition with diffferent label
path_df_part_1_uic_label_0 = "df_part_1_uic_label_0.csv"
path_df_part_1_uic_label_1 = "df_part_1_uic_label_1.csv"
path_df_part_2_uic_label_0 = "df_part_2_uic_label_0.csv"
path_df_part_2_uic_label_1 = "df_part_2_uic_label_1.csv"
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
# scalers for data standardization store as python pickle
# for each part's features
path_df_part_1_scaler = "df_part_1_scaler"
path_df_part_2_scaler = "df_part_2_scaler"
import pandas as pd
import numpy as np
def df_read(path, mode='r'):
'''the definition of dataframe loading function
'''
path_df = open(path, mode)
try:
df = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
return df
def subsample(df, sub_size):
'''the definition of sub-sampling function
@param df: dataframe
@param sub_size: sub_sample set size
@return sub-dataframe with the same formation of df
'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
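# usage sketch: draw at most 100000 rows from a chunk of negatives (the size is an
# illustrative choice, not a value fixed by this script):
# negatives_sub = subsample(df_part_1_uic_label_0, sub_size=100000)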
########################################################################
'''Step 1: divide the data into positive and negative sub-sets by u-i-c-label keys
p.s. we first generate the u-i-c keys, then merge the feature data and process it chunk by chunk;
this somewhat awkward workflow is designed to keep memory usage manageable on a modest PC.
'''
df_part_1_uic_label = df_read(path_df_part_1_uic_label) # loading total keys
df_part_2_uic_label = df_read(path_df_part_2_uic_label)
df_part_1_uic_label_0 = df_part_1_uic_label[df_part_1_uic_label['label'] == 0]
df_part_1_uic_label_1 = df_part_1_uic_label[df_part_1_uic_label['label'] == 1]
df_part_2_uic_label_0 = df_part_2_uic_label[df_part_2_uic_label['label'] == 0]
df_part_2_uic_label_1 = df_part_2_uic_label[df_part_2_uic_label['label'] == 1]
df_part_1_uic_label_0.to_csv(path_df_part_1_uic_label_0, index=False)
df_part_1_uic_label_1.to_csv(path_df_part_1_uic_label_1, index=False)
df_part_2_uic_label_0.to_csv(path_df_part_2_uic_label_0, index=False)
df_part_2_uic_label_1.to_csv(path_df_part_2_uic_label_1, index=False)
#######################################################################
'''Step 2: clustering on negative sub-set
using mini-batch k-means (n_clusters is set below)
'''
# clustering based on sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
import pickle
##### part_1 #####
# loading features
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
# process by chunk because the set of u-i pairs is too large to hold in memory
# fit the scaler incrementally so it can standardize the full-scale data
scaler_1 = preprocessing.StandardScaler()
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=150000):
try:
# construct of part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
# getting all the complete features for clustering
train_X_1 = train_data_df_part_1.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# feature standardization
scaler_1.partial_fit(train_X_1)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
# initial clusters
mbk_1 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
classes_1 = []
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=15000):
try:
# construct of part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
train_X_1 = train_data_df_part_1.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# feature standardization
standardized_train_X_1 = scaler_1.transform(train_X_1)
# fit clustering model
mbk_1.partial_fit(standardized_train_X_1)
classes_1 = np.append(classes_1, mbk_1.labels_)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print(" ------------ k-means finished on part 1 ------------.")
break
del (df_part_1_U)
del (df_part_1_I)
del (df_part_1_C)
del (df_part_1_IC)
del (df_part_1_UI)
del (df_part_1_UC)
##### part_2 #####
# loading features
df_part_2_U = df_read(path_df_part_2_U)
df_part_2_I = df_read(path_df_part_2_I)
df_part_2_C = df_read(path_df_part_2_C)
df_part_2_IC = df_read(path_df_part_2_IC)
df_part_2_UI = df_read(path_df_part_2_UI)
df_part_2_UC = df_read(path_df_part_2_UC)
# process by chunk because the set of u-i pairs is too large to hold in memory
# fit the scaler incrementally so it can standardize the full-scale data
scaler_2 = preprocessing.StandardScaler()
batch = 0
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=150000):
try:
# construct part_2's sub-training set
train_data_df_part_2 = pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_I, how='left', on=['item_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_C, how='left', on=['item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UC, how='left', on=['user_id', 'item_category'])
train_X_2 = train_data_df_part_2.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# fit the scaler
scaler_2.partial_fit(train_X_2)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
# initial clusters
mbk_2 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
# process by chunk as ui-pairs size is too big
batch = 0
classes_2 = []
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=15000):
try:
# construct part_2's sub-training set
train_data_df_part_2 = pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_I, how='left', on=['item_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_C, how='left', on=['item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UC, how='left', on=['user_id', 'item_category'])
from pathlib import Path
import os
import pandas as pd
import tensorflow as tf
from six import StringIO
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators.function_docstring import GithubFunctionDocstring
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import translate
from tensor2tensor.data_generators.extract_raw_data import extract_data
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
from nltk.tokenize import RegexpTokenizer
from sklearn.model_selection import train_test_split
from usr_dir.utils import read_from_file
_CONALA_TRAIN_DATASETS = [
[
"gs://conala/",
("train/conala-train.intent",
"train/conala-train.code")
],
[
"gs://conala/",
("mined/conala-train-mined.intent", "mined/conala-train-mined.code")
],
]
@registry.register_problem
class SemanticSearch(text_problems.Text2TextProblem):
"""
"""
def __init__(self, was_reversed=False, was_copy=False):
super(SemanticSearch, self).__init__(
was_reversed=False, was_copy=False)
@property
def vocab_type(self):
return text_problems.VocabType.SUBWORD
@property
def base_url(self):
return "gs://conala"
@property
def test_file(self):
return '{}/{}'.format(self.base_url, "conala-test.json"), "conala-test.json"
@property
def file_names(self):
return [
"conala-mined.jsonl",
"conala-train.json"
]
@property
def pair_files_list(self):
"""
This function returns a list of (url, file name) pairs
"""
return [
('{}/{}'.format(self.base_url, name),
name)
for name in self.file_names
]
@property
def is_generate_per_split(self):
return True
@property
def approx_vocab_size(self):
return 2 ** 14 # ~16
@property
def max_samples_for_vocab(self):
return int(3.5e5)
@property
def oov_token(self):
return "UNK"
@classmethod
def github_data(cls, data_dir, tmp_dir, dataset_split):
"""
Using data from function_docstring problem
"""
github = GithubFunctionDocstring()
return github.generate_samples(data_dir, tmp_dir, dataset_split)
def maybe_download_conala(self, tmp_dir):
all_files = [
generator_utils.maybe_download(tmp_dir, file_name, uri)
for uri, file_name in self.pair_files_list
]
return all_files
def maybe_split_data(self, tmp_dir, extracted_files, use_mined=True):
train_file = os.path.join(
tmp_dir, 'conala-joined-prod-train.json' if use_mined else 'conala-prod-train.json')
valid_file = os.path.join(
tmp_dir, 'conala-joined-prod-valid.json' if use_mined else 'conala-prod-valid.json')
if tf.gfile.Exists(train_file) or tf.gfile.Exists(valid_file):
tf.logging.info("Not splitting, file exists")
else:
if use_mined:
df = self.join_mined_and_train(tmp_dir, extracted_files)
else:
train_path = os.path.join(tmp_dir, 'conala-train.json.prod')
assert tf.gfile.Exists(train_path)
df = pd.read_json(train_path)
train, valid = train_test_split(
df, test_size=0.10, random_state=42)
train[['intent_tokens', 'snippet_tokens']].to_json(train_file)
valid[['intent_tokens', 'snippet_tokens']].to_json(valid_file)
return train_file, valid_file
def join_mined_and_train(self, tmp_dir, extracted_files):
df = pd.DataFrame([])
for extracted_file in extracted_files:
if 'test' not in extracted_file:
file_path = os.path.join(tmp_dir, extracted_file)
df = df.append(pd.read_json(file_path),
ignore_index=True, sort=False)
return df
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator to return data samples.Returns the data generator to return.
Args:
data_dir: A string representing the data directory.
tmp_dir: A string representing the temporary directory and is¬
used to download files if not already available.
dataset_split: Train, Test or Eval.
Yields:
Each element yielded is of a Python dict of the form
{"inputs": "STRING", "targets": "STRING"}
"""
extracted_files, train_filename, valid_filename = self.process_files(
tmp_dir)
if dataset_split == problem.DatasetSplit.TRAIN:
df = pd.read_json(train_filename)
# %% Imports
import pandas
import altair
import datetime
import boto3
from plot_shared import get_chrome_driver
from data_shared import get_s3_csv_or_empty_df, get_ni_pop_pyramid
# %%
age_bands = pandas.read_excel('https://www.health-ni.gov.uk/sites/default/files/publications/health/doh-dd-030921.xlsx', sheet_name='Individuals 7 Days - 5yr Age')
age_bands['Total_Tests'] = age_bands['Positive_Tests'] + age_bands['Negative_Tests'] + age_bands['Indeterminate_Tests']
age_bands = age_bands.groupby('Age_Band_5yr').sum()[['Positive_Tests','Total_Tests']].reset_index()
age_bands['Positivity_Rate'] = age_bands['Positive_Tests'] / age_bands['Total_Tests']
age_bands['Band Start'] = age_bands['Age_Band_5yr'].str.extract('Aged (\d+)')
age_bands['Band End'] = age_bands['Age_Band_5yr'].str.extract('Aged \d+ - (\d+)')
# %%
#session = boto3.session.Session(profile_name='codeandnumbers')
#s3 = session.client('s3')
#datastore = get_s3_csv_or_empty_df(s3, 'ni-covid-tweets', 'DoH-DD/agebands.csv', ['Date'])
datastore = pandas.read_csv('../sam/agebands.csv')
datastore['Date'] = pandas.to_datetime(datastore['Date'])
datastore['Positive_Tests'] = datastore['Positive_Tests'].astype(int)
datastore['Total_Tests'] = datastore['Total_Tests'].astype(int)
datastore['Band Start'] = datastore['Band Start'].fillna(90).astype(int)
datastore = datastore.sort_values(['Date','Band Start']).reset_index(drop=True)
# Have to insert an extra date to get the first date shown - just altair things
#datastore = datastore.append(
# {
# 'Date': datastore['Date'].min() + pandas.DateOffset(days=-1),
# 'Positive_Tests': 1,
# 'Total_Tests': 1,
# 'Positivity_Rate': 1,
# 'Age_Band_5yr': 'Not Known',
# 'Band Start': 90
# }, ignore_index=True)
# %%
toplot = datastore[datastore['Date'] >= (datastore['Date'].max() + pandas.DateOffset(days=-42))].copy()
toplot['Date'] = pandas.to_datetime(toplot['Date'])
newind = pandas.date_range(start=toplot['Date'].max() + pandas.DateOffset(days=-42), end=toplot['Date'].max())
alldates = pandas.Series(newind)
"""Calculate weighted distances between samples in a given timepoint and both other samples in that timepoint and samples from a timepoint at a given delta time in the future.
"""
import argparse
from collections import defaultdict
import csv
import numpy as np
import pandas as pd
import sys
def get_distances_by_sample_names(distances):
"""Return a dictionary of distances by pairs of sample names.
Parameters
----------
distances : iterator
an iterator of dictionaries with keys of distance, sample, and other_sample
Returns
-------
dict :
dictionary of distances by pairs of sample names
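Illustrative doctest (toy sample names and distances):
>>> records = [
...     {"sample": "a", "other_sample": "b", "distance": "3"},
...     {"sample": "a", "other_sample": "c", "distance": "5"},
... ]
>>> dict(get_distances_by_sample_names(records))
{'a': {'b': 3, 'c': 5}}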
"""
distances_by_sample_names = defaultdict(dict)
for record in distances:
sample_a = record["sample"]
sample_b = record["other_sample"]
distance = int(record["distance"])
distances_by_sample_names[sample_a][sample_b] = distance
return distances_by_sample_names
def get_distance_matrix_by_sample_names(samples_a, samples_b, distances):
"""Return a matrix of distances between pairs of given sample sets.
Parameters
----------
samples_a, samples_b : list
names of samples whose pairwise distances should populate the matrix
with the first samples in rows and the second samples in columns
distances : dict
dictionary of distances by pairs of sample names
Returns
-------
ndarray :
matrix of pairwise distances between the given samples
>>> samples_a = ["a", "b"]
>>> samples_b = ["c", "d"]
>>> distances = {"a": {"c": 1, "d": 2}, "b": {"c": 3, "d": 4}}
>>> get_distance_matrix_by_sample_names(samples_a, samples_b, distances)
array([[1., 2.],
[3., 4.]])
>>> get_distance_matrix_by_sample_names(samples_b, samples_a, distances)
array([[1., 3.],
[2., 4.]])
"""
matrix = np.zeros((len(samples_a), len(samples_b)))
for i, sample_a in enumerate(samples_a):
for j, sample_b in enumerate(samples_b):
try:
matrix[i, j] = distances[sample_a][sample_b]
except KeyError:
matrix[i, j] = distances[sample_b][sample_a]
return matrix
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Annotated weighted distances between viruses",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tip-attributes", required=True, help="a tab-delimited file describing tip attributes at one or more timepoints")
parser.add_argument("--distances", required=True, help="tab-delimited file with pairwise distances between samples")
parser.add_argument("--delta-months", required=True, type=int, help="number of months to project clade frequencies into the future")
parser.add_argument("--output", required=True, help="tab-delimited output file with mean and standard deviation used to standardize each predictor")
args = parser.parse_args()
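# Illustrative invocation (the script and file names below are hypothetical):
#   python weighted_distances.py --tip-attributes tip_attributes.tsv \
#       --distances pairwise_distances.tsv --delta-months 12 --output weighted_distances.tsv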
# Load tip attributes.
tips = pd.read_csv(args.tip_attributes, sep="\t", parse_dates=["timepoint"])
# Load distances.
with open(args.distances, "r") as fh:
reader = csv.DictReader(fh, delimiter="\t")
# Map distances by sample names.
distances_by_sample_names = get_distances_by_sample_names(reader)
# Find valid timepoints for calculating distances to the future.
timepoints = tips["timepoint"].drop_duplicates()
last_timepoint = timepoints.max() - pd.DateOffset(months=args.delta_months)
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, math_util, util, viz
from slm_lab.spec import spec_util
import numpy as np
import os
import pandas as pd
import pydash as ps
import regex as re
import shutil
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
NORM_ORDER = 1 # use L1 norm in fitness vector norm
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength_sr(aeb_df, rand_reward, std_reward):
'''
Calculate strength for each reward as
strength = (reward - rand_reward) / (std_reward - rand_reward)
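Illustrative doctest (toy rewards; rand_reward=0 and std_reward=100 are made-up baselines):
>>> calc_strength_sr(pd.DataFrame({'reward': [0., 50., 100.]}), 0., 100.).tolist()
[0.0, 0.5, 1.0]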
'''
return (aeb_df['reward'] - rand_reward) / (std_reward - rand_reward)
def calc_strength(aeb_df):
'''
The strength of an agent in fitness is its maximum strength_ma. A moving average is used to denoise the signal.
For an agent's total reward at a given time, calculate strength by normalizing it with a given baseline rand_reward and solution std_reward, i.e.
strength = (reward - rand_reward) / (std_reward - rand_reward)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
- strength is standardized to be independent of the actual sign and scale of raw reward
- scales relative to std_reward: if an agent achieve x2 std_reward, the strength is x2, and so on.
This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
return aeb_df['strength_ma'].max()
def calc_speed(aeb_df, std_timestep):
'''
Find the maximum strength_ma, and the time to first reach it. Then the strength/time divided by the standard std_strength/std_timestep is speed, i.e.
speed = (max_strength_ma / timestep_to_first_reach) / (std_strength / std_timestep)
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if both agents reach the same max strength_ma, and one reaches it in half the timesteps, it is twice as fast.
- speed is standardized regardless of the scaling of absolute timesteps, or even the max strength attained
This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
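Illustrative doctest (toy strength_ma/total_t values; std_timestep=2000 is made up):
>>> aeb_df = pd.DataFrame({'strength_ma': [0., 1., 1.], 'total_t': [0, 1000, 2000]})
>>> float(calc_speed(aeb_df, std_timestep=2000))
2.0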
'''
first_max_idx = aeb_df['strength_ma'].idxmax() # this returns the first max
max_row = aeb_df.loc[first_max_idx]
std_strength = 1.
if max_row['total_t'] == 0: # especially for random agent
speed = 0.
else:
speed = (max_row['strength_ma'] / max_row['total_t']) / (std_strength / std_timestep)
return speed
def calc_stability(aeb_df):
'''
Stability = fraction of monotonically increasing elements in the denoised series of strength_ma, or 0 if strength_ma is all <= 0.
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
- uses strength_ma to be more robust to noise
- sharp gain in strength is considered stable
- monotonically increasing implies strength can keep growing and as long as it does not fall much, it is considered stable
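Illustrative doctest (toy strength_ma series; 3 of the 4 successive differences are non-negative):
>>> float(calc_stability(pd.DataFrame({'strength_ma': [1., 2., 3., 2., 4.]})))
0.75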
'''
if (aeb_df['strength_ma'].values <= 0.).all():
stability = 0.
else:
mono_inc_sr = np.diff(aeb_df['strength_ma']) >= 0.
stability = mono_inc_sr.sum() / mono_inc_sr.size
return stability
def calc_consistency(aeb_fitness_df):
'''
Calculate the consistency of trial by the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if all the fitness vectors are zero or all strength are zero, consistency = 0
- works for all sorts of session fitness vectors, with the standard scale
When an agent fails to achieve standard strength, it is meaningless to measure consistency or give false interpolation, so consistency is 0.
'''
fitness_vecs = aeb_fitness_df.values
if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
# no consistency if vectors all 0
consistency = 0.
elif len(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0), NORM_ORDER) / np.linalg.norm(np.ones(len(fitness_vecs[0])), NORM_ORDER)
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = math_util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
return consistency
def calc_epi_reward_ma(aeb_df, ckpt=None):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_df['reward']
if ckpt == 'eval':
# online eval mode reward is reward_ma from avg
aeb_df['reward_ma'] = rewards
else:
aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
return aeb_df
def calc_fitness(fitness_vec):
'''
Takes a vector of qualifying standardized dimensions of fitness and computes the normalized length as fitness.
Uses the L1 norm for simplicity and the intuitiveness of linearity.
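Illustrative doctest (toy fitness vector; a vector whose L1 norm matches the all-ones standard vector has fitness 1):
>>> float(calc_fitness([0.5, 1.0, 1.5, 1.0]))
1.0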
'''
if isinstance(fitness_vec, pd.Series):
fitness_vec = fitness_vec.values
elif isinstance(fitness_vec, pd.DataFrame):
fitness_vec = fitness_vec.iloc[0].values
std_fitness_vector = np.ones(len(fitness_vec))
fitness = np.linalg.norm(fitness_vec, NORM_ORDER) / np.linalg.norm(std_fitness_vector, NORM_ORDER)
return fitness
def calc_aeb_fitness_sr(aeb_df, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
std = FITNESS_STD.get(env_name)
if std is None:
std = FITNESS_STD.get('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
# calculate the strength sr and the moving-average (to denoise) first before calculating fitness
aeb_df['strength'] = calc_strength_sr(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW, min_periods=0, center=False).mean()
strength = calc_strength(aeb_df)
speed = calc_speed(aeb_df, std['std_timestep'])
stability = calc_stability(aeb_df)
aeb_fitness_sr = pd.Series({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Checkpoint and early termination analysis
'''
def get_reward_mas(agent, name='eval_reward_ma'):
'''Return array of the named reward_ma for all of an agent's bodies.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([getattr(body, name) for body in bodies], dtype=np.float16)
def get_std_epi_rewards(agent):
'''Return array of std_epi_reward for each of the environments.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([ps.get(FITNESS_STD, f'{body.env.name}.std_epi_reward') for body in bodies], dtype=np.float16)
def new_best(agent):
'''Check if algorithm is now the new best result, then update the new best'''
best_reward_mas = get_reward_mas(agent, 'best_reward_ma')
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
best = (eval_reward_mas >= best_reward_mas).all()
if best:
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
for body in bodies:
body.best_reward_ma = body.eval_reward_ma
return best
def all_solved(agent):
'''Check if envs have all been solved using std from slm_lab/spec/_fitness_std.json'''
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
std_epi_rewards = get_std_epi_rewards(agent)
solved = (
not np.isnan(std_epi_rewards).any() and
(eval_reward_mas >= std_epi_rewards).all()
)
return solved
def is_unfit(fitness_df, session):
'''Check if a fitness_df is unfit. Used to determine if a trial should stop running more sessions.'''
if FITNESS_STD.get(session.spec['env'][0]['name']) is None:
return False # fitness not known
mean_fitness_df = calc_mean_fitness(fitness_df)
return mean_fitness_df['strength'].iloc[0] <= NOISE_WINDOW
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
'''Method to calculate the mean over all bodies for a fitness_df.'''
return fitness_df.mean(axis=1, level=3)
def get_session_data(session, body_df_kind='eval', tmp_space_session_sub=False):
'''
Gather data from session from all the bodies
Depending on body_df_kind, will use eval_df or train_df
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
aeb_df = body.eval_df if body_df_kind == 'eval' else body.train_df
# TODO tmp substitution since SpaceSession does not have run_eval_episode yet
if tmp_space_session_sub:
aeb_df = body.train_df
session_data[aeb] = aeb_df.copy()
return session_data
def calc_session_fitness_df(session, session_data):
'''Calculate the session fitness df'''
session_fitness_data = {}
for aeb in session_data:
aeb_df = session_data[aeb]
aeb_df = calc_epi_reward_ma(aeb_df, ps.get(session.info_space, 'ckpt'))
util.downcast_float32(aeb_df)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
session_fitness_df = pd.concat(session_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(session_fitness_df)
session_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Session mean fitness: {session_fitness}\n{mean_fitness_df}')
return session_fitness_df
def calc_trial_fitness_df(trial):
'''
Calculate the trial fitness df by aggregating from the collected session_data_dict (session_fitness_df's).
Adds a consistency dimension to fitness vector.
'''
trial_fitness_data = {}
try:
all_session_fitness_df = pd.concat(list(trial.session_data_dict.values()))
except ValueError as e:
logger.exception('Sessions failed, no data to analyze. Check stack trace above')
for aeb in util.get_df_aeb_list(all_session_fitness_df):
aeb_fitness_df = all_session_fitness_df.loc[:, aeb]
aeb_fitness_sr = aeb_fitness_df.mean()
consistency = calc_consistency(aeb_fitness_df)
aeb_fitness_sr = aeb_fitness_sr.append(pd.Series({'consistency': consistency}))
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[trial.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS, axis=1)
trial_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
trial_fitness_df = pd.concat(trial_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(trial_fitness_df)
trial_fitness_df = mean_fitness_df
trial_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Trial mean fitness: {trial_fitness}\n{mean_fitness_df}')
return trial_fitness_df
def plot_session(session_spec, info_space, session_data):
'''Plot the session graph, 2 panes: reward, loss & explore_var. Each aeb_df gets its own color'''
max_tick_unit = ps.get(session_spec, 'meta.max_tick_unit')
aeb_count = len(session_data)
palette = viz.get_palette(aeb_count)
fig = viz.tools.make_subplots(rows=3, cols=1, shared_xaxes=True, print_grid=False)
for idx, (a, e, b) in enumerate(session_data):
aeb_str = f'{a}{e}{b}'
aeb_df = session_data[(a, e, b)]
aeb_df.fillna(0, inplace=True) # for saving plot, can't have NaN
fig_1 = viz.plot_line(aeb_df, 'reward_ma', max_tick_unit, legend_name=aeb_str, draw=False, trace_kwargs={'legendgroup': aeb_str, 'line': {'color': palette[idx]}})
fig.append_trace(fig_1.data[0], 1, 1)
fig_2 = viz.plot_line(aeb_df, ['loss'], max_tick_unit, y2_col=['explore_var'], trace_kwargs={'legendgroup': aeb_str, 'showlegend': False, 'line': {'color': palette[idx]}}, draw=False)
fig.append_trace(fig_2.data[0], 2, 1)
fig.append_trace(fig_2.data[1], 3, 1)
fig.layout['xaxis1'].update(title=max_tick_unit, zerolinewidth=1)
fig.layout['yaxis1'].update(fig_1.layout['yaxis'])
fig.layout['yaxis1'].update(domain=[0.55, 1])
fig.layout['yaxis2'].update(fig_2.layout['yaxis'])
fig.layout['yaxis2'].update(showgrid=False, domain=[0, 0.45])
fig.layout['yaxis3'].update(fig_2.layout['yaxis2'])
fig.layout['yaxis3'].update(overlaying='y2', anchor='x2')
fig.layout.update(ps.pick(fig_1.layout, ['legend']))
fig.layout.update(title=f'session graph: {session_spec["name"]} t{info_space.get("trial")} s{info_space.get("session")}', width=500, height=600)
viz.plot(fig)
return fig
def gather_aeb_rewards_df(aeb, session_datas, max_tick_unit):
'''Gather rewards from each session for a body into a df'''
aeb_session_rewards = {}
for s, session_data in session_datas.items():
aeb_df = session_data[aeb]
aeb_reward_sr = aeb_df['reward_ma']
aeb_reward_sr.index = aeb_df[max_tick_unit]
# guard for duplicate eval result
aeb_reward_sr = aeb_reward_sr[~aeb_reward_sr.index.duplicated()]
if util.in_eval_lab_modes():
# guard for eval appending possibly not ordered
aeb_reward_sr.sort_index(inplace=True)
aeb_session_rewards[s] = aeb_reward_sr
aeb_rewards_df = pd.DataFrame(aeb_session_rewards)
return aeb_rewards_df
def build_aeb_reward_fig(aeb_rewards_df, aeb_str, color, max_tick_unit):
'''Build the aeb_reward envelope figure'''
mean_sr = aeb_rewards_df.mean(axis=1)
std_sr = aeb_rewards_df.std(axis=1).fillna(0)
max_sr = mean_sr + std_sr
min_sr = mean_sr - std_sr
x = aeb_rewards_df.index.tolist()
max_y = max_sr.tolist()
min_y = min_sr.tolist()
envelope_trace = viz.go.Scatter(
x=x + x[::-1],
y=max_y + min_y[::-1],
fill='tozerox',
fillcolor=viz.lower_opacity(color, 0.2),
line=dict(color='rgba(0, 0, 0, 0)'),
showlegend=False,
legendgroup=aeb_str,
)
df = pd.DataFrame({max_tick_unit: x, 'mean_reward': mean_sr})
fig = viz.plot_line(
df, ['mean_reward'], [max_tick_unit], legend_name=aeb_str, draw=False, trace_kwargs={'legendgroup': aeb_str, 'line': {'color': color}}
)
fig.add_traces([envelope_trace])
return fig
def calc_trial_df(trial_spec, info_space):
'''Calculate trial_df as mean of all session_df'''
from slm_lab.experiment import retro_analysis
prepath = util.get_prepath(trial_spec, info_space)
predir, _, _, _, _, _ = util.prepath_split(prepath)
session_datas = retro_analysis.session_datas_from_file(predir, trial_spec, info_space.get('trial'), ps.get(info_space, 'ckpt'))
aeb_transpose = {aeb: [] for aeb in session_datas[list(session_datas.keys())[0]]}
max_tick_unit = ps.get(trial_spec, 'meta.max_tick_unit')
for s, session_data in session_datas.items():
for aeb, aeb_df in session_data.items():
aeb_transpose[aeb].append(aeb_df.sort_values(by=[max_tick_unit]).set_index(max_tick_unit, drop=False))
trial_data = {}
for aeb, df_list in aeb_transpose.items():
trial_data[aeb] = pd.concat(df_list).groupby(level=0).mean().reset_index(drop=True)
trial_df = pd.concat(trial_data, axis=1)
return trial_df
def plot_trial(trial_spec, info_space):
'''Plot the trial graph, 1 pane: mean and error envelope of reward graphs from all sessions. Each aeb_df gets its own color'''
from slm_lab.experiment import retro_analysis
prepath = util.get_prepath(trial_spec, info_space)
predir, _, _, _, _, _ = util.prepath_split(prepath)
session_datas = retro_analysis.session_datas_from_file(predir, trial_spec, info_space.get('trial'), ps.get(info_space, 'ckpt'))
rand_session_data = session_datas[list(session_datas.keys())[0]]
max_tick_unit = ps.get(trial_spec, 'meta.max_tick_unit')
aeb_count = len(rand_session_data)
palette = viz.get_palette(aeb_count)
fig = None
for idx, (a, e, b) in enumerate(rand_session_data):
aeb = (a, e, b)
aeb_str = f'{a}{e}{b}'
color = palette[idx]
aeb_rewards_df = gather_aeb_rewards_df(aeb, session_datas, max_tick_unit)
aeb_fig = build_aeb_reward_fig(aeb_rewards_df, aeb_str, color, max_tick_unit)
if fig is None:
fig = aeb_fig
else:
fig.add_traces(aeb_fig.data)
fig.layout.update(title=f'trial graph: {trial_spec["name"]} t{info_space.get("trial")}, {len(session_datas)} sessions', width=500, height=600)
viz.plot(fig)
return fig
def plot_experiment(experiment_spec, experiment_df):
'''
Plot the variable specs vs fitness vector of an experiment, where each point is a trial.
ref colors: https://plot.ly/python/heatmaps-contours-and-2dhistograms-tutorial/#plotlys-predefined-color-scales
'''
y_cols = ['fitness'] + FITNESS_COLS
x_cols = ps.difference(experiment_df.columns.tolist(), y_cols)
fig = viz.tools.make_subplots(rows=len(y_cols), cols=len(x_cols), shared_xaxes=True, shared_yaxes=True, print_grid=False)
fitness_sr = experiment_df['fitness']
min_fitness = fitness_sr.values.min()
max_fitness = fitness_sr.values.max()
for row_idx, y in enumerate(y_cols):
for col_idx, x in enumerate(x_cols):
x_sr = experiment_df[x]
guard_cat_x = x_sr.astype(str) if x_sr.dtype == 'object' else x_sr
trace = viz.go.Scatter(
y=experiment_df[y], yaxis=f'y{row_idx+1}',
x=guard_cat_x, xaxis=f'x{col_idx+1}',
showlegend=False, mode='markers',
marker={
'symbol': 'circle-open-dot', 'color': experiment_df['fitness'], 'opacity': 0.5,
# dump first quarter of colorscale that is too bright
'cmin': min_fitness - 0.50 * (max_fitness - min_fitness), 'cmax': max_fitness,
'colorscale': 'YlGnBu', 'reversescale': True
},
)
fig.append_trace(trace, row_idx + 1, col_idx + 1)
fig.layout[f'xaxis{col_idx+1}'].update(title='<br>'.join(ps.chunk(x, 20)), zerolinewidth=1, categoryarray=sorted(guard_cat_x.unique()))
fig.layout[f'yaxis{row_idx+1}'].update(title=y, rangemode='tozero')
fig.layout.update(title=f'experiment graph: {experiment_spec["name"]}', width=max(600, len(x_cols) * 300), height=700)
viz.plot(fig)
return fig
def save_session_df(session_data, filepath, info_space):
'''Save session_df, and if is in eval mode, modify it and save with append'''
if util.in_eval_lab_modes():
ckpt = util.find_ckpt(info_space.eval_model_prepath)
epi = int(re.search(r'epi(\d+)', ckpt)[1])
totalt = int(re.search(r'totalt(\d+)', ckpt)[1])
session_df = pd.concat(session_data, axis=1)
mean_sr = session_df.mean()
mean_sr.name = totalt # set index to prevent all being the same
eval_session_df = pd.DataFrame(data=[mean_sr])
"""ETS Prediction View"""
__docformat__ = "numpy"
import datetime
import os
import warnings
from typing import Union
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.common.prediction_techniques import ets_model
from gamestonk_terminal.common.prediction_techniques.pred_helper import (
price_prediction_backtesting_color,
print_prediction_kpis,
print_pretty_prediction,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.helper_funcs import (
export_data,
get_next_stock_market_days,
patch_pandas_text_adjustment,
plot_autoscale,
)
from gamestonk_terminal.rich_config import console
register_matplotlib_converters()
warnings.filterwarnings("ignore")
# pylint:disable=too-many-arguments
def display_exponential_smoothing(
ticker: str,
values: Union[pd.DataFrame, pd.Series],
n_predict: int,
trend: str = "N",
seasonal: str = "N",
seasonal_periods: int = 5,
s_end_date: str = "",
export: str = "",
time_res: str = "",
):
"""Perform exponential smoothing
Parameters
----------
ticker : str
Dataset being smoothed
values : Union[pd.DataFrame, pd.Series]
Raw data
n_predict : int
Days to predict
trend : str, optional
Trend variable, by default "N"
seasonal : str, optional
Seasonal variable, by default "N"
seasonal_periods : int, optional
Number of seasonal periods, by default 5
s_end_date : str, optional
End date for backtesting, by default ""
export : str, optional
Format to export data, by default ""
time_res : str
Resolution for data, allowing for predicting outside of standard market days
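Examples
--------
Usage sketch; ``df_close`` is a hypothetical Series of closing prices indexed by date.
>>> display_exponential_smoothing("AAPL", df_close, n_predict=5)  # doctest: +SKIP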
"""
if s_end_date:
if not time_res:
future_index = get_next_stock_market_days(
last_stock_day=s_end_date, n_next_days=n_predict
)
else:
future_index = pd.date_range(
s_end_date, periods=n_predict + 1, freq=time_res
)[1:]
if future_index[-1] > datetime.datetime.now():
console.print(
"Backtesting not allowed, since End Date + Prediction days is in the future\n"
)
return
df_future = values[future_index[0] : future_index[-1]]
values = values[:s_end_date] # type: ignore
# Get ETS model
model, title, forecast = ets_model.get_exponential_smoothing_model(
values, trend, seasonal, seasonal_periods, n_predict
)
if not forecast:
console.print("No forecast made. Model did not converge.\n")
return
if np.isnan(forecast).any():
console.print("Model predicted NaN values. Runtime Error.\n")
return
if not time_res:
l_pred_days = get_next_stock_market_days(
last_stock_day=values.index[-1],
n_next_days=n_predict,
)
else:
l_pred_days = pd.date_range(
values.index[-1], periods=n_predict + 1, freq=time_res
)[1:]
df_pred = pd.Series(forecast, index=l_pred_days, name="Price")
from constants_and_util import *
from scipy.stats import norm, pearsonr, spearmanr
import pandas as pd
import copy
import numpy as np
import random
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import ttest_ind, rankdata
import non_image_data_processing
import patsy
import os
import math
import sklearn
import json
import seaborn as sns
from scipy.stats import scoreatpercentile
import statsmodels
from sklearn.kernel_ridge import KernelRidge
import scipy
from scipy.stats import scoreatpercentile, linregress, ttest_rel
from statsmodels.iolib.summary2 import summary_col
"""
Code to perform analyses on the fitted models. We note two potentially confusing naming conventions in the analysis code.
First, much of the code was written during preliminary analyses looking just at SES; later, we broadened the analysis to look at pain gaps by sex, race, etc.
Hence, many of the variable names/comments contain "ses", but in general, these refer to all three binary variables we consider in the final paper (capturing education and race, not just income).
Second, while the paper refers to "training", "development", and "validation" sets, those correspond in the code to the "train", "val", and "test" sets, respectively.
"""
def make_simple_histogram_of_pain(y, binary_vector_to_use, positive_class_label, negative_class_label, plot_filename):
"""
Make a simple histogram of pain versus binary class (eg, pain for black vs non-black patients).
Checked.
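Usage sketch (the arrays and file name below are hypothetical):
make_simple_histogram_of_pain(y=koos_pain_scores, binary_vector_to_use=race_black,
positive_class_label='Black', negative_class_label='Non-black',
plot_filename='pain_histogram_by_race.png')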
"""
sns.set_style()
bins = np.arange(0, 101, 10)
plt.figure(figsize=[4, 4])
hist_weights = np.ones((binary_vector_to_use == False).sum())/float((binary_vector_to_use == False).sum()) # https://stackoverflow.com/a/16399202/9477154
plt.hist(y[binary_vector_to_use == False], weights=hist_weights, alpha=1, bins=bins, label=negative_class_label, orientation='horizontal')
hist_weights = np.ones((binary_vector_to_use == True).sum())/float((binary_vector_to_use == True).sum())
plt.hist(y[binary_vector_to_use == True], weights=hist_weights, alpha=.7, bins=bins, label=positive_class_label, orientation='horizontal')
plt.ylim([0, 100])
plt.yticks([0, 20, 40, 60, 80, 100], fontsize=12)
plt.xlabel("")
plt.legend(loc=4, fontsize=12)
plt.xticks([])
plt.savefig(plot_filename)
def compare_to_mri_features(datasets, y, yhat, all_ses_vars, ids, df_for_filtering_out_special_values, also_include_xray_features, use_random_forest, mri_features):
"""
Show that yhat still outperforms a predictor which uses MRI features.
df_for_filtering_out_special_values: this is a dataframe for MRI features only which only has rows if there are no 0.5/-0.5 values.
Just a sanity check (those values are rare) because I'm not sure whether binarizing really makes sense for those values.
"""
datasets = copy.deepcopy(datasets)
idxs_with_mris = {}
dfs_to_use_in_regression = {}
for dataset in ['train', 'val', 'test']:
idxs_with_mris[dataset] = np.isnan(datasets[dataset].non_image_data[mri_features].values).sum(axis=1) == 0
if df_for_filtering_out_special_values is not None:
dfs_to_use_in_regression[dataset] = pd.merge(datasets[dataset].non_image_data,
df_for_filtering_out_special_values,
how='left',
on=['id', 'side', 'visit'],
validate='one_to_one')
no_special_values = ~pd.isnull(dfs_to_use_in_regression[dataset]['no_special_values']).values
idxs_with_mris[dataset] = (idxs_with_mris[dataset]) & (no_special_values)
else:
dfs_to_use_in_regression[dataset] = datasets[dataset].non_image_data.copy()
if also_include_xray_features:
mri_features_to_use = ['C(%s)' % a for a in mri_features + CLINICAL_CONTROL_COLUMNS]
else:
mri_features_to_use = ['C(%s)' % a for a in mri_features]
print("\n\n\n\n********Predicting pain from MRI features; including Xray clinical features=%s; using random forest %s; filtering out special values %s" %
(also_include_xray_features, use_random_forest, df_for_filtering_out_special_values is not None))
yhat_from_mri = compare_to_clinical_performance(
train_df=dfs_to_use_in_regression['train'].loc[idxs_with_mris['train']],
val_df=dfs_to_use_in_regression['val'].loc[idxs_with_mris['val']],
test_df=dfs_to_use_in_regression['test'].loc[idxs_with_mris['test']],
y_col='koos_pain_subscore',
features_to_use=mri_features_to_use,
binary_prediction=False,
use_nonlinear_model=use_random_forest,
do_ols_sanity_check=True)
print("Compare to yhat performance")
yhat_performance = assess_performance(y=y[idxs_with_mris['test']],
yhat=yhat[idxs_with_mris['test']],
binary_prediction=False)
for k in yhat_performance:
print('%s: %2.3f' % (k, yhat_performance[k]))
mri_ses_vars = {}
for k in all_ses_vars:
mri_ses_vars[k] = all_ses_vars[k][idxs_with_mris['test']]
print(quantify_pain_gap_reduction_vs_rival(yhat=yhat[idxs_with_mris['test']],
y=y[idxs_with_mris['test']],
rival_severity_measure=yhat_from_mri,
all_ses_vars=mri_ses_vars,
ids=ids[idxs_with_mris['test']]))
def sig_star(p):
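"""Map a p-value to significance stars: '***' if p < .001, '**' if p < .01, '*' if p < .05, else ''."""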
assert p >= 0 and p <= 1
if p < .001:
return '***'
elif p < .01:
return '**'
elif p < .05:
return '*'
return ''
def get_pvalue_on_binary_vector_mean_diff(yhat_vector, klg_vector, ids):
"""
Assess whether yhat_vector and KLG_vector are assigning different fractions of people to surgery.
Basically does a paired t-test on the binary vector accounting for clustering.
Used for surgery analysis.
"""
assert len(yhat_vector) == len(klg_vector) == len(ids)
check_is_array(yhat_vector)
check_is_array(klg_vector)
diff_df = pd.DataFrame({'diff':1.*yhat_vector - 1.*klg_vector, 'id':ids})
clustered_diff_model = sm.OLS.from_formula('diff ~ 1', data=diff_df).fit(cov_type='cluster', cov_kwds={'groups':diff_df['id']})
assert np.allclose(clustered_diff_model.params['Intercept'], yhat_vector.mean() - klg_vector.mean())
return clustered_diff_model.pvalues['Intercept']
def get_ci_on_binary_vector(vector, ids):
"""
Compute standard error on a binary vector's mean, accounting for clustering.
Used for surgery analysis.
"""
assert len(vector) == len(ids)
check_is_array(vector)
check_is_array(ids)
df = pd.DataFrame({'val':1.*vector, 'id':ids})
cluster_model = sm.OLS.from_formula('val ~ 1', data=df).fit(cov_type='cluster', cov_kwds={'groups':df['id']})
assert np.allclose(cluster_model.params['Intercept'], vector.mean())
return '(%2.5f, %2.5f)' % (cluster_model.conf_int().loc['Intercept', 0], cluster_model.conf_int().loc['Intercept', 1])
def do_surgery_analysis_ziad_style(yhat, y, klg, all_ses_vars, baseline_idxs, have_actually_had_surgery, df_to_use, ids):
"""
Hopefully the final surgery analysis. Does a couple things:
1. Uses a criterion for allocating surgery based on the prior literature: KLG >= 3 and high pain, as defined using pain threshold from prior literature.
- to compare to yhat we do it two ways: discretize yhat, use discretized_yhat >= 3
- and, to make sure we allocate the same number of surgeries, take all high pain people and just count down by yhat until we have the same number that KLG allocates.
2. Then examines:
- What fraction of people are eligible for surgery both overall and in our racial/SES groups?
- What fraction of people are in a lot of pain but aren't eligible for surgery both overall and in our racial/SES groups?
- Is painkiller use correlated with yhat among those who don't receive surgery?
As a robustness check, all these analyses are repeated on both baseline + overall dataset, both excluding and including those who have already had surgery.
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(klg)
check_is_array(ids)
check_is_array(baseline_idxs)
check_is_array(have_actually_had_surgery)
pd.set_option('precision', 6)
pd.set_option('display.width', 1000)
df_to_use = df_to_use.copy()
in_high_pain = binarize_koos(y) == True
discretized_yhat = discretize_yhat_like_kl_grade(yhat_arr=yhat, kl_grade_arr=klg, y_col='koos_pain_subscore')
klg_cutoff = 3
fit_surgery_criteria_under_klg = (in_high_pain == True) & (klg >= klg_cutoff)
fit_surgery_criteria_under_discretized_yhat = (in_high_pain == True) & (discretized_yhat >= klg_cutoff)
for just_use_baseline in [True, False]:
for exclude_those_who_have_surgery in [True, False]:
idxs_to_use = np.ones(baseline_idxs.shape) == 1
if just_use_baseline:
idxs_to_use = idxs_to_use & (baseline_idxs == 1)
if exclude_those_who_have_surgery:
idxs_to_use = idxs_to_use & (have_actually_had_surgery == 0)
print("\n\n\n\n****Just use baseline: %s; exclude those who have had surgery: %s; analyzing %i knees" %
(just_use_baseline, exclude_those_who_have_surgery, idxs_to_use.sum()))
n_surgeries_under_klg = int(fit_surgery_criteria_under_klg[idxs_to_use].sum())
# Alternate yhat criterion: assign exactly same number of people surgery under yhat as under KLG.
# Do this by taking people with the lowest yhat values subject to being in high pain.
# Compute this independently for each group specified by idxs_to_use.
lowest_yhat_idxs = np.argsort(yhat)
yhat_match_n_surgeries = np.array([False for a in range(len(fit_surgery_criteria_under_discretized_yhat))])
for idx in lowest_yhat_idxs:
if yhat_match_n_surgeries.sum() < n_surgeries_under_klg:
if (in_high_pain[idx] == 1) & (idxs_to_use[idx] == 1):
yhat_match_n_surgeries[idx] = True
assert yhat[yhat_match_n_surgeries == True].mean() < yhat[yhat_match_n_surgeries == False].mean()
assert np.allclose(yhat_match_n_surgeries[idxs_to_use].mean(), fit_surgery_criteria_under_klg[idxs_to_use].mean())
fracs_eligible_for_surgery = []
fracs_eligible_for_surgery.append({'group':'Overall',
'klg':fit_surgery_criteria_under_klg[idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_klg[idxs_to_use], ids[idxs_to_use]),
'yhat':fit_surgery_criteria_under_discretized_yhat[idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_discretized_yhat[idxs_to_use], ids[idxs_to_use]),
'yhat_match_surgeries':yhat_match_n_surgeries[idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=fit_surgery_criteria_under_discretized_yhat[idxs_to_use],
klg_vector=fit_surgery_criteria_under_klg[idxs_to_use],
ids=ids[idxs_to_use])})
for ses_var in all_ses_vars:
fracs_eligible_for_surgery.append({'group':ses_var,
'yhat':fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids[(all_ses_vars[ses_var] == True) & idxs_to_use]),
'klg':fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids[(all_ses_vars[ses_var] == True) & idxs_to_use]),
'yhat_match_surgeries':yhat_match_n_surgeries[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use], klg_vector=fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids=ids[(all_ses_vars[ses_var] == True) & idxs_to_use])})
fracs_eligible_for_surgery = pd.DataFrame(fracs_eligible_for_surgery)
fracs_eligible_for_surgery['yhat/klg'] = fracs_eligible_for_surgery['yhat'] / fracs_eligible_for_surgery['klg']
fracs_eligible_for_surgery['yhat_match_surgeries/klg'] = fracs_eligible_for_surgery['yhat_match_surgeries'] / fracs_eligible_for_surgery['klg']
print("Fraction eligible for surgery")
print(fracs_eligible_for_surgery[['group', 'klg', 'klg_ci', 'yhat', 'yhat_ci', 'yhat/klg', 'yhat_klg_p']])
assert (fracs_eligible_for_surgery['yhat/klg'] > 1).all()
assert (fracs_eligible_for_surgery['yhat_match_surgeries/klg'] >= 1).all()
for check in ['klg', 'yhat']:
# check CIs.
assert np.allclose(
fracs_eligible_for_surgery[check].values - fracs_eligible_for_surgery['%s_ci' % check].map(lambda x:float(x.split()[0].replace(',', '').replace('(', ''))),
fracs_eligible_for_surgery['%s_ci' % check].map(lambda x:float(x.split()[1].replace(',', '').replace(')', ''))) - fracs_eligible_for_surgery[check].values,
atol=1e-5)
# For each population, calculate the rate of people who do not receive surgery and are in pain, both under the current regime and under our counterfactual surgery assignment.
do_not_receive_surgery_and_are_in_pain = []
print("Do not receive surgery and are in pain")
do_not_receive_surgery_and_are_in_pain.append({'group':'Overall',
'klg':((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use], ids[idxs_to_use]),
'yhat':((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use], ids[idxs_to_use]),
'yhat_match_surgeries':((yhat_match_n_surgeries == 0) & in_high_pain)[idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use],
klg_vector=((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use],
ids=ids[idxs_to_use])})
for ses_var in all_ses_vars:
do_not_receive_surgery_and_are_in_pain.append({'group':ses_var,
'klg':((fit_surgery_criteria_under_klg == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use & (all_ses_vars[ses_var] == True)], ids[idxs_to_use & (all_ses_vars[ses_var] == True)]),
'yhat':((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use & (all_ses_vars[ses_var] == True)], ids[idxs_to_use & (all_ses_vars[ses_var] == True)]),
'yhat_match_surgeries':((yhat_match_n_surgeries == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use],
klg_vector=((fit_surgery_criteria_under_klg == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids=ids[(all_ses_vars[ses_var] == True) & idxs_to_use])})
do_not_receive_surgery_and_are_in_pain = pd.DataFrame(do_not_receive_surgery_and_are_in_pain)
do_not_receive_surgery_and_are_in_pain['yhat/klg'] = do_not_receive_surgery_and_are_in_pain['yhat'] / do_not_receive_surgery_and_are_in_pain['klg']
do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries/klg'] = do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries'] / do_not_receive_surgery_and_are_in_pain['klg']
print(do_not_receive_surgery_and_are_in_pain[['group', 'klg', 'klg_ci', 'yhat', 'yhat_ci', 'yhat/klg', 'yhat_klg_p']])
assert (do_not_receive_surgery_and_are_in_pain['yhat/klg'] < 1).all()
assert (do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries/klg'] <= 1).all()
for check in ['klg', 'yhat']:
# check CIs.
assert np.allclose(
do_not_receive_surgery_and_are_in_pain[check].values - do_not_receive_surgery_and_are_in_pain['%s_ci' % check].map(lambda x:float(x.split()[0].replace(',', '').replace('(', ''))),
do_not_receive_surgery_and_are_in_pain['%s_ci' % check].map(lambda x:float(x.split()[1].replace(',', '').replace(')', ''))) - do_not_receive_surgery_and_are_in_pain[check].values,
atol=1e-5)
# show, in the non-surgical population, the correlation between painkiller use and yhat
predict_medication_results = []
medications = ['rxactm', 'rxanalg', 'rxasprn', 'rxnarc', 'rxnsaid', 'rxothan']
for surgery_criterion in ['yhat', 'yhat_match_surgeries', 'klg']:
if surgery_criterion == 'yhat':
non_surgical_population = (fit_surgery_criteria_under_discretized_yhat == False) & idxs_to_use
elif surgery_criterion == 'klg':
non_surgical_population = (fit_surgery_criteria_under_klg == False) & idxs_to_use
elif surgery_criterion == 'yhat_match_surgeries':
non_surgical_population = (yhat_match_n_surgeries == False) & idxs_to_use
for m in medications:
df_for_regression = pd.DataFrame({'medication':df_to_use.loc[non_surgical_population, m].values,
'yhat':yhat[non_surgical_population],
'id':df_to_use.loc[non_surgical_population, 'id'].values})
df_for_regression = df_for_regression.dropna()
predict_on_medication_in_nonsurgical_population = sm.Logit.from_formula('medication ~ yhat', data=df_for_regression).fit(cov_type='cluster', cov_kwds={'groups':df_for_regression['id']})
predict_medication_results.append({'medication':MEDICATION_CODES[('v00' + m).upper()],
'beta_yhat':predict_on_medication_in_nonsurgical_population.params['yhat'],
'DV mean':df_for_regression['medication'].mean(),
'p_yhat':predict_on_medication_in_nonsurgical_population.pvalues['yhat'],
'surgery_criterion':surgery_criterion,
'n':predict_on_medication_in_nonsurgical_population.nobs})
predict_medication_results = pd.DataFrame(predict_medication_results)[['surgery_criterion', 'medication', 'beta_yhat', 'p_yhat', 'DV mean', 'n']]
predict_medication_results['sig'] = predict_medication_results['p_yhat'].map(sig_star)
assert (predict_medication_results['sig'].map(lambda x:'*' in x) & (predict_medication_results['beta_yhat'] > 0)).sum() == 0 # make sure no significant associations in the wrong direction.
print(predict_medication_results.sort_values(by='medication'))
def extract_all_ses_vars(df):
"""
Small helper method: return a dictionary of variables coded in the proper direction.
"""
for k in ['binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black']:
assert df[k].map(lambda x:x in [0, 1]).all()
assert df[k].map(lambda x:x in [True, False]).all()
income_at_least_50k = df['binarized_income_at_least_50k'].values == 1
graduated_college = df['binarized_education_graduated_college'].values == 1
race_black = df['race_black'].values == 1
all_ses_vars = {'did_not_graduate_college':~(graduated_college == 1),
'income_less_than_50k':~(income_at_least_50k == 1),
'race_black':race_black == 1}
return all_ses_vars, income_at_least_50k, graduated_college, race_black
def assess_treatment_gaps_controlling_for_klg(klg, all_ses_vars, baseline_idxs, df_to_use):
"""
Regression:
treatment ~ SES + controls, where controls \in [KLG, none].
"""
check_is_array(klg)
check_is_array(baseline_idxs)
pd.set_option('max_rows', 500)
get_OR_and_CI = lambda m:'%2.2f (%2.2f, %2.2f)' % (np.exp(m.params['ses']), np.exp(m.conf_int().loc['ses', 0]), np.exp(m.conf_int().loc['ses', 1]))
treatment_gaps_regression_results = []
for treatment in ['knee_surgery', 'rxnarc', 'rxactm', 'rxanalg', 'rxasprn', 'rxnsaid', 'rxothan']:
for just_use_baseline in [True, False]:
idxs_to_use = np.ones(baseline_idxs.shape) == 1
if just_use_baseline:
idxs_to_use = idxs_to_use & (baseline_idxs == 1)
for control_for_klg in [True, False]:
for ses_var_name in all_ses_vars:
regression_df = pd.DataFrame({'ses':all_ses_vars[ses_var_name][idxs_to_use] * 1.,
'klg':klg[idxs_to_use],
'treatment':df_to_use.loc[idxs_to_use, treatment].values,
'id':df_to_use.loc[idxs_to_use, 'id'].values,
'visit':df_to_use.loc[idxs_to_use, 'visit'].values}).dropna()
if control_for_klg:
formula = 'treatment ~ ses + C(klg)'
else:
formula = 'treatment ~ ses'
regression_model = sm.Logit.from_formula(formula, data=regression_df).fit(cov_type='cluster', cov_kwds={'groups':regression_df['id'].values})
treatment_gaps_regression_results.append({'n_obs':regression_model.nobs,
'just_baseline':just_use_baseline,
'klg_control':control_for_klg,
'treatment':MEDICATION_CODES[('v00' + treatment).upper()] if treatment != 'knee_surgery' else 'knee_surgery',
'ses_var':ses_var_name,
'ses_OR':get_OR_and_CI(regression_model),
'DV mean':'%2.3f' % regression_df['treatment'].mean() ,
'sig':sig_star(regression_model.pvalues['ses'])})
treatment_gaps_regression_results = pd.DataFrame(treatment_gaps_regression_results)[['just_baseline',
'klg_control',
'treatment',
'ses_var',
'ses_OR',
'sig',
'DV mean',
'n_obs']]
print(treatment_gaps_regression_results)
def study_effect_of_surgery(df_to_use, surgery_col_to_analyze):
"""
The goal here was to show that people are in less pain after surgery, which is true for arthroplasty (not arthroscopy).
"""
pd.set_option('display.width', 500)
df_to_use = df_to_use.copy()
df_to_use['high_pain'] = binarize_koos(df_to_use['koos_pain_subscore'])
print("Prior to dropping people with missing %s data, %i rows" % (surgery_col_to_analyze, len(df_to_use)))
df_to_use = df_to_use.dropna(subset=[surgery_col_to_analyze])
print("After dropping people with missing %s data, %i rows" % (surgery_col_to_analyze, len(df_to_use)))
df_to_use['id_plus_side'] = df_to_use['id'].astype(str) + '*' + df_to_use['side'].astype(str)
medications = ['rxactm', 'rxanalg', 'rxasprn', 'rxnarc', 'rxnsaid', 'rxothan']
outcomes = ['koos_pain_subscore', 'high_pain'] + medications + ['all_pain_medications_combined']
df_to_use['all_pain_medications_combined'] = False
for k in medications:
df_to_use['all_pain_medications_combined'] = (df_to_use['all_pain_medications_combined'] | (df_to_use[k] == 1))
grouped_d = df_to_use.groupby('id_plus_side')
outcomes_to_changes = {}
for outcome in outcomes:
outcomes_to_changes[outcome] = []
outcomes_to_changes['pre_surgery_klg'] = []
outcomes_to_changes['pre_surgery_discretized_yhat'] = []
for group_id, small_d in grouped_d:
small_d = small_d.copy().sort_values(by='visit')
if small_d[surgery_col_to_analyze].sum() == 0:
continue
if small_d[surgery_col_to_analyze].iloc[0] == 1:
continue
small_d.index = range(len(small_d))
before_surgery = small_d[surgery_col_to_analyze] == 0
after_surgery = small_d[surgery_col_to_analyze] == 1
assert before_surgery.sum() > 0
assert after_surgery.sum() > 0
outcomes_to_changes['pre_surgery_klg'].append(small_d.loc[before_surgery, 'xrkl'].dropna().mean())
if 'discretized_yhat' in small_d.columns:
outcomes_to_changes['pre_surgery_discretized_yhat'].append(small_d.loc[before_surgery, 'discretized_yhat'].dropna().mean())
else:
outcomes_to_changes['pre_surgery_discretized_yhat'].append(np.nan)
for outcome in outcomes:
if pd.isnull(small_d[outcome]).mean() > 0:
continue
before_surgery_mean = small_d.loc[before_surgery, outcome].mean()
after_surgery_mean = small_d.loc[after_surgery, outcome].mean()
outcomes_to_changes[outcome].append({'before_surgery':before_surgery_mean, 'after_surgery':after_surgery_mean})
assert sorted(small_d[surgery_col_to_analyze].values) == list(small_d[surgery_col_to_analyze].values)
outcomes_to_changes['pre_surgery_klg'] = np.array(outcomes_to_changes['pre_surgery_klg'])
outcomes_to_changes['pre_surgery_discretized_yhat'] = np.array(outcomes_to_changes['pre_surgery_discretized_yhat'])
if np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat']).mean() < 1:
assert (np.isnan(outcomes_to_changes['pre_surgery_klg']) == np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat'])).all()
for k in ['pre_surgery_klg', 'pre_surgery_discretized_yhat']:
not_nan = ~np.isnan(outcomes_to_changes[k])
print('Mean of %s prior to surgery in people who had surgery: %2.5f; median %2.5f' % (k,
outcomes_to_changes[k][not_nan].mean(),
np.median(outcomes_to_changes[k][not_nan])))
results_df = []
for outcome in outcomes:
pre_surgery_values = np.array([a['before_surgery'] for a in outcomes_to_changes[outcome]])
post_surgery_values = np.array([a['after_surgery'] for a in outcomes_to_changes[outcome]])
t, p = ttest_rel(pre_surgery_values, post_surgery_values)
pretty_outcome_name = MEDICATION_CODES['V00' + outcome.upper()] if 'V00' + outcome.upper() in MEDICATION_CODES else outcome
results_df.append({'outcome':pretty_outcome_name,
'n':len(post_surgery_values),
'pre_surgery_larger':(pre_surgery_values > post_surgery_values).sum(),
'post_surgery_larger':(pre_surgery_values < post_surgery_values).sum(),
'no_change':(pre_surgery_values == post_surgery_values).sum(),
'pre_surgery_mean':pre_surgery_values.mean(),
'post_surgery_mean':post_surgery_values.mean(),
'p':p})
if np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat']).mean() < 1:
# check whether yhat predicts surgical outcomes -- but this turns out to be pretty impossible due to the small size of the test set.
for outcome in outcomes:
print(outcome)
pre_surgery_values = np.array([a['before_surgery'] for a in outcomes_to_changes[outcome]])
post_surgery_values = np.array([a['after_surgery'] for a in outcomes_to_changes[outcome]])
for k in ['pre_surgery_klg', 'pre_surgery_discretized_yhat']:
not_nan = ~np.isnan(outcomes_to_changes[k])
r, p = pearsonr(outcomes_to_changes[k][not_nan], post_surgery_values[not_nan] - pre_surgery_values[not_nan])
print("Correlation between %s and post-surgery change: %2.3f, p=%2.3e; n=%i" % (k, r, p, not_nan.sum()))
return pd.DataFrame(results_df)[['outcome', 'n', 'pre_surgery_larger', 'no_change', 'post_surgery_larger', 'pre_surgery_mean', 'post_surgery_mean', 'p']]
def analyze_performance_on_held_out_sites(all_site_generalization_results, yhat, y, yhat_from_klg, site_vector, all_ses_vars, ids, recalibrate_to_new_set):
"""
Check how we do on held out data (ie, train just on 4 sites, validate+test on the fifth).
all_site_generalization_results is a dataframe with performance results.
If recalibrate_to_new_set is True, fits a model ax + b on the held out site (improves RMSE but leaves r^2 unchanged).
This seems like something you probably want to avoid doing.
"""
pd.set_option("display.width", 500)
# Just a little bit of paranoia here to avoid accidental modification due to pass-by-reference.
yhat = yhat.copy()
y = y.copy()
yhat_from_klg = yhat_from_klg.copy()
site_vector = site_vector.copy()
all_ses_vars = copy.deepcopy(all_ses_vars)
ids = ids.copy()
# One way to combine performance across all 5 settings: stitch together yhat/KLG for each held-out site. But this is kind of weird.
stitched_together_held_out_yhat = np.nan * np.ones(yhat.shape)
stitched_together_klg = np.nan * np.ones(yhat.shape)
results_to_plot = []
all_site_names = sorted(list(set(all_site_generalization_results['site_to_remove'])))
concatenated_pain_gap_reductions = []
for site in all_site_names:
model_idxs = all_site_generalization_results['site_to_remove'] == site
site_idxs = site_vector == site
site_ensemble_results, ensemble_site_yhat = try_ensembling(all_site_generalization_results.loc[model_idxs],
5,
binary_prediction=False)
yhat_from_klg_to_use = yhat_from_klg.copy()
ensemble_site_yhat[~site_idxs] = np.nan
if recalibrate_to_new_set:
# recalibrate yhat
df_for_recalibration = pd.DataFrame({'yhat':ensemble_site_yhat[site_idxs], 'y':y[site_idxs]})
recalibration_model = sm.OLS.from_formula('y ~ yhat', data=df_for_recalibration).fit()
ensemble_site_yhat[site_idxs] = recalibration_model.predict(df_for_recalibration)
# recalibrate KLG
df_for_recalibration = pd.DataFrame({'yhat':yhat_from_klg_to_use[site_idxs], 'y':y[site_idxs]})
recalibration_model = sm.OLS.from_formula('y ~ yhat', data=df_for_recalibration).fit()
yhat_from_klg_to_use[site_idxs] = recalibration_model.predict(df_for_recalibration)
stitched_together_held_out_yhat[site_idxs] = ensemble_site_yhat[site_idxs]
stitched_together_klg[site_idxs] = yhat_from_klg_to_use[site_idxs]
# KLG
klg_results_for_site = assess_performance(
yhat=yhat_from_klg_to_use[site_idxs],
y=y[site_idxs],
binary_prediction=False)
klg_results_for_site['predictor'] = 'klg'
# held out yhat
held_out_yhat_results_for_site = assess_performance(
yhat=ensemble_site_yhat[site_idxs],
y=y[site_idxs],
binary_prediction=False)
held_out_yhat_results_for_site['predictor'] = 'held_out_yhat'
# original performance results, restricted to site.
yhat_results_for_site = assess_performance(
yhat=yhat[site_idxs],
y=y[site_idxs],
binary_prediction=False)
yhat_results_for_site['predictor'] = 'yhat'
results_for_site_compared = pd.DataFrame([yhat_results_for_site, held_out_yhat_results_for_site, klg_results_for_site])
results_for_site_compared['n'] = site_idxs.sum()
results_for_site_compared['n_models'] = model_idxs.sum()
results_for_site_compared['site'] = site
results_to_plot.append(results_for_site_compared)
print(results_for_site_compared[['predictor', 'r^2', 'negative_rmse', 'spearman_r^2', 'n', 'n_models']])
ses_vars_to_use = copy.deepcopy(all_ses_vars)
for var_name in ses_vars_to_use:
ses_vars_to_use[var_name] = all_ses_vars[var_name][site_idxs]
print("Pain gap analysis (important point is that Rival red. vs nothing < red. vs nothing)")
pain_gap_reduction = quantify_pain_gap_reduction_vs_rival(yhat=ensemble_site_yhat[site_idxs],
y=y[site_idxs],
rival_severity_measure=yhat_from_klg_to_use[site_idxs],
all_ses_vars=ses_vars_to_use,
ids=ids[site_idxs])
concatenated_pain_gap_reductions.append(pain_gap_reduction)
assert (pain_gap_reduction['No controls gap'] < 0).all()
assert (pain_gap_reduction['Rival red. vs nothing'] < pain_gap_reduction['red. vs nothing']).all()
print(pain_gap_reduction[['SES var',
'Rival red. vs nothing',
'red. vs nothing',
'yhat/rival red. ratio',
'n_people',
'n_obs']])
results_to_plot = pd.concat(results_to_plot)
print("\n\nUnweighted mean predictive performance across all 5 sites")
print(results_to_plot.groupby('predictor').mean())
print("Unweighted mean pain gap reduction performance across all 5 sites")
concatenated_pain_gap_reductions = pd.concat(concatenated_pain_gap_reductions)
print(concatenated_pain_gap_reductions.groupby('SES var').mean()[['Rival red. vs nothing', 'red. vs nothing']])
plt.figure(figsize=[12, 4])
for subplot_idx, col_to_plot in enumerate(['r^2', 'spearman_r^2', 'negative_rmse']):
plt.subplot(1, 3, subplot_idx + 1)
for predictor in ['held_out_yhat', 'klg']:
predictor_idxs = results_to_plot['predictor'] == predictor
plt.scatter(
x=range(predictor_idxs.sum()),
y=results_to_plot.loc[predictor_idxs, col_to_plot].values,
label=predictor)
plt.xticks(range(predictor_idxs.sum()), results_to_plot.loc[predictor_idxs, 'site'].values)
plt.title(col_to_plot)
plt.legend()
plt.show()
print("Stitched together predictor across all sites! Not really using this at present")
print("yhat")
assert np.isnan(stitched_together_held_out_yhat).sum() == 0
print(assess_performance(yhat=yhat,
y=y,
binary_prediction=False))
print("stitched together held out yhat")
print(assess_performance(yhat=stitched_together_held_out_yhat,
y=y,
binary_prediction=False))
print("KLG")
print(assess_performance(yhat=stitched_together_klg,
y=y,
binary_prediction=False))
print(quantify_pain_gap_reduction_vs_rival(yhat=stitched_together_held_out_yhat,
y=y,
rival_severity_measure=stitched_together_klg,
all_ses_vars=all_ses_vars,
ids=ids)[['SES var',
'Rival red. vs nothing',
'red. vs nothing',
'yhat/rival red. ratio',
'n_people',
'n_obs']])
def analyze_effect_of_diversity(all_diversity_results, all_ses_vars, y, yhat_from_klg, ids, n_bootstraps):
"""
Look at the effect of training on all non-minority patients, as opposed to including some minority patients.
Checked.
"""
for ses_group in sorted(list(set(all_diversity_results['ses_col']))):
print('\n\n\n\n%s' % ses_group)
metrics_we_want = []
if ses_group == 'race_black':
minority_idxs = all_ses_vars['race_black']
elif ses_group == 'binarized_education_graduated_college':
minority_idxs = all_ses_vars['did_not_graduate_college']
elif ses_group == 'binarized_income_at_least_50k':
minority_idxs = all_ses_vars['income_less_than_50k']
else:
raise Exception("Invalid variable.")
assert minority_idxs.mean() < .5
vals_to_test_yhat = {}
for val in sorted(list(set(all_diversity_results['majority_group_seed']))):
# Note: all_diversity_results['majority_group_seed'] is None iff we exclude the minority group.
diversity_idxs = ((all_diversity_results['majority_group_seed'] == val) &
(all_diversity_results['ses_col'] == ses_group))
assert diversity_idxs.sum() >= 5
ensemble_diversity_results, ensemble_test_diversity_yhat = try_ensembling(all_diversity_results.loc[diversity_idxs], 5, binary_prediction=False)
vals_to_test_yhat[val] = ensemble_test_diversity_yhat
# Predictive performance on full dataset.
ensemble_diversity_results = ensemble_diversity_results.loc[ensemble_diversity_results['model'] == 'ensemble']
assert len(ensemble_diversity_results) == 1
results_for_seed = {'majority_group_seed':val,
'r^2':ensemble_diversity_results['r^2'].iloc[-1],
'spearman_r^2':ensemble_diversity_results['spearman_r^2'].iloc[-1],
'negative_rmse':ensemble_diversity_results['negative_rmse'].iloc[-1],
'n_models':diversity_idxs.sum()}
# predictive performance just on minority/majority.
just_minority_results = assess_performance(yhat=ensemble_test_diversity_yhat[minority_idxs],
y=y[minority_idxs],
binary_prediction=False)
non_minority_results = assess_performance(yhat=ensemble_test_diversity_yhat[~minority_idxs],
y=y[~minority_idxs],
binary_prediction=False)
for k in ['r^2', 'negative_rmse']:
results_for_seed['Just minority %s' % k] = just_minority_results[k]
for k in ['r^2', 'negative_rmse']:
results_for_seed['Just non-minority %s' % k] = non_minority_results[k]
# pain gap reduction.
diversity_pain_gap_reduction = quantify_pain_gap_reduction_vs_rival(yhat=ensemble_test_diversity_yhat,
y=y,
rival_severity_measure=yhat_from_klg,
all_ses_vars=all_ses_vars,
ids=ids)[['SES var', 'yhat/rival red. ratio']]
for ses_var in all_ses_vars:
results_for_seed['%s_pain gap reduction ratio' % ses_var] = diversity_pain_gap_reduction.loc[
diversity_pain_gap_reduction['SES var'] == ses_var, 'yhat/rival red. ratio'].iloc[0]
metrics_we_want.append(results_for_seed)
metrics_we_want = pd.DataFrame(metrics_we_want)
print(metrics_we_want)
# CIs
for val in ['0.0', '1.0', '2.0', '3.0', '4.0']:
print("Comparing predictive performance for diverse dataset with seed %s to non-diverse dataset (note that 'KLG' here is the non-diverse dataset)" % val)
bootstrap_CIs_on_model_performance(y=y,
yhat=vals_to_test_yhat[val],
yhat_from_klg=vals_to_test_yhat['nan'],
yhat_from_clinical_image_features=None,
ids=ids,
n_bootstraps=n_bootstraps)
for val in ['0.0', '1.0', '2.0', '3.0', '4.0']:
print("Comparing pain gap reduction for diverse dataset with seed %s to non-diverse dataset (note that 'KLG' here is the non-diverse dataset)" % val)
bootstrap_CIs_on_pain_gap_reduction(y=y,
yhat=vals_to_test_yhat[val],
yhat_from_klg=vals_to_test_yhat['nan'],
ids=ids,
all_ses_vars=all_ses_vars,
n_bootstraps=n_bootstraps,
quantities_of_interest=['yhat/rival red. ratio'])
main_titles = {'race_black':'Race\ndiversity',
'binarized_education_graduated_college':'Education\ndiversity',
'binarized_income_at_least_50k':'Income\ndiversity'}
plot_diversity_results(metrics_we_want,
main_title=main_titles[ses_group],
minority_idxs=minority_idxs,
y=y,
yhat_from_klg=yhat_from_klg)
def plot_diversity_results(metrics_we_want, main_title, minority_idxs, y, yhat_from_klg):
"""
Plot blue dots for KLG baseline, red dots for no-diversity condition, black dots for diversity condition.
metrics_we_want is a dataframe with performance under different majority_group_seed conditions.
Checked.
"""
check_is_array(minority_idxs)
check_is_array(y)
check_is_array(yhat_from_klg)
cols_to_plot = [#'negative_rmse',
'r^2',
#'spearman_r^2',
'did_not_graduate_college_pain gap reduction ratio',
'income_less_than_50k_pain gap reduction ratio',
'race_black_pain gap reduction ratio',
#'Just minority r^2',
#'Just non-minority r^2',
#'Just minority negative_rmse',
#'Just non-minority negative_rmse']
]
col_pretty_names = {'r^2':'$r^2$',
'did_not_graduate_college_pain gap reduction ratio':'Reduction in education pain disparity\n(relative to KLG)',
'income_less_than_50k_pain gap reduction ratio':'Reduction in income pain disparity\n(relative to KLG)',
'race_black_pain gap reduction ratio':'Reduction in race pain disparity\n(relative to KLG)'}
fontsize = 16
plt.figure(figsize=[6 * len(cols_to_plot), 3])
#plt.suptitle(main_title)
for subplot_idx, col in enumerate(cols_to_plot):
xlimits = None
plt.subplot(1, len(cols_to_plot), subplot_idx + 1)
assert sorted(list(set(metrics_we_want['majority_group_seed']))) == sorted(['0.0', '1.0', '2.0', '3.0', '4.0', 'nan'])
assert len(metrics_we_want) == 6
if 'pain gap reduction ratio' in col:
plt.scatter([1], [1], color='blue', label='KLG')
if col == 'did_not_graduate_college_pain gap reduction ratio':
xlimits = [.9, 5.1]
plt.xticks([1, 2, 3, 4, 5], ['1x', '2x', '3x', '4x', '5x'], fontsize=fontsize)
elif col == 'income_less_than_50k_pain gap reduction ratio':
xlimits = [.9, 3.1]
plt.xticks([1, 2, 3], ['1x', '2x', '3x'], fontsize=fontsize)
elif col == 'race_black_pain gap reduction ratio':
xlimits = [.9, 6.1]
plt.xticks([1, 2, 3, 4, 5, 6], ['1x', '2x', '3x', '4x', '5x', '6x'], fontsize=fontsize)
else:
raise Exception("Invalid column")
elif 'Just minority' in col:
klg_counterpart = assess_performance(y=y[minority_idxs],
yhat=yhat_from_klg[minority_idxs],
binary_prediction=False)[col.split()[-1]]
plt.scatter([klg_counterpart], [1], color='blue', label='KLG')
elif 'Just non-minority' in col:
klg_counterpart = assess_performance(y=y[~minority_idxs],
yhat=yhat_from_klg[~minority_idxs],
binary_prediction=False)[col.split()[-1]]
plt.scatter([klg_counterpart], [1], color='blue', label='KLG')
else:
if col == 'r^2':
xlimits = [.09, .18]
plt.xticks([.1, .12, .14, .16], fontsize=fontsize)
klg_counterpart = assess_performance(y=y, yhat=yhat_from_klg, binary_prediction=False)[col]
plt.scatter([klg_counterpart], [1], color='blue', label='KLG')
plt.scatter([], [], label='') # this is just to make the legend spacing good.
# This is the non-diversity condition. One red dot.
plt.scatter(metrics_we_want.loc[metrics_we_want['majority_group_seed'] == 'nan', col].values,
[1],
color='red',
label='Non-diverse\ntrain set')
# This is the diversity condition. 5 black dots.
plt.scatter(metrics_we_want.loc[metrics_we_want['majority_group_seed'] != 'nan', col].values,
[1] * (len(metrics_we_want) - 1),
color='black',
label='Diverse\ntrain set')
if xlimits is not None:
assert (metrics_we_want[col].values > xlimits[0]).all()
assert (metrics_we_want[col].values < xlimits[1]).all()
plt.xlim(xlimits)
plt.yticks([])
plt.xlabel(col_pretty_names[col] if col in col_pretty_names else col, fontsize=fontsize)
if subplot_idx == 0:
plt.ylabel(main_title, fontsize=fontsize)
if 'race' in main_title.lower():
plt.legend(ncol=3, fontsize=fontsize, labelspacing=0.2, columnspacing=.2, handletextpad=.1, loc=(.08, .6))
plt.subplots_adjust(left=.05, right=.95, bottom=.3, wspace=.05)
plt.savefig('diversity_%s.png' % main_title.replace(' ', '_').replace('\n', '_'), dpi=300)
plt.show()
def print_out_paired_images(df, dataset, pair_idxs, title_fxn, directory_to_save):
"""
Saves images in pairs so we can look for differences.
pair_idxs should be a list of image pairs (a list of two-element lists of indices). title_fxn takes in df and an index i and returns a title.
"""
for i in range(len(pair_idxs)):
img_1 = dataset[pair_idxs[i][0]]['image'][0, :, :]
img_1 = (img_1 - img_1.mean()) / img_1.std()
img_2 = dataset[pair_idxs[i][1]]['image'][0, :, :]
img_2 = (img_2 - img_2.mean()) / img_2.std()
plt.figure()
plt.subplot(121)
plt.imshow(img_1, clim=[-3, 3], cmap='bone')
plt.title(title_fxn(df, pair_idxs[i][0]))
plt.xticks([])
plt.yticks([])
plt.subplot(122)
plt.imshow(img_2, clim=[-3, 3], cmap='bone')
plt.title(title_fxn(df, pair_idxs[i][1]))
plt.xticks([])
plt.yticks([])
plt.savefig(os.path.join(directory_to_save, 'pair_%i.png' % i), dpi=300)
plt.show()
def generate_paired_images_to_inspect_three_different_ways(dataset_to_use, yhat):
"""
Try pairing images by KLG; by all image features (basically we take the max over feature categories); and by individual and side. In all cases, the
"high pain" image is on the right, although the way we define "high pain" changes.
"""
# 1. Pair images by KLG. Just use KLG 2.
df = dataset_to_use.non_image_data.copy()
df['yhat'] = yhat
klg_idxs = (df['xrkl'] == 2)
bad_y_cutoff = scoreatpercentile(df.loc[klg_idxs, 'koos_pain_subscore'], 10)
bad_yhat_cutoff = scoreatpercentile(df.loc[klg_idxs, 'yhat'], 10)
print("Bad Y cutoff is %2.3f; yhat cutoff is %2.3f" % (bad_y_cutoff, bad_yhat_cutoff))
good_y_cutoff = scoreatpercentile(df.loc[klg_idxs, 'koos_pain_subscore'], 90)
good_yhat_cutoff = scoreatpercentile(df.loc[klg_idxs, 'yhat'], 90)
print("Good Y cutoff is %2.3f; yhat cutoff is %2.3f" % (good_y_cutoff, good_yhat_cutoff))
low_pain_candidates = np.array(range(len(df)))[klg_idxs &
(df['yhat'] >= good_yhat_cutoff) &
(df['koos_pain_subscore'] >= good_y_cutoff)]
random.shuffle(low_pain_candidates)
high_pain_candidates = np.array(range(len(df)))[klg_idxs &
(df['yhat'] <= bad_yhat_cutoff) &
(df['koos_pain_subscore'] <= bad_y_cutoff)]
random.shuffle(high_pain_candidates)
print("%i low pain candidates; %i high pain candidates" % (len(low_pain_candidates),
len(high_pain_candidates)))
n_images = min(len(high_pain_candidates), len(low_pain_candidates))
paired_image_idxs = list(zip(low_pain_candidates[:n_images], high_pain_candidates[:n_images]))
def title_fxn(df, idx):
return 'KLG %i; yhat %2.1f; y %2.1f' % (df.iloc[idx]['xrkl'],
df.iloc[idx]['yhat'],
df.iloc[idx]['koos_pain_subscore'])
print_out_paired_images(df=df,
dataset=dataset_to_use,
pair_idxs=paired_image_idxs,
title_fxn=title_fxn,
directory_to_save='paired_images/paired_by_KLG/')
# pair images by image features.
# Set looser percentile cutoffs than in KLG pairing, to ensure we actually have pairs.
df = dataset_to_use.non_image_data.copy()
df['yhat'] = yhat
feature_groups_to_match_on = ['att', 'ch', 'cy', 'js', 'os', 'kl', 'sc']
def title_fxn(df, idx):
return '%s\nyhat %2.1f; y %2.1f' % (' '.join(['%s%1.0f' % (a, df.iloc[idx]['max_%s' % a]) for a in feature_groups_to_match_on]),
df.iloc[idx]['yhat'],
df.iloc[idx]['koos_pain_subscore'])
all_cols_used = []
for feature_group in feature_groups_to_match_on:
cols_to_use = sorted([a for a in CLINICAL_CONTROL_COLUMNS if feature_group in a])
print("taking max of features", cols_to_use, 'for %s' % feature_group)
df['max_%s' % feature_group] = df[cols_to_use].values.max(axis=1)
assert pd.isnull(df['max_%s' % feature_group]).sum() == 0
all_cols_used += cols_to_use
assert sorted(all_cols_used) == sorted(CLINICAL_CONTROL_COLUMNS) # make sure we have a disjoint partition of the original column set.
grouped_d = df.groupby(['max_%s' % a for a in feature_groups_to_match_on])
bad_y_cutoff = scoreatpercentile(df['koos_pain_subscore'], 40)
bad_yhat_cutoff = scoreatpercentile(df['yhat'], 40)
print("Y cutoff is %2.3f; yhat cutoff is %2.3f" % (bad_y_cutoff, bad_yhat_cutoff))
good_y_cutoff = scoreatpercentile(df['koos_pain_subscore'], 60)
good_yhat_cutoff = scoreatpercentile(df['yhat'], 60)
print("Y cutoff is %2.3f; yhat cutoff is %2.3f" % (good_y_cutoff, good_yhat_cutoff))
pair_idxs = []
for feature_vals, small_d in grouped_d:
if len(small_d) > 1:
bad_idxs = ((small_d['koos_pain_subscore'] <= bad_y_cutoff) &
(small_d['yhat'] <= bad_yhat_cutoff))
good_idxs = ((small_d['koos_pain_subscore'] >= good_y_cutoff) &
(small_d['yhat'] >= good_yhat_cutoff))
if bad_idxs.sum() > 0 and good_idxs.sum() > 0:
bad_small_d = small_d.loc[bad_idxs]
good_small_d = small_d.loc[good_idxs]
bad_idx = random.choice(bad_small_d.index)
good_idx = random.choice(good_small_d.index)
pair_idxs.append([good_idx, bad_idx])
print_out_paired_images(df=df,
dataset=dataset_to_use,
pair_idxs=pair_idxs,
title_fxn=title_fxn,
directory_to_save='paired_images/paired_by_image_features/')
# pair by person, side, and KLG. (So variation is just over time). Set a threshold on change in pain/yhat.
df = dataset_to_use.non_image_data.copy()
df['yhat'] = yhat
def title_fxn(df, idx):
return '%s, %s side\nKLG %i\nyhat %2.1f; y %2.1f' % (
df.iloc[idx]['visit'],
df.iloc[idx]['side'],
df.iloc[idx]['xrkl'],
df.iloc[idx]['yhat'],
df.iloc[idx]['koos_pain_subscore'])
grouped_d = df.groupby(['id', 'side', 'xrkl'])
pair_idxs = []
for feature_vals, small_d in grouped_d:
if len(small_d) > 1:
small_d = small_d.copy().sort_values(by='koos_pain_subscore')[::-1]
koos_change = small_d.iloc[0]['koos_pain_subscore'] - small_d.iloc[-1]['koos_pain_subscore']
yhat_change = small_d.iloc[0]['yhat'] - small_d.iloc[-1]['yhat']
if koos_change > 5 and yhat_change > 5:
pair_idxs.append([small_d.index[0], small_d.index[-1]])
print_out_paired_images(df=df,
dataset=dataset_to_use,
pair_idxs=pair_idxs,
title_fxn=title_fxn,
directory_to_save='paired_images/paired_by_person_and_side/')
def stratify_performances(df, yhat, y, yhat_from_klg):
"""
How do we do across subsets of the dataset relative to KLG?
"""
stratified_performances = []
pd.set_option('max_rows', 500)
for thing_to_stratify_by in ['v00site',
'side',
'visit',
'p02sex',
'age_at_visit',
'current_bmi',
'binarized_income_at_least_50k',
'binarized_education_graduated_college',
'race_black']:
for k in sorted(list(set(df[thing_to_stratify_by].dropna()))):
substratification_idxs = df[thing_to_stratify_by].values == k
if substratification_idxs.sum() < 100:
# don't plot super-noisy groups.
continue
yhat_performance = assess_performance(yhat=yhat[substratification_idxs],
y=y[substratification_idxs],
binary_prediction=False)
klg_performance = assess_performance(yhat=yhat_from_klg[substratification_idxs],
y=y[substratification_idxs],
binary_prediction=False)
stratified_performances.append(yhat_performance)
stratified_performances[-1]['predictor'] = 'yhat'
stratified_performances[-1]['substratification'] = thing_to_stratify_by + ' ' + str(k)
stratified_performances[-1]['n'] = substratification_idxs.sum()
stratified_performances.append(klg_performance)
stratified_performances[-1]['predictor'] = 'klg'
stratified_performances[-1]['substratification'] = thing_to_stratify_by + ' ' + str(k)
stratified_performances[-1]['n'] = substratification_idxs.sum()
for metric in ['r^2', 'negative_rmse']:
if yhat_performance[metric] < klg_performance[metric]:
print("Warning: yhat %s (%2.3f) is less than KLG's (%2.3f) for %s" %
(metric,
yhat_performance[metric],
klg_performance[metric],
k))
print("All other metrics passed. If a few fail, and not by too much, probably noise.")
stratified_performances = pd.DataFrame(stratified_performances)
# plot performance across subsets.
sns.set_style('whitegrid')
plt.figure(figsize=[15, 5])
plt.subplot(121)
labels = stratified_performances.loc[stratified_performances['predictor'] == 'klg', 'substratification'].values
plt.scatter(range(len(labels)), stratified_performances.loc[stratified_performances['predictor'] == 'klg', 'r^2'].values, label='klg')
plt.scatter(range(len(labels)), stratified_performances.loc[stratified_performances['predictor'] == 'yhat', 'r^2'].values, label='yhat')
plt.xticks(range(len(labels)), labels, rotation=90)
plt.ylabel("r^2")
plt.legend()
plt.subplot(122)
plt.scatter(range(len(labels)), stratified_performances.loc[stratified_performances['predictor'] == 'klg', 'negative_rmse'].values, label='KLG')
plt.scatter(range(len(labels)), stratified_performances.loc[stratified_performances['predictor'] == 'yhat', 'negative_rmse'].values, label='yhat')
plt.legend()
plt.xticks(range(len(labels)), labels, rotation=90)
plt.ylabel("Negative RMSE")
plt.show()
return stratified_performances
def get_CI_from_percentiles(bootstraps, alpha=0.05, make_plot=False):
# given a list of bootstrapped values, compute the CIs.
assert alpha > 0
assert alpha < 1
if make_plot:
plt.figure()
plt.hist(bootstraps, bins=50)
plt.title("N bootstraps: %i; range %2.3f-%2.3f" % (len(bootstraps), np.min(bootstraps), np.max(bootstraps)))
plt.show()
return [scoreatpercentile(bootstraps, alpha * 100 / 2.),
scoreatpercentile(bootstraps, 100 - alpha * 100 / 2.)]
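# get_CI_from_percentiles above consumes bootstrap replicates produced by
# get_bootstrapped_cis_on_quantity (defined elsewhere). As a hedged sketch of the general
# idea -- resampling clusters (ids) with replacement rather than individual rows, which is
# an assumption about that helper, not its actual code -- one could write:
def _sketch_cluster_bootstrap(df, fxn, n_bootstraps, id_col='id'):
    """Hypothetical cluster (by-id) percentile bootstrap: returns a list of fxn(resampled_df)."""
    import numpy as np
    import pandas as pd
    replicates = []
    unique_ids = df[id_col].unique()
    groups = {i: g for i, g in df.groupby(id_col)}  # rows for each cluster
    for _ in range(n_bootstraps):
        sampled_ids = np.random.choice(unique_ids, size=len(unique_ids), replace=True)
        resampled_df = pd.concat([groups[i] for i in sampled_ids], ignore_index=True)
        replicates.append(fxn(resampled_df))
    return replicates
# Hypothetical usage: pass each replicate's statistic into get_CI_from_percentiles, e.g.
# get_CI_from_percentiles([r['r^2'] for r in _sketch_cluster_bootstrap(df, some_metric_fxn, 1000)]).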
def bootstrap_CIs_on_pain_gap_reduction(y, yhat, yhat_from_klg, ids, all_ses_vars, n_bootstraps, quantities_of_interest=None):
"""
Confidence intervals on how much we reduce the pain gap.
"""
if quantities_of_interest is None:
quantities_of_interest = ['No controls gap', 'Rival gap', 'yhat gap', 'Rival red. vs nothing', 'red. vs nothing', 'yhat/rival red. ratio']
def ci_fxn(bootstraps):
print("Bootstrapped CIs")
concat_bootstraps = pd.concat(bootstraps)
for k in quantities_of_interest:
print('Bootstrap CI on', k, get_CI_from_percentiles(concat_bootstraps[k].values, make_plot=True))
for ses_var_name in all_ses_vars:
print("Bootstrap on pain gap reduction for %s" % ses_var_name)
get_bootstrapped_cis_on_quantity(df=pd.DataFrame({'id':ids,
'y':y,
'yhat':yhat,
'rival':yhat_from_klg,
ses_var_name:all_ses_vars[ses_var_name]}),
resample_points_within_cluster=False,
fxn=lambda x:quantify_pain_gap_reduction_vs_rival(yhat=x['yhat'].values,
y=x['y'].values,
rival_severity_measure=x['rival'].values,
all_ses_vars={ses_var_name:x[ses_var_name].values},
ids=x['id'].values,
lower_bound_rival_reduction_at_0=True),
n_bootstraps=n_bootstraps,
ci_fxn=ci_fxn)
def mean_absolute_error(y, yhat):
return np.mean(np.abs(y - yhat))
def bootstrap_CIs_on_model_performance(y, yhat, yhat_from_klg, yhat_from_clinical_image_features, ids, n_bootstraps, binary_prediction=False, metrics=None):
"""
Compare our r^2 and RMSE to KLG's.
"""
check_is_array(y)
check_is_array(yhat)
check_is_array(yhat_from_klg)
if yhat_from_clinical_image_features is not None:
check_is_array(yhat_from_clinical_image_features)
check_is_array(ids)
def compare_our_performance_to_rival(df, metric):
if metric != 'mean_absolute_error':
# mean_absolute_error is coded separately because it was added just for the Nature Medicine revise-and-resubmit.
rival_performance = assess_performance(y=df['y'].values, yhat=df['rival'].values, binary_prediction=binary_prediction)[metric]
our_performance = assess_performance(y=df['y'].values, yhat=df['yhat'].values, binary_prediction=binary_prediction)[metric]
else:
rival_performance = mean_absolute_error(y=df['y'].values, yhat=df['rival'].values)
our_performance = mean_absolute_error(y=df['y'].values, yhat=df['yhat'].values)
return {'our_performance':our_performance,
'rival_performance':rival_performance,
'ratio':our_performance/rival_performance}
def ci_fxn(bootstraps):
print("Bootstrapped CIs")
for k in bootstraps[0].keys():
print('CI on', k, get_CI_from_percentiles([x[k] for x in bootstraps], make_plot=True))
for rival_name in ['klg', 'all_image_features']:
if rival_name == 'all_image_features' and yhat_from_clinical_image_features is None:
continue
if rival_name == 'klg':
rival = yhat_from_klg
elif rival_name == 'all_image_features':
rival = yhat_from_clinical_image_features
else:
raise Exception("Invalid rival name")
if metrics is None:
if binary_prediction:
metrics = ['auc', 'auprc']
else:
metrics = ['rmse', 'r^2', 'spearman_r^2']
for metric in metrics:
print("\n\nComputing CIs for metric %s, comparing yhat to %s" % (metric, rival_name))
get_bootstrapped_cis_on_quantity(df=pd.DataFrame({'id':ids, 'y':y, 'yhat':yhat, 'rival':rival}),
resample_points_within_cluster=False,
fxn=lambda x:compare_our_performance_to_rival(x, metric=metric),
n_bootstraps=n_bootstraps,
ci_fxn=ci_fxn)
def make_counterfactual_surgery_prediction(interventions_df,
yhat,
klg,
all_ses_vars):
"""
Given an interventions dataframe (on which we fit the knee_surgery ~ xrkl model)
and counterfactual severity measures (the yhat and klg arrays, on which we actually predict how allocations would change),
predict how surgery allocation to disadvantaged racial/SES groups would change under yhat rather than KLG.
"""
check_is_array(yhat)
check_is_array(klg)
assert len(yhat) == len(klg)
surgery_model = sm.Logit.from_formula('knee_surgery ~ C(xrkl)', data=interventions_df).fit()
print(surgery_model.summary())
print("Surgery gap in SINGLE KNEE surgery rates")
for k in ['race_black', 'binarized_education_graduated_college', 'binarized_income_at_least_50k']:
ses_var_1_mean = interventions_df.loc[interventions_df[k] == 1, 'knee_surgery'].mean()
ses_var_0_mean = interventions_df.loc[interventions_df[k] == 0, 'knee_surgery'].mean()
print("%s var=1 surgery rate: %2.3f; var=0 surgery rate: %2.3f; ratio: %2.5f; inverse ratio %2.5f" % (k, ses_var_1_mean, ses_var_0_mean, ses_var_1_mean/ses_var_0_mean, ses_var_0_mean/ses_var_1_mean))
discretized_yhat = discretize_yhat_like_kl_grade(
yhat_arr=yhat,
kl_grade_arr=klg,
y_col='koos_pain_subscore') # we want the allocation of severity grades to match up (see the hedged sketch after this function).
original_pred = surgery_model.predict({'xrkl':klg}).values
counterfactual_pred = surgery_model.predict({'xrkl':discretized_yhat}).values
assert (original_pred > 0).all()
assert (counterfactual_pred > 0).all()
assert (original_pred < 1).all()
assert (counterfactual_pred < 1).all()
print("Number of rows in counterfactual dataframe: %i" % len(counterfactual_pred))
# bar graph/histogram of fraction of disadvantaged groups assigned to each severity grade by yhat/KLG.
plt.figure(figsize=[12, 4])
xs = sorted(list(set(klg)))
assert xs == list(range(5))
for subplot_idx, ses_var in enumerate(list(all_ses_vars.keys())):
plt.subplot(1, 3, subplot_idx + 1)
klg_props = []
yhat_props = []
total_count = all_ses_vars[ses_var].sum()
klg_counts = Counter(klg[all_ses_vars[ses_var] == 1])
discretized_yhat_counts = Counter(discretized_yhat[all_ses_vars[ses_var] == 1])
for x in xs:
klg_props.append(100. * klg_counts[x] / total_count)
yhat_props.append(100. * discretized_yhat_counts[x] / total_count)
print(ses_var)
print(klg_props)
print(yhat_props)
barwidth = .3
plt.bar(np.array(xs), klg_props, label='KLG', alpha=.7, width=barwidth)
plt.bar(np.array(xs) + barwidth, yhat_props, label=r'$\hat y$', alpha=.7, width=barwidth)
plt.title(ses_var)
plt.legend()
plt.xticks(range(5))
plt.xlabel("Severity grade")
plt.ylim([0, 50])
plt.xlim([-.1 - barwidth / 2., 4 + 1.5 * barwidth + .1])
if subplot_idx == 0:
plt.ylabel("Probability of being assigned to that grade")
plt.yticks(range(0, 60, 10), ['%i%%' % a for a in range(0, 60, 10)])
else:
plt.yticks([])
plt.show()
assert np.allclose(original_pred.mean(), counterfactual_pred.mean())
pd.set_option('precision', 5)
for var in all_ses_vars:
ses_arr = all_ses_vars[var]
check_is_array(ses_arr)
ratio = counterfactual_pred[ses_arr].mean()/original_pred[ses_arr].mean()
print("Frac of %s getting surgery under KLG: %2.3f; counterfactual %2.3f; ratio %2.3f" % (var, original_pred[ses_arr].mean(), counterfactual_pred[ses_arr].mean(), ratio))
def make_scatter_plot_showing_severity_reassignment_under_yhat(yhat, y, klg, all_ses_vars, idxs_to_use, interventions_df):
"""
Compute how severity / surgery assignments change under yhat. Calls make_counterfactual_surgery_prediction.
We only compute reassignments for rows specified by idxs_to_use (used to, e.g., choose only baseline values).
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(klg)
check_is_array(idxs_to_use)
yhat = yhat[idxs_to_use].copy()
y = y[idxs_to_use].copy()
klg = klg[idxs_to_use].copy()
all_ses_vars = copy.deepcopy(all_ses_vars)
for ses_var in all_ses_vars:
all_ses_vars[ses_var] = all_ses_vars[ses_var][idxs_to_use]
plt.figure(figsize=[5, 4])
sns.set_style('white')
for ses_var in all_ses_vars:
check_is_array(all_ses_vars[ses_var])
geq_klg_2_results = compare_pain_levels_for_people_geq_klg_2(yhat=yhat,
y=y,
klg=klg,
ses=all_ses_vars[ses_var],
y_col='koos_pain_subscore')
if ses_var == 'race_black':
pretty_label = 'Black'
elif ses_var == 'did_not_graduate_college':
pretty_label = "Didn't graduate college"
elif ses_var == 'income_less_than_50k':
pretty_label = 'Income < $50k'
else:
raise Exception("Invalid SES var")
plt.scatter(range(5),
geq_klg_2_results['all_klg_yhat_ratios'],
label=pretty_label)
plt.ylim([1, 1.7])
plt.yticks([1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
['0%', '10%', '20%', '30%', '40%', '50%', '60%', '70%'], fontsize=14)
plt.ylabel("How much likelier under $\hat y$", fontsize=14)
plt.xticks([1, 2, 3, 4],
['$\geq 1$', '$\geq 2$', '$\geq 3$', '$\geq 4$'], fontsize=14)
plt.xlabel("Severity grade", fontsize=14)
plt.xlim([.9, 4.1])
plt.legend()
plt.subplots_adjust(left=.3, bottom=.2)
plt.savefig("reassign_under_yhat_plot.png", dpi=300)
plt.show()
make_counterfactual_surgery_prediction(interventions_df=interventions_df,
yhat=yhat,
klg=klg,
all_ses_vars=all_ses_vars)
def make_painkillers_and_surgery_frequency_bar_plot(interventions_df):
"""
Plot the frequency of various medical interventions.
"""
interventions_df = interventions_df.copy()
medications = ["V00RXACTM", "V00RXANALG", "V00RXASPRN", "V00RXBISPH",
"V00RXCHOND", "V00RXCLCTN", "V00RXCLCXB", "V00RXCOX2",
"V00RXFLUOR", "V00RXGLCSM", "V00RXIHYAL", "V00RXISTRD",
"V00RXMSM", "V00RXNARC", "V00RXNSAID", "V00RXNTRAT",
"V00RXOSTRD", "V00RXOTHAN", "V00RXRALOX", "V00RXRFCXB",
"V00RXSALIC", "V00RXSAME", "V00RXTPRTD", "V00RXVIT_D", "V00RXVLCXB"]
sns.set_style('whitegrid')
medications = [a.lower().replace('v00', '') for a in medications]
other_cols = ["id", "knee_surgery",
'binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black',
'xrkl', 'koos_pain_subscore']
medication_df = interventions_df[medications + other_cols].copy()
medication_df.columns = [MEDICATION_CODES[('v00' + a).upper()] for a in medications] + other_cols
any_knee_surgery = set(medication_df.loc[medication_df['knee_surgery'] > 0, 'id'])
medication_df['knee_surgery\neither_knee'] = medication_df['id'].map(lambda x:x in any_knee_surgery)
intervention_cols_to_plot = ['Aspirin', 'Acetaminophen',
'Narcotic_analgesic','NSAID',
'Analgesic', 'knee_surgery\neither_knee']
medication_df['in_high_pain'] = binarize_koos(medication_df['koos_pain_subscore'].values)
for intervention in ['Aspirin', 'Acetaminophen', 'Narcotic_analgesic', 'NSAID', 'Analgesic']:
df_to_use = medication_df.dropna(subset=['in_high_pain', intervention])
medication_model = sm.Logit.from_formula('%s ~ in_high_pain' % intervention, data=df_to_use).fit(cov_type='cluster', cov_kwds={'groups':df_to_use['id'].values})
print(medication_model.summary())
for k in intervention_cols_to_plot + ['knee_surgery']:
print("Fraction of missing data in %s: %2.3f out of %i points" % (k, pd.isnull(medication_df[k]).mean(), len(medication_df)))
for col in intervention_cols_to_plot:
# make sure that each person has a unique value.
assert len(medication_df.drop_duplicates('id')) == len(medication_df.drop_duplicates(['id', col]))
medication_df = medication_df.drop_duplicates('id')
make_intervention_frequencies_plot(medication_df, intervention_cols_to_plot, 'interventions_plot.png')
def make_rate_of_surgery_figure(interventions_df):
"""
Fraction of surgery by KLG.
"""
interventions_df = interventions_df.copy()
print(interventions_df[['xrkl', 'knee_surgery']].groupby('xrkl').agg(['mean', 'size']))
xs = range(5)
ys = []
yerrs = []
for x in xs:
ys.append(interventions_df.loc[interventions_df['xrkl'] == x, 'knee_surgery'].mean())
sns.set_style('white')
plt.figure(figsize=[5, 4])
plt.scatter(xs, ys)
plt.xlabel("KLG", fontsize=14)
plt.ylabel("Had knee surgery", fontsize=14)
plt.xlim([-.1, 4.1])
plt.xticks([0, 1, 2, 3, 4], fontsize=14)
plt.yticks([0, .3, .6], ['0%', '30%', '60%'], fontsize=14)
plt.ylim([0, .6])
plt.subplots_adjust(bottom=.3, left=.3)
plt.savefig('klg_surgery_plot.png', dpi=300)
def make_descriptive_stats_table(train_df, val_df, test_df):
"""
Descriptive stats for table 1 in paper.
"""
# Need to load original data to get original BMI + age, which we render as categorical in final data.
all_clinical00 = pd.read_csv(os.path.join(BASE_NON_IMAGE_DATA_DIR, 'AllClinical_ASCII', 'AllClinical00.txt'), sep='|')
all_clinical00.columns = all_clinical00.columns.map(lambda x:x.lower())
assert len(all_clinical00.columns) == len(set(all_clinical00.columns))
print("allclinical00 has %i columns, %i rows" % (len(all_clinical00.columns), len(all_clinical00)))
all_clinical00['current_bmi'] = all_clinical00['p01weight'] / ((all_clinical00['p01height'] / 1000.) ** 2)
all_clinical00 = all_clinical00[['id', 'current_bmi', 'v00age']]
all_clinical00.index = all_clinical00['id']
train_df = train_df.copy()
val_df = val_df.copy()
test_df = test_df.copy()
train_plus_val_df = pd.concat([train_df, val_df])
train_plus_val_df.index = range(len(train_plus_val_df))
train_plus_val_plus_test_df = pd.concat([train_df, val_df, test_df])
train_plus_val_plus_test_df.index = range(len(train_plus_val_plus_test_df))
print("Sorted image features by how often they are nonzero (all three datasets combined)")
how_often_not_zero = []
for c in CLINICAL_CONTROL_COLUMNS:
assert pd.isnull(train_plus_val_plus_test_df[c]).sum() == 0
how_often_not_zero.append({'c':c,
'not_zero':(train_plus_val_plus_test_df[c] != 0).mean(),
'val_counts':Counter(train_plus_val_plus_test_df[c])})
print(pd.DataFrame(how_often_not_zero).sort_values(by='not_zero')[::-1])
dataset_names = ['train', 'val', 'train+val', 'test', 'train+val+test']
for dataset_idx, descriptive_stats_df in enumerate([train_df, val_df, train_plus_val_df, test_df, train_plus_val_plus_test_df]):
print("\n\n****%s" % dataset_names[dataset_idx])
print("Points: %i total" % len(descriptive_stats_df))
print("People: %i total" % len(set(descriptive_stats_df['id'])))
descriptive_stats_df['is_female'] = (descriptive_stats_df['p02sex'] == '2: Female').values
ids = list(set(descriptive_stats_df['id'].values))
print(all_clinical00.loc[ids, ['current_bmi', 'v00age']].describe().loc[['mean', 'std']])
assert pd.isnull(all_clinical00.loc[ids, 'v00age']).sum() == 0
for k in ['binarized_income_at_least_50k',
'binarized_education_graduated_college',
'race_black',
'is_female']:
n_ids_in_cat = len(set(descriptive_stats_df.loc[descriptive_stats_df[k] == 1, 'id'].values))
print('%s: %i/%i people, %2.5f '% (k, n_ids_in_cat, len(set(descriptive_stats_df['id'])), 1.*n_ids_in_cat/len(set(descriptive_stats_df['id']))))
print(100 * descriptive_stats_df.drop_duplicates('id')['p02race'].value_counts(dropna=False)/len(descriptive_stats_df.drop_duplicates('id')))
print('race + ethnicity')
descriptive_stats_df['race+is_hispanic'] = descriptive_stats_df['p02race'] + ', hispanic ' + descriptive_stats_df['p02hisp']
print(100 * descriptive_stats_df.drop_duplicates('id')['race+is_hispanic'].value_counts(dropna=False)/len(descriptive_stats_df.drop_duplicates('id')))
# categorical baseline BMI/age.
baseline_idxs = descriptive_stats_df['visit'] == '00 month follow-up: Baseline'
baseline_df = descriptive_stats_df.loc[baseline_idxs].copy()
assert len(baseline_df.drop_duplicates('id')) == len(baseline_df[['id', 'current_bmi']].drop_duplicates())
assert len(baseline_df.drop_duplicates('id')) == len(baseline_df[['id', 'age_at_visit']].drop_duplicates())
baseline_df = baseline_df.drop_duplicates('id')
print(baseline_df['current_bmi'].value_counts(dropna=False) / len(baseline_df))
print(baseline_df['age_at_visit'].value_counts(dropna=False) / len(baseline_df))
# fraction of people in high pain.
descriptive_stats_df['klg_geq_2'] = (descriptive_stats_df['xrkl'] >= 2).values
descriptive_stats_df['high_pain'] = binarize_koos(descriptive_stats_df['koos_pain_subscore'].values)
for outcome in ['klg_geq_2', 'high_pain']:
print("\n\n***Outcome %s" % outcome)
print("Overall fraction of knees %s: %2.5f" % (outcome, descriptive_stats_df[outcome].mean()))
for k in ['binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black']:
mean_for_group_true = descriptive_stats_df.loc[descriptive_stats_df[k] == 1, outcome].mean()
mean_for_group_false = descriptive_stats_df.loc[descriptive_stats_df[k] == 0, outcome].mean()
print("Fraction for %-50s=1: %2.5f" % (k, mean_for_group_true))
print("Fraction for %-50s=0: %2.5f" % (k, mean_for_group_false))
# Compute p-value on difference.
df_for_regression = pd.DataFrame({'outcome':descriptive_stats_df[outcome].values * 1.,
'ses':descriptive_stats_df[k].values * 1.,
'id':descriptive_stats_df['id'].values})
diff_model = sm.OLS.from_formula('outcome ~ ses', data=df_for_regression).fit(cov_type='cluster', cov_kwds={'groups':df_for_regression['id']})
print('p-value for difference: %2.6f' % diff_model.pvalues['ses'])
descriptive_stats_df['koos_pain_zscore'] = (descriptive_stats_df['koos_pain_subscore'] - descriptive_stats_df['koos_pain_subscore'].mean()) / descriptive_stats_df['koos_pain_subscore'].std(ddof=1)
descriptive_stats_df['koos_pain_percentile'] = 100. * rankdata(descriptive_stats_df['koos_pain_subscore'].values)/len(descriptive_stats_df)
pd.set_option('display.width', 500)
for k in ['binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black']:
print("Continuous descriptive stats for pain and KLG")
print(descriptive_stats_df[['xrkl', 'koos_pain_subscore', 'koos_pain_percentile', k]].groupby(k).agg(['mean', 'std']))
absolute_pain_gap = np.abs(descriptive_stats_df.loc[descriptive_stats_df[k] == 1, 'koos_pain_subscore'].mean() -
descriptive_stats_df.loc[descriptive_stats_df[k] == 0, 'koos_pain_subscore'].mean())
print("Pain gap in stds: %2.3f" % (absolute_pain_gap / descriptive_stats_df['koos_pain_subscore'].std(ddof=1)))
# Cohen's d, as defined by Wikipedia: https://en.wikipedia.org/wiki/Effect_size#Cohen%27s_d. This ends up being very similar to the effect size in sds (see the standalone sketch after this function).
n1 = (descriptive_stats_df[k] == 1).sum()
n0 = (descriptive_stats_df[k] == 0).sum()
var1 = descriptive_stats_df.loc[descriptive_stats_df[k] == 1, 'koos_pain_subscore'].std(ddof=1) ** 2
var0 = descriptive_stats_df.loc[descriptive_stats_df[k] == 0, 'koos_pain_subscore'].std(ddof=1) ** 2
pooled_std = np.sqrt(((n1 - 1) * var1 + (n0 - 1) * var0) / (n1 + n0 - 2))
print("Pain gap, cohen's d: %2.3f" % (absolute_pain_gap / pooled_std))
print("\n\nComparing median to other distributions")
for k in ['binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black']:
print(k)
for ingroup in [0, 1]:
ingroup_pain_median = descriptive_stats_df.loc[descriptive_stats_df[k] == ingroup, 'koos_pain_subscore'].median()
outgroup_worse_pain = (descriptive_stats_df.loc[descriptive_stats_df[k] != ingroup, 'koos_pain_subscore'] < ingroup_pain_median).mean()
outgroup_better_pain = (descriptive_stats_df.loc[descriptive_stats_df[k] != ingroup, 'koos_pain_subscore'] > ingroup_pain_median).mean()
outgroup_same_pain = (descriptive_stats_df.loc[descriptive_stats_df[k] != ingroup, 'koos_pain_subscore'] == ingroup_pain_median).mean()
print("var=%i: %2.1f%% of the other group has worse pain than median person in this group, %2.1f%% better, %2.1f%% the same" % (ingroup, 100*outgroup_worse_pain, 100*outgroup_better_pain, 100*outgroup_same_pain))
def make_intervention_frequencies_plot(medication_df, cols_to_plot, fig_filename=None):
"""
Given a bunch of binary cols_to_plot, plot how much more frequently they occur in disadvantaged racial/SES groups than in the corresponding outgroup (relative risks).
An absolute-risk subplot, and regressions testing whether pain or KLG better predicts receiving each treatment, are sketched in the code below but currently disabled
(commented out / only reachable via the unused 'overall' branch).
"""
plt.figure(figsize=[10, 10])
sns.set_style('whitegrid')
assert len(set(medication_df['id'])) == len(medication_df) # should be person-level.
bar_width = .2
for group_idx, group in enumerate(['income < $50k', "didn't graduate college", "black"]):
bar_positions = []
risks_for_each_group = [] # absolute risks
relative_risks = [] # risk relative to outgroup.
labels = []
current_pos = group_idx * bar_width
for c in cols_to_plot:
if c not in cols_to_plot:
continue
assert set(medication_df[c].dropna()).issubset(set([0, 1]))
if medication_df[c].mean() < .01:
continue
if group == 'overall': # only need to do regressions once. Note: 'overall' is not in the current group list, so this branch (and the all_rsquareds bookkeeping below) never runs.
raise Exception("This is a bit sketchy because the plot is made using individual-level data, not side-individual level data, so KLG and koos are invalid")
klg_rsquared = sm.OLS.from_formula('%s ~ C(xrkl)' % c, data=medication_df).fit().rsquared
koos_rsquared = sm.OLS.from_formula('%s ~ koos_pain_subscore' % c, data=medication_df).fit().rsquared
combined_model = sm.OLS.from_formula('%s ~ koos_pain_subscore + xrkl' % c, data=medication_df).fit()
all_rsquareds.append({'intervention':c,
'koos r^2':koos_rsquared,
'klg r^2 with categorial KLG':klg_rsquared,
'koos_beta_in_combined_model with linear KLG':combined_model.params['koos_pain_subscore'],
'klg_beta_in_combined_model with linear KLG':combined_model.params['xrkl']})
labels.append('%s\n(%1.0f%% overall)' % (c.replace('_', ' '), 100 * medication_df[c].mean()))
bar_positions.append(current_pos)
if group == 'overall':
risks_for_each_group.append(medication_df[c].mean())
relative_risks.append(1)
elif group == 'black':
risks_for_each_group.append(medication_df.loc[medication_df['race_black'] == 1, c].mean())
relative_risks.append(medication_df.loc[medication_df['race_black'] == 1, c].mean()/
medication_df.loc[medication_df['race_black'] == 0, c].mean())
elif group == 'income < $50k':
risks_for_each_group.append(medication_df.loc[medication_df['binarized_income_at_least_50k'] == 0, c].mean())
relative_risks.append(medication_df.loc[medication_df['binarized_income_at_least_50k'] == 0, c].mean()/
medication_df.loc[medication_df['binarized_income_at_least_50k'] == 1, c].mean())
elif group == "didn't graduate college":
risks_for_each_group.append(medication_df.loc[medication_df['binarized_education_graduated_college'] == 0, c].mean())
relative_risks.append(medication_df.loc[medication_df['binarized_education_graduated_college'] == 0, c].mean()/
medication_df.loc[medication_df['binarized_education_graduated_college'] == 1, c].mean())
else:
raise Exception("invalid col")
print("%-30s: high SES people are %2.3fx as likely to get %s" % (group, 1/relative_risks[-1], c.replace('\n', ' ')))
current_pos += 1
#plt.subplot(121)
#plt.barh(bar_positions, risks_for_each_group, label=group, height=bar_width)
#plt.subplot(111)
plt.barh(bar_positions, relative_risks, label=group, height=bar_width)
#plt.subplot(121)
#plt.yticks([a - bar_width for a in bar_positions], labels)
#plt.legend()
#plt.xlabel('Proportion of people reporting use')
#plt.subplot(122)
#plt.yticks([])
fontsize = 18
plt.yticks([a - bar_width for a in bar_positions], labels, fontsize=fontsize)
plt.xlabel("Risk relative to outgroup", fontsize=fontsize)
plt.xticks([0, 0.5, 1, 1.5, 2.0, 2.5], ['0x', '0.5x', '1x', '1.5x', '2.0x', '2.5x'], fontsize=fontsize)
plt.plot([1, 1], [min(bar_positions) - 1, max(bar_positions) + 1], linestyle='-', color='black')
plt.ylim(min(bar_positions) - .5, max(bar_positions) + bar_width/2)
plt.legend(fontsize=fontsize - 2)
plt.subplots_adjust(left=.3)
if fig_filename is not None:
plt.savefig(fig_filename, dpi=300)
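# The relative risks plotted above are just (risk in the disadvantaged group) / (risk in the
# corresponding outgroup). A minimal standalone restatement of that ratio, mirroring the
# inline code above (illustration only, not called elsewhere):
def _sketch_relative_risk(binary_outcome, in_group):
    """Risk ratio of a binary outcome for in_group == 1 relative to in_group == 0."""
    import numpy as np
    binary_outcome = np.asarray(binary_outcome, dtype=float)
    in_group = np.asarray(in_group).astype(bool)
    return np.nanmean(binary_outcome[in_group]) / np.nanmean(binary_outcome[~in_group])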
def check_main_results_hold_when_controlling_for_things(df, yhat, rival_predictor, rival_name, all_controls):
"""
Make sure that when we include controls, we still reduce the pain gap and we still outpredict rival_predictor.
"""
test_df = df.copy()
check_is_array(yhat)
check_is_array(rival_predictor)
test_df['yhat'] = yhat
test_df['rival'] = rival_predictor
for control in all_controls:
all_cols = control.split('*') # in case we have an interaction term.
for c in all_cols:
col_name = c.replace('C(', '').replace(')', '')
missing_data = pd.isnull(test_df[col_name])
if missing_data.mean() > 0:
print("warning! fraction %2.3f of control column %s is NULL; replacing with MISSING_DATA indicator" % (missing_data.mean(), col_name))
if 'C(' in c:
test_df.loc[missing_data, col_name] = 'MISSING_DATA'
else:
assert False, "Missing data in a non-categorical control column is not handled."
control_string = '+'.join(all_controls)
print("Testing whether we still outperform %s when controlling for %s" % (rival_name, ','.join(all_controls)))
yhat_beta_with_no_controls_model = sm.OLS.from_formula('koos_pain_subscore ~ yhat', data=test_df).fit()
yhat_beta_with_no_controls = yhat_beta_with_no_controls_model.params['yhat']
yhat_beta_with_no_controls_model = yhat_beta_with_no_controls_model.get_robustcov_results(cov_type='cluster', groups=test_df['id'].astype(int))
print("yhat beta with no controls: %2.3f" % yhat_beta_with_no_controls)
our_model = sm.OLS.from_formula('koos_pain_subscore ~ yhat + %s' % control_string, data=test_df).fit()
yhat_beta = our_model.params['yhat']
our_rsquared = our_model.rsquared
our_model = our_model.get_robustcov_results(cov_type='cluster', groups=test_df['id'].astype(int))
print(yhat_beta_with_no_controls_model.summary())
print(our_model.summary())
rival_rsquared = sm.OLS.from_formula('koos_pain_subscore ~ rival + %s' % control_string, data=test_df).fit().rsquared
yhat_from_controls_model = sm.OLS.from_formula('yhat ~ %s' % control_string, data=test_df).fit()
assert yhat_from_controls_model.nobs == len(test_df)
yhat_rsquared_from_controls = yhat_from_controls_model.rsquared
print("Our r^2: %2.3f; rival r^2: %2.3f; yhat beta %2.3f; fraction of variance in yhat that controls explain %2.3f" % (our_rsquared, rival_rsquared, yhat_beta, yhat_rsquared_from_controls))
comparisons_with_controls = []
for k in ['race_black',
'binarized_education_graduated_college',
'binarized_income_at_least_50k']:
test_df[k] = test_df[k] * 1.
controls_only_model = sm.OLS.from_formula('koos_pain_subscore ~ %s + %s' % (k, control_string), data=test_df).fit()
yhat_model = sm.OLS.from_formula('koos_pain_subscore ~ %s + yhat + %s' % (k, control_string), data=test_df).fit()
rival_model = sm.OLS.from_formula('koos_pain_subscore ~ %s + rival + %s' % (k, control_string), data=test_df).fit()
our_reduction = 1 - yhat_model.params[k] / controls_only_model.params[k]
rival_reduction = 1 - rival_model.params[k] / controls_only_model.params[k]
comparisons_with_controls.append({'var':k,
'rival':rival_name,
'controls':all_controls,
'ses_beta_just_controls':controls_only_model.params[k],
'our_reduction':'%2.0f%%' % (100 * our_reduction),
'rival_reduction':'%2.0f%%' % (100 * rival_reduction),
'ratio':our_reduction/rival_reduction})
return pd.DataFrame(comparisons_with_controls)[['var', 'rival', 'controls', 'ses_beta_just_controls', 'our_reduction', 'rival_reduction', 'ratio']]
def try_ensembling(all_results, n_to_ensemble, binary_prediction):
"""
Given a dataframe of results with test_yhats in each result, take top n_to_ensemble results and average them together.
Dataframe must be sorted.
"""
print("Ensembling results! Warning: dataframe must be sorted so yhats you like best are FIRST.")
previous_y = None
assert not binary_prediction
assert len(all_results) >= n_to_ensemble
all_performances = []
all_yhats = []
for i in range(n_to_ensemble):
yhat = all_results.iloc[i]['test_yhat'].copy()
y = all_results.iloc[i]['test_y'].copy()
all_yhats.append(yhat.copy())
performance = assess_performance(yhat, y, binary_prediction=binary_prediction)
performance['model'] = i
all_performances.append(performance)
if i == 0:
ensemble_yhat = yhat
previous_y = y
else:
ensemble_yhat = ensemble_yhat + yhat
assert (previous_y == y).all()
for i in range(len(all_yhats)):
for j in range(i):
print("Correlation between yhat %i and %i: %2.3f" % (i, j, pearsonr(all_yhats[i], all_yhats[j])[0]))
ensemble_yhat = ensemble_yhat / n_to_ensemble
performance = assess_performance(ensemble_yhat, y, binary_prediction=binary_prediction)
performance['model'] = 'ensemble'
all_performances.append(performance)
return pd.DataFrame(all_performances), ensemble_yhat
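# try_ensembling expects all_results to be a dataframe with one row per trained model,
# containing array-valued 'test_yhat' and 'test_y' columns, already sorted best-first.
# A hypothetical wrapper showing that usage -- the 'val_r^2' sort key is an assumption
# for illustration, not a real column name in this codebase:
def _sketch_try_ensembling_usage(all_results):
    """Hypothetical usage: sort models best-first, then ensemble the top 5."""
    all_results = all_results.sort_values(by='val_r^2', ascending=False)
    return try_ensembling(all_results, n_to_ensemble=5, binary_prediction=False)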
def get_david_education_variable(x):
"""
Code education the way David did, for purpose of replicating his results.
"""
#gen byte educat = 1 if v00edcv==0 | v00edcv==1
#replace educat = 2 if v00edcv==2
#replace educat = 3 if v00edcv>=3 & v00edcv<=5
#label define educat 1 "<=HS" 2 "Some college" 3 "College grad"
mapping = {'0: Less than high school graduate':'1:<=HS',
'1: High school graduate':'1:<=HS',
'2: Some college':'2:Some college',
'3: College graduate':"3:College grad",
'4: Some graduate school':'3:College grad',
'5: Graduate degree':"3:College grad"}
return mapping[x]
def replicate_david_regressions(non_image_data, remove_missing_data):
"""
Qualitatively replicate David's regressions.
Verified regression coefficients look similar.
Not sure exactly how he dealt with missing data so try it both ways.
"""
non_image_data = copy.deepcopy(non_image_data)
controls_david_used = (AGE_RACE_SEX_SITE +
RISK_FACTORS +
BMI +
TIMEPOINT_AND_SIDE +
FRACTURES_AND_FALLS +
KNEE_INJURY_OR_SURGERY +
OTHER_PAIN)
for c in sorted(list(set(controls_david_used + MRI))):
col_name_to_use = c.replace('C(', '').replace(')', '').split(', Treatment')[0]
if '*' in col_name_to_use:
# this indicates an interaction term, not a column.
continue
missing_data_for_col = pd.isnull(non_image_data[col_name_to_use])
if missing_data_for_col.sum() > 0:
if not remove_missing_data or c in OTHER_PAIN + ['fractured_hip']:
# we never filter out missing data for a few columns because it's so often missing.
print("Filling in missing data for proportion %2.3f values of %s" %
(missing_data_for_col.mean(), col_name_to_use))
non_image_data.loc[missing_data_for_col, col_name_to_use] = 'MISSING'
else:
print("removing rows with missing data for proportion %2.3f values of %s" %
(missing_data_for_col.mean(), col_name_to_use))
non_image_data = non_image_data.loc[~missing_data_for_col]
# Define IV of interest.
non_image_data['david_education_variable'] = non_image_data['v00edcv'].map(get_david_education_variable)
# Run the three specifications David did.
sp1 = sm.OLS.from_formula('womac_pain_subscore ~ %s' % '+'.join(controls_david_used + ['david_education_variable']), data=non_image_data).fit()
sp1 = sp1.get_robustcov_results(cov_type='cluster', groups=non_image_data['id'].astype(int))
sp2 = sm.OLS.from_formula('womac_pain_subscore ~ %s' %
'+'.join(controls_david_used + ['david_education_variable', 'C(xrkl)']), data=non_image_data).fit()
sp2 = sp2.get_robustcov_results(cov_type='cluster', groups=non_image_data['id'].astype(int))
sp3 = sm.OLS.from_formula('womac_pain_subscore ~ %s' % '+'.join(controls_david_used + MRI + ['david_education_variable', 'C(xrkl)']), data=non_image_data).fit()
param_names = sp3.params.index
sp3 = sp3.get_robustcov_results(cov_type='cluster', groups=non_image_data['id'].astype(int))
regressor_order = ([a for a in param_names if 'david_education_variable' in a] +
[a for a in param_names if 'xrkl' in a] +
[a for a in param_names if any([b in a for b in MRI])] +
[a for a in param_names if 'knee_injury' in a] +
[a for a in param_names if 'knee_surgery' in a] +
[a for a in param_names if 'current_bmi' in a] +
[a for a in param_names if 'max_bmi' in a] +
[a for a in param_names if 'smoker' in a] +
[a for a in param_names if 'drinks' in a] +
[a for a in param_names if 'fractured' in a or 'fell' in a] +
[a for a in param_names if any([b in a for b in OTHER_PAIN])] +
[a for a in param_names if 'age_at_visit' in a or 'p02sex' in a] +
[a for a in param_names if 'p02race' in a or 'p02hisp' in a] +
[a for a in param_names if 'v00maritst' in a] +
[a for a in param_names if 'dominant' in a] +
[a for a in param_names if 'v00site' in a] +
[a for a in param_names if 'visit' in a and 'age_at_visit' not in a])
return summary_col([sp1, sp2, sp3],
stars=True,
regressor_order=regressor_order,
info_dict={'N':lambda x: "{0:d}".format(int(x.nobs)),
'R2':lambda x: "{:.3f}".format(x.rsquared)})
def assess_what_image_features_y_and_yhat_correlate_with(non_image_data, yhat, y):
"""
Make a plot of how strongly the image features correlate with Y and with yhat.
"""
check_is_array(yhat)
check_is_array(y)
non_image_data = copy.deepcopy(non_image_data)
assert len(non_image_data) == len(yhat)
negative_klg = copy.deepcopy(-non_image_data['xrkl'].values)
print("Testing how well yhat is modeled by all image features")
non_image_data['yhat'] = yhat
non_image_data['y'] = y
yhat_model = sm.OLS.from_formula('yhat ~ %s' % ('+'.join(['C(%s)' % a for a in CLINICAL_CONTROL_COLUMNS])),
data=non_image_data).fit()
print('Predicting yhat using image features. r: %2.3f; r^2 %2.3f' % (np.sqrt(yhat_model.rsquared), yhat_model.rsquared))
yhat_hat = yhat_model.predict(non_image_data)
# assess how well yhat_hat predicts yhat, predicts y, etc, etc.
performances_to_compare = []
performances_to_compare.append(assess_performance(yhat_hat.values, yhat, binary_prediction=False))
performances_to_compare[-1]['comparison'] = 'yhat_hat to yhat'
performances_to_compare.append(assess_performance(yhat_hat.values, y, binary_prediction=False))
performances_to_compare[-1]['comparison'] = 'yhat_hat to y'
performances_to_compare.append(assess_performance(yhat, y, binary_prediction=False))
performances_to_compare[-1]['comparison'] = 'yhat to y'
print(pd.DataFrame(performances_to_compare))
print("\n\nSanity check: make sure we are not wildly overfitting yhat")
yhat_sanity_checks = []
y_sanity_checks = []
for iterate in range(100):
all_ids = sorted(list(set(non_image_data['id'])))
random.shuffle(all_ids)
n_ids = len(all_ids)
random_train_ids = set(all_ids[:int(n_ids * 0.6)])
random_val_ids = set(all_ids[int(n_ids * 0.6):int(n_ids * 0.8)])
random_test_ids = set(all_ids[int(n_ids * 0.8):])
random_train_idxs = non_image_data['id'].map(lambda x:x in random_train_ids).values
random_val_idxs = non_image_data['id'].map(lambda x:x in random_val_ids).values
random_test_idxs = non_image_data['id'].map(lambda x:x in random_test_ids).values
# compare_to_clinical_performance(train_df, val_df, test_df, y_col, features_to_use, binary_prediction, use_nonlinear_model)
overfitting_sanity_check_yhat_hat = compare_to_clinical_performance(train_df=non_image_data.loc[random_train_idxs],
val_df=non_image_data.loc[random_val_idxs],
test_df=non_image_data.loc[random_test_idxs],
y_col='yhat',
features_to_use=['C(%s)' % a for a in CLINICAL_CONTROL_COLUMNS],
binary_prediction=False,
use_nonlinear_model=False,
verbose=False)
y_sanity_checks.append(assess_performance(overfitting_sanity_check_yhat_hat, y[random_test_idxs], binary_prediction=False))
yhat_sanity_checks.append(assess_performance(overfitting_sanity_check_yhat_hat, yhat[random_test_idxs], binary_prediction=False))
print("Performance across 100 shuffled folds of test set")
print("yhat_hat to y")
print(pd.DataFrame(y_sanity_checks).agg(['mean', 'std']))
print("yhat_hat to yhat")
print(pd.DataFrame(yhat_sanity_checks).agg(['mean', 'std']))
##### END OF SANITY CHECKS
model = sm.OLS.from_formula('y ~ yhat', data=non_image_data).fit()
print("Yhat beta without controlling for image features: %2.3f" % model.params['yhat'])
model = model.get_robustcov_results(cov_type='cluster', groups=non_image_data['id'].astype(int))
print(model.summary())
model = sm.OLS.from_formula('y ~ yhat + %s' % ('+'.join(['C(%s)' % a for a in CLINICAL_CONTROL_COLUMNS])), data=non_image_data).fit()
print("Yhat beta controlling for image features: %2.3f" % model.params['yhat'])
model = model.get_robustcov_results(cov_type='cluster', groups=non_image_data['id'].astype(int))
print(model.summary())
def extract_r_squared_for_categorical_image_feature(df, image_feature, y_col):
# Small helper method. Since some image features are categorical, look at the total amount of variance the feature explains
# when we use it as a categorical variable.
image_model = sm.OLS.from_formula('%s ~ C(%s)' % (y_col, image_feature), data=df).fit()
return image_model.rsquared
all_correlations = []
for feature in CLINICAL_CONTROL_COLUMNS:
y_rsquared = extract_r_squared_for_categorical_image_feature(non_image_data, feature, 'y')
yhat_rsquared = extract_r_squared_for_categorical_image_feature(non_image_data, feature, 'yhat')
negative_klg_rsquared = extract_r_squared_for_categorical_image_feature(non_image_data, feature, 'xrkl') # r^2 is sign-invariant, so this equals the r^2 for negative KLG.
all_correlations.append({'feature':feature,
'yhat_rsquared':yhat_rsquared,
'y_rsquared':y_rsquared,
'negative_klg_rsquared':negative_klg_rsquared})
all_correlations = pd.DataFrame(all_correlations)
all_correlations['klg_rsquared - yhat_rsquared'] = all_correlations['negative_klg_rsquared'] - all_correlations['yhat_rsquared']
print("Correlations sorted by klg_rsquared - yhat_rsquared")
print(all_correlations.sort_values(by='klg_rsquared - yhat_rsquared'))
for var in ['yhat', 'y', 'negative_klg']:
print("Average r^2 between %s and features besides KLG: %2.3f" %
(var, all_correlations.loc[all_correlations['feature'] != 'xrkl', '%s_rsquared' % var].mean()))
plt.figure(figsize=[8, 8])
for i in range(len(all_correlations)):
plt.annotate(all_correlations['feature'].iloc[i],
[all_correlations['y_rsquared'].iloc[i], all_correlations['yhat_rsquared'].iloc[i]])
plt.scatter(all_correlations['y_rsquared'], all_correlations['yhat_rsquared'])
min_val = 0
max_val = max(all_correlations['y_rsquared'].max(), all_correlations['yhat_rsquared'].max()) + .005
plt.xlim([min_val, max_val])
plt.ylim([min_val, max_val])
plt.plot([min_val, max_val], [min_val, max_val], color='black', linestyle='--')
plt.plot([min_val, max_val], [0, 0], color='black')
plt.plot([0, 0], [min_val, max_val], color='black')
plt.xlabel("Feature r^2 with y")
plt.ylabel("Feature r^2 with yhat")
plt.show()
return pd.DataFrame(all_correlations.sort_values(by='y_rsquared'))
def fit_followup_regression(combined_df, col_is_categorical):
if col_is_categorical:
t0_control = 'C(col_of_interest_t0)'
else:
t0_control = 'col_of_interest_t0'
col_of_interest_model = sm.OLS.from_formula('col_of_interest_t1 ~ %s' % t0_control, data=combined_df).fit()
yhat_model = sm.OLS.from_formula('col_of_interest_t1 ~ yhat_t0', data=combined_df).fit()
combined_model = sm.OLS.from_formula('col_of_interest_t1 ~ yhat_t0 + %s' % t0_control, data=combined_df).fit()
assert combined_model.nobs == yhat_model.nobs == col_of_interest_model.nobs
clustered_combined_model = combined_model.get_robustcov_results(cov_type='cluster',
groups=combined_df['id'].astype(int))
yhat_t0_index = list(combined_model.params.index).index('yhat_t0')
yhat_t0_pval = clustered_combined_model.pvalues[yhat_t0_index]
assert np.allclose(clustered_combined_model.params[yhat_t0_index], combined_model.params['yhat_t0'])
return {'col_t1 ~ yhat_t0 r^2':yhat_model.rsquared,
'col_t1 ~ col_t0 r^2':col_of_interest_model.rsquared,
'col_t1 ~ yhat_t0 + col_t0 r^2':combined_model.rsquared,
'yhat beta':'%2.5f (%2.5f, %2.5f)' % (combined_model.params['yhat_t0'],
combined_model.conf_int().loc['yhat_t0', 0],
combined_model.conf_int().loc['yhat_t0', 1]),
'yhat p':yhat_t0_pval,
'n_obs':int(combined_model.nobs),
'n_people':len(set(combined_df['id']))}
def fit_followup_binary_regression(combined_df):
"""
If col_of_interest is a binary variable, fit a logistic regression instead.
col_of_interest here is binarized pain.
"""
assert combined_df['col_of_interest_t0'].map(lambda x:x in [0, 1]).all()
assert combined_df['col_of_interest_t1'].map(lambda x:x in [0, 1]).all()
assert not combined_df['koos_pain_subscore_t0'].map(lambda x:x in [0, 1]).all()
assert not combined_df['koos_pain_subscore_t1'].map(lambda x:x in [0, 1]).all()
assert not (combined_df['col_of_interest_t1'] == combined_df['col_of_interest_t0']).all()
# predict binary pain at followup without any controls.
yhat_model = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0', data=combined_df).fit(
cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})
# predict binary pain at followup controlling for binary pain at t0.
combined_model_binary_control = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0 + col_of_interest_t0', data=combined_df).fit(
cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})
# predict binary pain at followup controlling for CONTINUOUS pain at t0.
combined_model_continuous_control = sm.Logit.from_formula('col_of_interest_t1 ~ yhat_t0 + koos_pain_subscore_t0', data=combined_df).fit(
cov_type='cluster', cov_kwds={'groups':combined_df['id'].astype(int).values})
get_OR_and_CI = lambda m:'%2.3f (%2.3f, %2.3f)' % (np.exp(m.params['yhat_t0']), np.exp(m.conf_int().loc['yhat_t0', 0]), np.exp(m.conf_int().loc['yhat_t0', 1]))
return {'OR (no control)':get_OR_and_CI(yhat_model),
'OR (binary control)':get_OR_and_CI(combined_model_binary_control),
'OR (continuous control)':get_OR_and_CI(combined_model_continuous_control)}
def predict_kl_at_future_timepoints(non_image_data, yhat, use_binary_pain=False):
"""
Given the non-image data and an array of yhats, check how well
yhat_t0 predicts followup values of col_of_interest at t1, where col_of_interest ranges over ['xrkl', 'koos_pain_subscore'] (or binarized pain if use_binary_pain).
"""
check_is_array(yhat)
non_image_data = copy.deepcopy(non_image_data)
assert 'yhat' not in non_image_data.columns
non_image_data['yhat'] = yhat
if not use_binary_pain:
cols_to_use = ['xrkl', 'koos_pain_subscore']
statistics_to_return = ['col', 't0', 't1', 'col_t1 ~ yhat_t0 r^2', 'col_t1 ~ col_t0 r^2',
'col_t1 ~ yhat_t0 + col_t0 r^2', 'yhat beta', 'yhat p', 'n_obs', 'n_people']
else:
cols_to_use = ['binarized_koos_pain_subscore']
non_image_data['binarized_koos_pain_subscore'] = binarize_koos(non_image_data['koos_pain_subscore'].values)
statistics_to_return = ['col', 't0', 't1', 'OR (no control)', 'OR (binary control)', 'OR (continuous control)']
t0 = '00 month follow-up: Baseline'
all_results = []
for col_of_interest in cols_to_use:
pooled_dfs = []
for t1 in ['12 month follow-up', '24 month follow-up', '36 month follow-up', '48 month follow-up']:
if t1 <= t0:
continue
df_t0 = copy.deepcopy(non_image_data.loc[non_image_data['visit'] == t0, ['id', 'side', 'yhat', col_of_interest, 'koos_pain_subscore', 'xrkl']])
df_t1 = copy.deepcopy(non_image_data.loc[non_image_data['visit'] == t1, ['id', 'side', 'yhat', col_of_interest, 'koos_pain_subscore', 'xrkl']])
df_t0.columns = ['id', 'side', 'yhat', 'col_of_interest', 'koos_pain_subscore', 'xrkl']
df_t1.columns = ['id', 'side', 'yhat', 'col_of_interest', 'koos_pain_subscore', 'xrkl']
assert len(df_t0) > 0
assert len(df_t1) > 0
assert df_t0[['side', 'id']].duplicated().sum() == 0
assert df_t1[['side', 'id']].duplicated().sum() == 0
assert len(df_t0[['yhat', 'col_of_interest']].dropna()) == len(df_t0)
assert len(df_t1[['yhat', 'col_of_interest']].dropna()) == len(df_t1)
combined_df = pd.merge(df_t0, df_t1, how='inner', on=['id', 'side'], suffixes=['_t0', '_t1'])
if use_binary_pain:
regression_results = fit_followup_binary_regression(combined_df)
else:
regression_results = fit_followup_regression(combined_df, col_is_categorical=(col_of_interest == 'xrkl'))
regression_results['col'] = col_of_interest
regression_results['t0'] = t0
regression_results['t1'] = t1
all_results.append(regression_results)
pooled_dfs.append(combined_df)
if use_binary_pain:
regression_results = fit_followup_binary_regression(pd.concat(pooled_dfs))
else:
regression_results = fit_followup_regression(pd.concat(pooled_dfs), col_is_categorical=(col_of_interest == 'xrkl'))
regression_results['col'] = col_of_interest
regression_results['t0'] = t0
regression_results['t1'] = 'pooled'
all_results.append(regression_results)
return pd.DataFrame(all_results)[statistics_to_return]
def quantify_pain_gap_reduction_vs_rival(yhat, y, rival_severity_measure, all_ses_vars, ids, lower_bound_rival_reduction_at_0=False):
"""
Given a rival_severity_measure, return a dataframe of how much yhat reduces the pain gap as compared to the rival.
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(rival_severity_measure)
check_is_array(ids)
all_beta_ratios = []
for ses_var_name in all_ses_vars:
ses_arr = all_ses_vars[ses_var_name].copy() * 1.
assert set(ses_arr) == set([0, 1])
check_is_array(ses_arr)
#print("\n\n****Stratification: %s" % ses_var_name)
ses_betas = compare_ses_gaps(yhat=yhat,
y=y,
rival_severity_measure=rival_severity_measure,
ses=ses_arr,
verbose=False)
ses_beta_ratio_yhat_rival = ses_betas['yhat_ses_beta'] / ses_betas['rival_ses_beta']
ses_beta_ratio_rival_nothing = ses_betas['rival_ses_beta'] / ses_betas['no_controls_ses_beta'] # used to be called rival_nothing_ratio
ses_beta_ratio_yhat_nothing = ses_betas['yhat_ses_beta'] / ses_betas['no_controls_ses_beta'] # used to be called no_controls_beta_ratio
if lower_bound_rival_reduction_at_0 and (1 - ses_beta_ratio_rival_nothing) < 0.001:
print("Warning: rival actually makes pain gap larger, setting yhat:rival reduction ratio to 100.")
yhat_rival_red_ratio = 100
else:
yhat_rival_red_ratio = (1 - ses_beta_ratio_yhat_nothing)/(1 - ses_beta_ratio_rival_nothing)
all_beta_ratios.append({'SES var':ses_var_name,
'n_obs':'%i/%i' % (sum(ses_arr == 1), len(ses_arr)),
'n_people':'%i/%i' % (len(set(ids[ses_arr == 1])), len(set(ids))),
'No controls gap':ses_betas['no_controls_ses_beta'],
'Rival gap':ses_betas['rival_ses_beta'],
'yhat gap':ses_betas['yhat_ses_beta'],
'Rival red. vs nothing':(1 - ses_beta_ratio_rival_nothing) * 100, # '%2.0f%%' %
'red. vs rival': ((1 - ses_beta_ratio_yhat_rival) * 100),# '%2.0f%%' %
'red. vs nothing': ((1 - ses_beta_ratio_yhat_nothing) * 100), # '%2.0f%%'
'yhat/rival red. ratio': yhat_rival_red_ratio})# '%2.1f'
return pd.DataFrame(all_beta_ratios)[['SES var', 'No controls gap', 'Rival gap', 'yhat gap',
'Rival red. vs nothing', 'red. vs nothing', 'yhat/rival red. ratio',
'n_obs', 'n_people']]
def compare_ses_gaps(yhat, y, rival_severity_measure, ses, verbose=True):
"""
How big are the SES/racial gaps controlling for various measures of severity?
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(ses)
check_is_array(rival_severity_measure)
assert set(ses) == set([0, 1])
ses = ses.copy() * 1.
df = pd.DataFrame({'yhat':yhat, 'y':y, 'rival_severity_measure':rival_severity_measure, 'ses':ses})
if verbose:
print("Comparing pain gaps")
no_controls_model = sm.OLS.from_formula('y ~ ses', data=df).fit()
rival_model = sm.OLS.from_formula('y ~ rival_severity_measure + ses', data=df).fit()
yhat_model = sm.OLS.from_formula('y ~ yhat + ses', data=df).fit()
no_controls_beta = float(no_controls_model.params['ses'])
rival_ses_beta = float(rival_model.params['ses'])
yhat_ses_beta = float(yhat_model.params['ses'])
if verbose:
        print(rival_model.summary())
print(yhat_model.summary())
results = {'yhat_ses_beta': yhat_ses_beta,
'rival_ses_beta':rival_ses_beta,
'no_controls_ses_beta':no_controls_beta}
return results
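# Minimal synthetic illustration (all numbers are assumptions, not study data) of
# the pattern compare_ses_gaps quantifies: when a severity measure correlated
# with SES is added as a control, the SES coefficient on pain shrinks toward 0.
def _example_ses_gap_shrinkage():
    import numpy as np
    import pandas as pd
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    n = 2000
    ses = rng.binomial(1, 0.3, n)
    severity = 0.8 * ses + rng.randn(n)   # severity differs by SES
    y = -2.0 * severity + rng.randn(n)    # pain is driven by severity only
    demo_df = pd.DataFrame({'y': y, 'ses': ses, 'severity': severity})
    beta_no_controls = sm.OLS.from_formula('y ~ ses', data=demo_df).fit().params['ses']
    beta_with_control = sm.OLS.from_formula('y ~ severity + ses', data=demo_df).fit().params['ses']
    # |beta_with_control| should be much smaller than |beta_no_controls| (roughly -1.6 here)
    return beta_no_controls, beta_with_control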
def compare_pain_levels_for_people_geq_klg_2(yhat, y, klg, ses, y_col):
"""
compute various algorithmic-fairness-inspired metrics
based on binarizing both the risk score and the outcome.
"""
check_is_array(yhat)
check_is_array(klg)
check_is_array(ses)
check_is_array(y)
assert y_col == 'koos_pain_subscore'
assert y.max() == 100
# first compute our three measures of severity
# 1. binarized KLG
# 2. binarized yhat
# 3. binarized y (oracle)
binarized_klg_severity = klg >= 2
print("%i/%i people with KLG >= 2" % (binarized_klg_severity.sum(), len(binarized_klg_severity)))
discretized_yhat = discretize_yhat_like_kl_grade(yhat_arr=yhat, kl_grade_arr=klg, y_col=y_col)
binarized_yhat_severity = discretized_yhat >= 2
discretized_y = discretize_yhat_like_kl_grade(yhat_arr=y, kl_grade_arr=klg, y_col=y_col)
binarized_oracle_severity = discretized_y >= 2
severity_measures = {'klg':binarized_klg_severity,
'yhat':binarized_yhat_severity,
'oracle':binarized_oracle_severity}
assert binarized_oracle_severity.sum() == binarized_yhat_severity.sum()
assert binarized_klg_severity.sum() == binarized_yhat_severity.sum()
# cast a couple of things to bools
high_pain = binarize_koos(y).astype(bool)
assert (high_pain == (binarize_koos(y) == 1)).all()
assert set(ses) == set([0, 1])
ses = ses == 1
print("Overall fraction of people with SES var=1: %2.3f" % ses.mean())
for severity_measure_name in ['klg', 'yhat', 'oracle']:
print("p(SES var=1|high severity), severity measure %s: %2.3f" % (
severity_measure_name, ses[severity_measures[severity_measure_name] == 1].mean()))
# alternate computation as sanity check.
percentile_cutoff = 100. * (klg >= 2).sum() / len(klg)
y_cutoff = scoreatpercentile(y, percentile_cutoff)
yhat_cutoff = scoreatpercentile(yhat, percentile_cutoff)
print('Alternate computation using KLG with %i people above severity cutoff: %2.3f' %
((klg >= 2).sum(), ses[klg >= 2].mean()))
print('Alternate computation using yhat with %i people above severity cutoff: %2.3f' %
((yhat <= yhat_cutoff).sum(), ses[yhat <= yhat_cutoff].mean()))
print('Alternate computation using oracle with %i people above severity cutoff: %2.3f' %
((y <= y_cutoff).sum(), ses[y <= y_cutoff].mean()))
all_klg_yhat_ratios = []
for threshold in [0, 1, 2, 3, 4]:
yhat_geq_thresh = discretized_yhat >= threshold
klg_geq_thresh = klg >= threshold
assert yhat_geq_thresh.sum() == klg_geq_thresh.sum()
print("Threshold %i: %-5i >= threshold. p(SES var = 1 | >= threshold): %2.3f for yhat, %2.3f for KLG, ratio %2.5f" %
(threshold,
yhat_geq_thresh.sum(),
ses[yhat_geq_thresh].mean(),
ses[klg_geq_thresh].mean(),
ses[yhat_geq_thresh].mean() / ses[klg_geq_thresh].mean()))
all_klg_yhat_ratios.append(ses[yhat_geq_thresh].mean() / ses[klg_geq_thresh].mean())
for threshold in [0, 1, 2, 3, 4]:
yhat_geq_thresh = discretized_yhat >= threshold
klg_geq_thresh = klg >= threshold
assert yhat_geq_thresh.sum() == klg_geq_thresh.sum()
print("Threshold %i: %-5i above threshold. p(above threshold | SES var = 1): %2.3f for yhat, %2.3f for KLG, ratio %2.3f" %
(threshold,
yhat_geq_thresh.sum(),
yhat_geq_thresh[ses == 1].mean(),
klg_geq_thresh[ses == 1].mean(),
yhat_geq_thresh[ses == 1].mean() / klg_geq_thresh[ses == 1].mean()))
assert np.allclose(yhat_geq_thresh[ses == 1].mean() / klg_geq_thresh[ses == 1].mean(), all_klg_yhat_ratios[threshold])
# compute actual results
results = {'all_klg_yhat_ratios':all_klg_yhat_ratios}
for ses_val in [False, True, 'all']:
ses_key = 'ses_var=%s' % ses_val
results[ses_key] = {}
if ses_val in [False, True]:
ses_idxs = ses == ses_val
elif ses_val == 'all':
ses_idxs = np.array([True for k in range(len(ses))])
for severity_measure_name in severity_measures:
high_severity = severity_measures[severity_measure_name]
assert set(high_severity) == set([0, 1])
p_high_severity = high_severity[ses_idxs].mean()
p_high_severity_given_high_pain = high_severity[ses_idxs & high_pain].mean()
p_low_severity_given_low_pain = 1 - high_severity[ses_idxs & (~high_pain)].mean()
p_high_pain_given_high_severity = high_pain[ses_idxs & high_severity].mean()
p_low_pain_given_low_severity = 1 - high_pain[ses_idxs & (~high_severity)].mean()
correct = high_severity == high_pain
accuracy = correct[ses_idxs].mean()
results[ses_key][severity_measure_name] = {
'p_high_severity':p_high_severity,
'p_high_severity_given_high_pain':p_high_severity_given_high_pain,
'p_low_severity_given_low_pain':p_low_severity_given_low_pain,
'p_high_pain_given_high_severity':p_high_pain_given_high_severity,
'p_low_pain_given_low_severity':p_low_pain_given_low_severity,
'accuracy':accuracy}
return results
def plot_geq_klg_results(geq_klg_2_results, plot_title, figname=None):
"""
Makes a plot of the results from compare_pain_levels_for_people_geq_klg_2.
"""
def make_metric_name_pretty(a):
a = a.replace('_given_', '|')
if a[:2] == 'p_':
a = 'p(' + a[2:] + ')'
return a
metrics_to_plot = sorted(geq_klg_2_results['ses_var=True']['klg'].keys())
severity_measures = ['klg', 'yhat', 'oracle']
plt.figure(figsize=[15, 10])
for subplot_idx, ses_status in enumerate([True, False, 'all']):
plt.subplot(3, 1, subplot_idx + 1)
x_offset = 0
bar_width = .2
for severity_measure in severity_measures:
xs = np.arange(len(metrics_to_plot)) + x_offset
ys = [geq_klg_2_results['ses_var=%s' % ses_status][severity_measure][metric]
for metric in metrics_to_plot]
plt.bar(xs, ys, width=bar_width, label=severity_measure)
x_offset += bar_width
plt.ylim([0, 1])
plt.xticks(np.arange(len(metrics_to_plot)) + bar_width,
[make_metric_name_pretty(a) for a in metrics_to_plot])
plt.legend()
plt.title('SES var = %s' % ses_status)
plt.suptitle(plot_title)
if figname is not None:
plt.savefig(figname, dpi=300)
def make_comparison_klg_yhat_plot(yhat, y, klg):
"""
Compare how well yhat and KLG fit y by plotting distribution of y in each bin.
Make a box plot to show this and also do a simple line plot with the median of y.
Checked.
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(klg)
discretized_yhat = discretize_yhat_like_kl_grade(yhat_arr=yhat,
kl_grade_arr=klg,
y_col='koos_pain_subscore')
discretized_vals = range(5)
assert set(klg) == set(discretized_vals)
assert y.max() == 100
assert set(discretized_yhat) == set(discretized_vals)
print('pearson correlation between our original score and y %2.3f' % pearsonr(yhat, y)[0])
print('pearson correlation between our discretized score and y %2.3f' % pearsonr(discretized_yhat, y)[0])
print('pearson correlation between klg and y %2.3f' % pearsonr(klg, y)[0])
# box plot.
plt.figure(figsize=[8, 4])
ylimits = [0, 102]
plt.subplot(121)
sns.boxplot(x=discretized_yhat, y=y)
plt.xlabel("Discretized $\hat y$")
plt.ylabel('Koos pain score')
plt.ylim(ylimits)
plt.subplot(122)
sns.boxplot(x=klg, y=y)
plt.yticks([])
plt.xlabel("KLG")
plt.ylim(ylimits)
plt.savefig('sendhil_plots/klg_yhat_comparison_boxplot.png', dpi=300)
plt.show()
# plot median value of y by each KLG/yhat bin.
klg_y_medians = []
yhat_y_medians = []
for val in discretized_vals:
yhat_idxs = discretized_yhat == val
klg_idxs = klg == val
print("score %i: yhat and KLG means: %2.3f %2.3f; yhat and KLG medians %2.3f %2.3f" %
(val, y[yhat_idxs].mean(), y[klg_idxs].mean(), np.median(y[yhat_idxs]), np.median(y[klg_idxs])))
klg_y_medians.append(np.median(y[klg_idxs]))
yhat_y_medians.append(np.median(y[yhat_idxs]))
plt.figure(figsize=[5, 4])
plt.plot(discretized_vals, yhat_y_medians, label='Our model', color='green')
plt.plot(discretized_vals, klg_y_medians, label='KLG', color='red')
plt.legend()
plt.xticks(discretized_vals)
plt.ylabel("Median Koos pain score")
plt.xlabel("Severity grade")
#plt.savefig('sendhil_plots/klg_yhat_comparison_line_plot.png', dpi=300)
plt.show()
def make_kernel_regression_plot(yhat, y):
"""
Kernel regression plot of y on yhat.
Also plot linear trend for comparison.
Checked.
"""
check_is_array(yhat)
check_is_array(y)
# fit RBF kernel.
kernel_model = KernelRidge(kernel='rbf', gamma=1/(15.**2)) # bandwidth of 15.
kernel_model.fit(X=yhat.reshape(-1, 1), y=y)
vals_to_predict_at = np.arange(yhat.min(), yhat.max(), 1).reshape(-1, 1)
kernel_predictions = kernel_model.predict(vals_to_predict_at)
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x=yhat, y=y)
# make plot. Both linear + kernel fit for comparison.
plt.figure(figsize=[4, 4])
plt.scatter(yhat, y, s=1, alpha=.7)
plt.plot(vals_to_predict_at, vals_to_predict_at*slope + intercept, color='red', linewidth=3, label='linear fit')
plt.plot(vals_to_predict_at, kernel_predictions, color='black', linewidth=3, label='kernel fit')
plt.legend()
    plt.xlabel(r'$\hat y$')
plt.ylabel('Koos pain score')
plot_limits = [45, 102]
plt.xlim(plot_limits)
plt.ylim(plot_limits)
print("Warning: %2.5f%% of y and %2.5f%% of yhat vals are truncated by lower limit of %s" %
((y < plot_limits[0]).mean(), (yhat < plot_limits[0]).mean(), plot_limits[0]))
plt.subplots_adjust(left=.2)
plt.savefig('sendhil_plots/kernel_regression.png', dpi=300)
plt.show()
def make_violin_nonredundancy_plot(yhat, klg):
"""
Make violin plot showing lack of correlation between yhat and KLG.
Checked.
"""
check_is_array(yhat)
check_is_array(klg)
plt.figure(figsize=[4, 4])
sns.violinplot(x=klg, y=yhat)
assert set(klg) == set(range(5))
plt.xticks(range(5), ['0', '1', '2', '3', '4'])
plt.ylim([50, 100])
plt.ylabel("$\hat y$")
plt.xlabel('KLG')
plt.subplots_adjust(left=.2)
plt.savefig('violin_nonredundancy_plot.png', dpi=300)
plt.show()
def generate_correlated_variable(x, desired_r):
"""
This generates a variable with a given level of correlation desired_r with x.
Checked.
"""
tolerance = .003
max_iterations = 1000
assert desired_r > 0
assert desired_r < 1
upper_noise = np.std(x) * 100
lower_noise = 0
n = len(x)
n_iter = 0
while True:
middle_noise = (upper_noise + lower_noise) / 2
y = x + np.random.randn(n) * middle_noise
r, p = pearsonr(y, x)
print("%2.3f - %2.3f: %2.3f" % (lower_noise, upper_noise, r))
if r < desired_r - tolerance:
upper_noise = middle_noise
elif r > desired_r + tolerance:
lower_noise = middle_noise
else:
print("Within tolerance: %2.3f" % np.abs(r - desired_r))
return y
n_iter += 1
if n_iter > max_iterations:
return y
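# Quick usage sketch for the noise-bisection routine above (illustrative only):
# request a variable with r ~= 0.3 against x and check the achieved correlation.
def _example_generate_correlated_variable():
    from scipy.stats import pearsonr as _pearsonr
    np.random.seed(0)
    x = np.random.randn(5000)
    y = generate_correlated_variable(x, desired_r=0.3)
    r, _ = _pearsonr(x, y)
    return r  # should typically land within the 0.003 tolerance of 0.3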
def get_baseline_scores(df):
"""
filter for left knee scores at baseline.
Checked.
"""
idxs = (df['side'] == 'left') & (df['visit'] == '00 month follow-up: Baseline')
df = copy.deepcopy(df.loc[idxs])
df.index = range(len(df))
return df
def power_analysis_is_y_associated_with_yhat(non_image_dataset, pval_thresh, n_iterates):
"""
power analysis: is y associated with yhat when we do/don't control for covariates?
Checked.
"""
clinical_controls = ['C(%s)' % col for col in non_image_dataset.clinical_xray_semiquantitative_cols]
knee_pain_scores = non_image_dataset.processed_dataframes['all_knee_pain_scores']
clinical_assessments = non_image_dataset.processed_dataframes['kxr_sq_bu']
baseline_pain_scores = get_baseline_scores(get_combined_dataframe(non_image_dataset, clinical_assessments))
pain_subscores = ['koos_pain_subscore', 'womac_pain_subscore']
for k in pain_subscores:
baseline_pain_scores[k] = (baseline_pain_scores[k] - baseline_pain_scores[k].mean()) / baseline_pain_scores[k].std()
all_simulations = []
for desired_r in [.4, .3, .2, .15, .1, .08, .05]:
for k in pain_subscores:
baseline_pain_scores['pain_hat'] = generate_correlated_variable(baseline_pain_scores[k], desired_r)
r, p = pearsonr(baseline_pain_scores['pain_hat'], baseline_pain_scores[k])
for subset_size in list(range(250, 2000, 250)):
for _ in range(n_iterates):
individuals_to_sample = set(random.sample(non_image_dataset.all_ids, subset_size))
df_to_use = baseline_pain_scores.loc[baseline_pain_scores['id'].map(lambda x:x in individuals_to_sample)]
for rhs in ['just_pain_hat', 'pain_hat_plus_clinical_controls']:
if rhs == 'just_pain_hat':
covs_to_use = ['pain_hat']
elif rhs == 'pain_hat_plus_clinical_controls':
covs_to_use = ['pain_hat'] + clinical_controls
covs_to_use = [cov for cov in covs_to_use if len(set(df_to_use[cov].dropna())) > 1]
covs_to_use = '+'.join(covs_to_use)
model = sm.OLS.from_formula('%s ~ %s' % (k, covs_to_use), data=df_to_use).fit()
#print(model.summary())
model_pval = model.pvalues['pain_hat']
all_simulations.append({'pain_subscore':k,
'r':r,
'rhs':rhs,
'subset_size':subset_size,
'model_pval':model_pval})
# now make the plot.
all_simulations = pd.DataFrame(all_simulations)
all_simulations['is_sig'] = all_simulations['model_pval'] < pval_thresh
for pain_subscore in ['koos_pain_subscore', 'womac_pain_subscore']:
for rhs in sorted(list(set(all_simulations['rhs']))):
simulations_to_use = all_simulations.loc[(all_simulations['pain_subscore'] == pain_subscore) &
(all_simulations['rhs'] == rhs)]
plt.figure(figsize=[8, 4])
for r in sorted(list(set(simulations_to_use['r']))):
x = []
y = []
for subset_size in sorted(list(set(simulations_to_use['subset_size']))):
simulations_for_subset = simulations_to_use.loc[(simulations_to_use['subset_size'] == subset_size) &
(simulations_to_use['r'] == r), 'is_sig']
assert len(simulations_for_subset) == n_iterates
x.append(subset_size)
y.append(simulations_for_subset.mean())
plt.plot(x, y, label='$r=%2.2f$' % r)
plt.title("DV: %s\nRHS: %s" % (pain_subscore, rhs))
plt.xlabel("Test set size")
plt.ylabel("Fraction of simulations\nwhich are significant @ $p = %2.2f$" % pval_thresh)
plt.xlim([min(x), max(x)])
plt.ylim([0, 1.02])
plt.legend(bbox_to_anchor=(1.1, 1.05))
plt.savefig('power_analysis_dv_%s_rhs_%s.png' % (pain_subscore, rhs), dpi=300)
def power_analysis_does_yhat_reduce_effect_of_income(non_image_dataset, pval_thresh, n_iterates, dv):
"""
power analysis for "does yhat reduce effect of income/education"
for the answer to "does the coefficient change", see
    Clogg, C. C., Petkova, E., & Haritou, A. (1995). Statistical methods for comparing regression coefficients between models. American Journal of Sociology, 100(5), 1261-1293.
Bottom of page 1269 gives an example which makes it look like this is what we want to do.
See eq (15) and also the paragraph which begins "results begin in Table 1"
    Andrew Gelman fwiw is skeptical of this -- https://andrewgelman.com/2010/01/21/testing_for_sig/
Checked.
"""
all_results = []
assert dv in ['koos_pain_subscore', 'womac_pain_subscore']
knee_pain_scores = non_image_dataset.processed_dataframes['all_knee_pain_scores']
clinical_ratings = non_image_dataset.processed_dataframes['kxr_sq_bu']
df_to_use = get_baseline_scores(get_combined_dataframe(non_image_dataset, clinical_ratings))
print("Length of baseline data")
print(len(df_to_use))
iv = 'binarized_income_at_least_50k'
pain_subscores = ['koos_pain_subscore', 'womac_pain_subscore']
assert dv in pain_subscores
for k in pain_subscores:
df_to_use[k] = (df_to_use[k] - df_to_use[k].mean()) / df_to_use[k].std()
clinical_controls = '+'.join(['C(%s)' % a for a in non_image_dataset.clinical_xray_semiquantitative_cols])
for noise_param in [3, 5, 8, 10]:
for disparity_param in [.2]:
print("Noise param: %2.3f; disparity param: %2.3f" % (noise_param, disparity_param))
# as disparity param increases in magnitude, yhat gets more correlated with SES.
# as noise_param increases in magnitude, yhat gets less correlated with y and SES.
if dv == 'womac_pain_subscore':
# higher scores indicate worse pain on the womac
# so if you have a higher SES we want you to have lower predicted Yhat.
disparity_param = -disparity_param
df_to_use['yhat'] = df_to_use[dv] + df_to_use[iv] * disparity_param + noise_param * np.random.randn(len(df_to_use),)
df_to_use = df_to_use.dropna(subset=[dv, iv])
print(df_to_use[[iv, 'yhat']].groupby(iv).agg(['mean', 'std']))
for subset_size in list(range(250, 2000, 250)):
for _ in range(n_iterates):
people = set(random.sample(non_image_dataset.all_ids, subset_size))
people_idxs = df_to_use['id'].map(lambda x:x in people).values
model_without_yhat = sm.OLS.from_formula('%s ~ %s + %s' % (dv, iv, clinical_controls), df_to_use.loc[people_idxs]).fit()
model_with_yhat = sm.OLS.from_formula('%s ~ %s + %s + yhat' % (dv, iv, clinical_controls), df_to_use.loc[people_idxs]).fit()
change_in_iv_coef = model_with_yhat.params[iv] - model_without_yhat.params[iv]
# Note:
## To get estimate of noise variance for a model, the following 3 are all the same.
# this is sigma_hat SQUARED, not sigma_hat.
# 1. np.sum(model_without_yhat.resid ** 2) / model_without_yhat.df_resid)
# 2. model_without_yhat.mse_resid
# 3. model_without_yhat.scale
squared_error_on_change = (model_with_yhat.bse[iv] ** 2 -
model_without_yhat.bse[iv] ** 2 * model_with_yhat.scale / model_without_yhat.scale)
assert squared_error_on_change > 0
error_on_change = np.sqrt(squared_error_on_change)
zscore = change_in_iv_coef/error_on_change
if (model_with_yhat.params[iv] > 0) != (model_without_yhat.params[iv] > 0):
# if the sign of the coefficient changes that is weird. It should just get smaller.
print("Warning: coefficient changed sign from %2.3f to %2.3f" % (model_without_yhat.params[iv], model_with_yhat.params[iv]))
results = {'r2_with_yhat':model_with_yhat.rsquared,
'r2_without_yhat':model_without_yhat.rsquared,
'beta_with_yhat':model_with_yhat.params[iv],
'beta_without_yhat':model_without_yhat.params[iv],
'change_in_IV_coef':change_in_iv_coef,
'error_on_change':error_on_change,
'zscore':zscore,
'p_change':2*(1 - norm.cdf(abs(zscore))), # two-tailed p-value.
'yhat_iv_corr':pearsonr(df_to_use['yhat'], df_to_use[iv])[0],
'yhat_dv_corr':pearsonr(df_to_use['yhat'], df_to_use[dv])[0],
'subset_size':subset_size}
all_results.append(results)
# now make plot.
all_results = pd.DataFrame(all_results)
for iv_corr in sorted(list(set(all_results['yhat_iv_corr'])), key=lambda x:abs(x)):
for dv_corr in sorted(list(set(all_results['yhat_dv_corr'])), key=lambda x:abs(x)):
idxs = ((all_results['yhat_iv_corr'] == iv_corr) &
(all_results['yhat_dv_corr'] == dv_corr))
if idxs.sum() == 0:
continue
x = []
y = []
for subset_size in sorted(list(set(all_results['subset_size']))):
x.append(subset_size)
results = all_results.loc[idxs & (all_results['subset_size'] == subset_size),
'p_change'] < pval_thresh
assert len(results) == n_iterates
y.append(results.mean())
plt.plot(x, y, label='IV (income/educ): r=%2.3f, DV (pain): r=%2.3f' % (abs(iv_corr),
abs(dv_corr)))
plt.legend(bbox_to_anchor=(1.1, 1.05))
plt.xlabel("Test set size")
plt.ylabel("Fraction of simulations\nwhich are significant @ $p = %2.2f$" % pval_thresh)
plt.title("Significance test is for change in income/educ coef")
plt.savefig('power_analysis_does_yhat_reduce_effect_of_ses.png')
def discretize_yhat_like_kl_grade(yhat_arr, kl_grade_arr, y_col):
"""
    Given an array of yhats and an array of KL grades, create a discretized yhat with the same bin sizes.
y_col argument is just to make sure we are intending to recreate koos pain score.
In this case, highest yhats should have the lowest KL grades.
"""
check_is_array(yhat_arr)
check_is_array(kl_grade_arr)
yhat_arr = copy.deepcopy(yhat_arr)
kl_grade_arr = copy.deepcopy(kl_grade_arr)
yhat_arr = yhat_arr + np.random.random(len(yhat_arr)) * 1e-6 # break ties by adding a small amount of random noise.
assert y_col == 'koos_pain_subscore'
kl_counts = Counter(kl_grade_arr)
print("KL counts are", kl_counts)
total_count = len(kl_grade_arr)
assert len(kl_grade_arr) == len(yhat_arr)
assert sorted(kl_counts.keys()) == list(range(5))
if y_col == 'koos_pain_subscore':
yhat_ranks = rankdata(yhat_arr)
discretized_yhat = np.zeros(yhat_arr.shape)
cutoff_n = kl_counts[0] - 1
for klg in range(1, 5):
discretized_yhat[yhat_ranks < (total_count - cutoff_n)] = klg
cutoff_n += kl_counts[klg]
assert pearsonr(kl_grade_arr, discretized_yhat)[0] > 0
if not Counter(kl_grade_arr) == Counter(discretized_yhat):
print("KL grade and yhat should be equal in counts and are not")
print(Counter(kl_grade_arr))
print(Counter(discretized_yhat))
assert False
# check how well things agree.
for klg in list(range(5)):
kl_grade_idxs = kl_grade_arr == klg
our_grade_lower = discretized_yhat < klg
our_grade_equal = discretized_yhat == klg
our_grade_higher = discretized_yhat > klg
print('Original KLG %i: %2.1f%% lower, %2.1f%% the same, %2.1f%% higher' % (klg,
our_grade_lower[kl_grade_idxs].mean() * 100,
our_grade_equal[kl_grade_idxs].mean() * 100,
our_grade_higher[kl_grade_idxs].mean() * 100))
print("Overall agreement: %2.1f%% of the time. Disagree by at least 2: %2.1f%% of the time." %
((discretized_yhat == kl_grade_arr).mean() * 100, (np.abs(discretized_yhat - kl_grade_arr) >= 2).mean() * 100))
return discretized_yhat
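# Sketch of the discretization above on synthetic inputs (not the OAI data): the
# discretized yhat reproduces the KLG bin sizes exactly, with the highest yhat
# values (least predicted pain) mapped to grade 0. The fake score below is built
# so that higher KLG implies lower yhat, matching the real direction of effect.
def _example_discretize_yhat():
    rng = np.random.RandomState(0)
    klg_demo = np.repeat(np.arange(5), [400, 300, 150, 100, 50])
    yhat_demo = 100. - 10. * klg_demo + rng.randn(len(klg_demo))
    disc = discretize_yhat_like_kl_grade(yhat_arr=yhat_demo,
                                         kl_grade_arr=klg_demo,
                                         y_col='koos_pain_subscore')
    return disc  # Counter(disc) matches Counter(klg_demo) by construction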
def bootstrap_dataframe_accounting_for_clustering(df, resample_points_within_cluster, random_seed):
"""
Given a dataframe, draw a new sample by resampling IDs.
If resample_points_within_cluster, also resample points within IDs (ie, measurements for each person);
otherwise, just takes the original dataframe for the person.
Note: we do not resample points within cluster.
"A Practitioner’s Guide to Cluster-Robust Inference" does not do this either.
Similarly, "BOOTSTRAP-BASED IMPROVEMENTS FOR INFERENCE WITH CLUSTERED ERRORS" section 3.1 also makes no mention of resampling within cluster.
    They say this method is variously referred to as "cluster bootstrap, case bootstrap, non-parametric bootstrap, and nonoverlapping block bootstrap"
"""
assert not resample_points_within_cluster
assert 'id' in df.columns
assert sum(pd.isnull(df['id'])) == 0
df = copy.deepcopy(df)
unique_ids = list(set(df['id']))
reproducible_random_sampler = random.Random(random_seed)
ids_sampled_with_replacement = [reproducible_random_sampler.choice(unique_ids) for a in range(len(unique_ids))]
grouped_df = df.groupby('id')
new_df = []
for i, sampled_id in enumerate(ids_sampled_with_replacement):
df_for_person = grouped_df.get_group(sampled_id)
#df_for_person['id'] = i # pretend they all have unique ids. This doesn't actually matter when we're fitting regressions on bootstrapped samples (since we don't do clustering on ID or use it in any way).
if resample_points_within_cluster:
            within_cluster_idxs = [reproducible_random_sampler.choice(range(len(df_for_person))) for k in range(len(df_for_person))]
df_for_person = df_for_person.iloc[within_cluster_idxs]
new_df.append(df_for_person)
new_df = | pd.concat(new_df) | pandas.concat |
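# Standalone sketch of the cluster ("case") bootstrap described in the docstring
# above: resample person IDs with replacement and keep each sampled person's rows
# intact. Written against a toy dataframe with an 'id' column; not the original helper.
def _example_cluster_bootstrap(demo_df, random_seed=0):
    import random as _random
    import pandas as _pd
    sampler = _random.Random(random_seed)
    unique_ids = sorted(set(demo_df['id']))
    sampled_ids = [sampler.choice(unique_ids) for _ in range(len(unique_ids))]
    grouped = demo_df.groupby('id')
    return _pd.concat([grouped.get_group(i) for i in sampled_ids], ignore_index=True)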
import os
import sys
import numpy as np
import pandas as pd
import geopandas as gpd
import argparse
import torch
import tqdm
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader, Dataset
import cv2
from shapely.wkt import loads as wkt_loads
import shapely.wkt
import rasterio
import shapely
from rasterio import features
import shapely.geometry
import shapely.affinity
from scipy import ndimage
import shutil
import gc
sys.path.append('.')
import solaris as sol
from albumentations.pytorch.transforms import ToTensor
from albumentations import (
Compose,
Normalize
)
def mask2box(mask):
y1, y2, x1, x2 = np.where(mask == 1)[0].min(), np.where(mask == 1)[0].max(), np.where(mask == 1)[1].min(), \
np.where(mask == 1)[1].max()
return y1, y2, x1, x2
def mask2box_xminyminxmaxymax(mask):
y1, y2, x1, x2 = np.where(mask == 1)[0].min(), np.where(mask == 1)[0].max(), np.where(mask == 1)[1].min(), \
np.where(mask == 1)[1].max()
return x1, y1, x2, y2
def colormask2boxes(mask):
"""
Args:
mask: [height,width], mask values, integers 0-255, 0=background
Returns:
list of bboxes (bbox is a list of 4 numbers, [xmin, ymin, xmax, ymax])
"""
boxes = []
if mask.sum() > 0:
# for i in range(1,len(np.unique(mask))):
for i in [x for x in np.unique(mask) if x not in [0]]:
x1y1x2y2 = mask2box_xminyminxmaxymax(mask == i)
boxes.append([x1y1x2y2[0], x1y1x2y2[1], x1y1x2y2[2], x1y1x2y2[3]])
return boxes
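# Small usage sketch (synthetic mask, not SpaceNet data): two labelled blobs in an
# integer mask become two [xmin, ymin, xmax, ymax] boxes.
def _example_colormask2boxes():
    demo_mask = np.zeros((10, 10), dtype=np.uint8)
    demo_mask[1:4, 2:5] = 1   # building 1
    demo_mask[6:9, 6:9] = 2   # building 2
    return colormask2boxes(demo_mask)  # expected: [[2, 1, 4, 3], [6, 6, 8, 8]]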
sigmoid = lambda x: 1 / (1 + np.exp(-x))
# def denormalize(x_batch):
# #x_batch of shape batch_size,channels,height,width
# x_batch2=x_batch.numpy().copy()
# mean=[0.485, 0.456, 0.406]
# std=[0.229, 0.224, 0.225]
# for i in range(3):
# x_batch2[:,i,...] = x_batch2[:,i,...]*std[i] + mean[i]
# return (np.round(x_batch2*255)).astype('uint8')
def multimask2mask3d(multimask):
num_buildings = len(np.unique(multimask))
if num_buildings > 1:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))
for i in range(1, num_buildings):
mask3d[..., i - 1][multimask[..., 0] == i] = 1
else:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))
return (mask3d)
def multimask2mask3d_v2(multimask):
num_buildings = len(np.unique(multimask))
if multimask.sum() > 0:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))
# for i in range(1,num_buildings):
for i in [x for x in np.unique(multimask) if x not in [0]]:
mask3d[..., i - 1][multimask[..., 0] == i] = 1
else:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))
return (mask3d)
def multimask2mask3d_v3(multimask):
num_buildings = len(np.unique(multimask))
if multimask.sum() > 0:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))
# for i in range(1,num_buildings):
counter = 0
for i in [x for x in np.unique(multimask) if x not in [0]]:
mask3d[..., counter][multimask == i] = 1
counter += 1
else:
mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))
return (mask3d.astype('uint8'))
def mask2buildings(mask):
maskC = mask.copy()
maskC_output = np.zeros_like(maskC) # .astype('int32')
contours, hierarchy = cv2.findContours(maskC, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
cnt = contours[i]
maskC_output += (cv2.drawContours(maskC, [cnt], -1, 1, cv2.FILLED) > 127.5).astype('uint8')
uns = np.unique(maskC_output).copy()
for ii in range(len(uns)):
maskC_output[maskC_output == uns[ii]] = ii
return maskC_output
def masks2masknum_v2(masks):
outmask = np.zeros(masks.shape[1:])
add = masks.shape[0]
for m in range(len(masks)):
outmask += masks[m] * (m + 1 + add)
un_masks = np.unique(outmask)
for mm in range(len(un_masks)):
outmask[outmask == un_masks[mm]] = mm
return outmask # .astype('uint8')
def masks2masknum(masks):
outmask = np.zeros(masks.shape[1:])
for m in range(len(masks)):
outmask += masks[m] * (m + 1)
return outmask
def mask_to_polygon(mask):
all_polygons = []
lens=[]
for shape, value in features.shapes(mask.astype(np.int16), mask=(mask == 1), transform=rasterio.Affine(1.0, 0, 0, 0, 1.0, 0)):
# print(value)
# print(len(shape['coordinates'][0]))
all_polygons.append(shapely.geometry.shape(shape))
lens.append(len(shape['coordinates'][0]))
# print(np.argmax(lens))
all_polygons = shapely.geometry.Polygon(all_polygons[np.argmax(lens)])
if not all_polygons.is_valid:
all_polygons = all_polygons.buffer(0)
# Sometimes buffer() converts a simple Multipolygon to just a Polygon,
# need to keep it a Multi throughout
# if all_polygons.type == 'Polygon':
# all_polygons = shapely.geometry.MultiPolygon([all_polygons])
return all_polygons
def _convert_coordinates_to_raster(coords, img_size, xymax):
x_max, y_max = xymax
height, width = img_size
W1 = 1.0 * width * width / (width + 1)
H1 = 1.0 * height * height / (height + 1)
xf = W1 / x_max
yf = H1 / y_max
coords[:, 1] *= yf
coords[:, 0] *= xf
coords_int = np.round(coords).astype(np.int32)
return coords_int
def _plot_mask_from_contours(raster_img_size, contours, class_value=1):
img_mask = np.zeros(raster_img_size, np.int8)
if contours is None:
return img_mask
perim_list, interior_list = contours
# print(interior_list)
cv2.fillPoly(img_mask, perim_list, class_value)
# img_mask[np.array(list(proposalcsv.PolygonWKT_Pix.values[-1].exterior.coords)).astype(int)]=0
cv2.fillPoly(img_mask, interior_list, 0)
return img_mask
def _get_and_convert_contours(onepolygon, raster_img_size, xymax):
perim_list = []
interior_list = []
# if onepolygon is None:
# return None
# for k in range(len(onepolygon)):
poly = onepolygon
# for ppp in poly.interiors:
# print(ppp)
perim = np.array(list(poly.exterior.coords))
perim_c = _convert_coordinates_to_raster(perim, raster_img_size, xymax)
perim_list.append(perim_c)
for pi in poly.interiors:
interior = np.array(list(pi.coords))
interior_c = _convert_coordinates_to_raster(interior, raster_img_size, xymax)
interior_list.append(interior_c)
return perim_list, interior_list
def polygon2mask(polygon, width, height):
xymax = (900,900)
mask = np.zeros(( width, height))
# for i, p in enumerate(polygons):
i=0
polygon_list = wkt_loads(str(polygon))
# if polygon_list.length == 0:
# continue
contours = _get_and_convert_contours(polygon_list, (width, height), xymax)
mask = _plot_mask_from_contours((width, height), contours, 1)
return mask
def read_jpg(tile_id, data_folder='test_path'):
image_name = tile_id
image_path = data_folder + image_name
img = cv2.imread(image_path)
return img
def jpg_to_tensor(img, transforms, preprocessing=None):
augmented = transforms(image=img)
img = augmented['image']
if preprocessing is not None:
preprocessed = preprocessing(image=img, mask=np.zeros_like(img).astype('uint8'))
img = preprocessed['image']
return img
def patch_left_right_fixed(im1, mask1, im2, mask2):
r = 0.5
mid = max(2, int(im1.shape[0] * r))
img_new = np.zeros_like(im1)
img_new[:, :mid, :] = im1[:, -mid:, :]
img_new[:, mid:, :] = im2[:, :-mid, :]
mask_new = np.zeros_like(mask1)
mask_new[:, :mid] = mask1[:, -mid:]
mask_new[:, mid:] = mask2[:, :-mid]
return img_new, mask_new
def patch_top_down_fixed(im1, mask1, im2, mask2):
r = 0.5
mid = max(2, int(im1.shape[0] * r))
img_new = np.zeros_like(im1)
img_new[:mid, :, :] = im1[-mid:, :, :]
img_new[mid:, :, :] = im2[:-mid, :, :]
mask_new = np.zeros_like(mask1)
mask_new[:mid, :] = mask1[-mid:, :]
mask_new[mid:, :] = mask2[:-mid, :]
return img_new, mask_new
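# Toy example (assumed 4x4 arrays, not SpaceNet tiles): patch_left_right_fixed
# keeps the right half of crop 1 as the left half of the output and the left half
# of crop 2 as the right half, which is how adjacent half-overlapping crops are
# recombined here; patch_top_down_fixed does the same thing vertically.
def _example_patch_left_right():
    im1 = np.full((4, 4, 3), 10, dtype=np.uint8)
    im2 = np.full((4, 4, 3), 20, dtype=np.uint8)
    m1 = np.ones((4, 4), dtype=np.uint8)
    m2 = np.zeros((4, 4), dtype=np.uint8)
    img_new, mask_new = patch_left_right_fixed(im1, m1, im2, m2)
    # left two columns come from im1 (10s), right two from im2 (20s)
    return img_new[0, :, 0].tolist(), mask_new[0, :].tolist()  # ([10, 10, 20, 20], [1, 1, 0, 0])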
class BuildingsDatasetInferenceCombined(Dataset):
def __init__(self, img_ids: np.array = None, combImages=None,
transforms=None,
preprocessing=None):
self.combImages = combImages
self.img_ids = img_ids
self.transforms = transforms
self.preprocessing = preprocessing
def __getitem__(self, idx):
img = self.combImages[idx]
augmented = self.transforms(image=img, mask=np.zeros_like(img).astype('uint8'))
img = augmented['image']
mask = augmented['mask']
if self.preprocessing:
preprocessed = self.preprocessing(image=img, mask=np.zeros_like(img).astype('uint8'))
img = preprocessed['image']
mask = preprocessed['mask']
return img, mask
def __len__(self):
return len(self.img_ids)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SpaceNet 6 Baseline Algorithm')
parser.add_argument('--testdata',
help='BaseDir')
parser.add_argument('--outputfile',
help='Output directory')
args = parser.parse_args(sys.argv[1:])
print ('torch', torch.__version__)
print ('gpd', gpd.__version__)
print ("solaris", sol.__version__)
test_data_path = args.testdata
output_file = args.outputfile
spacenet_out_dir = os.path.join(os.path.curdir, 'data/')
spacenet_test_sar_path = os.path.join(test_data_path , 'SAR-Intensity/')
print ('Base dir :', spacenet_test_sar_path)
print ('Output dir :', spacenet_out_dir)
#
# Copy orientation to output as well...
orientation_file = os.path.join('./', 'SAR_orientations.txt')
if os.path.exists(orientation_file):
print('SAR_orientations.txt exists')
else:
print ('FATAL SAR_orientations.txt missing')
exit(1)
import datagen
from nasiosdataprocess import createdir, write_test_images
#
# 0. Nasios pipeline data prep
test_save_path = os.path.join(spacenet_out_dir, 'test_sar_productscale_orient/')
spacenet_test_sar = os.listdir(spacenet_test_sar_path)
spacenet_test_sar = np.sort(spacenet_test_sar)
orient_df = pd.read_csv(orientation_file, header=None, sep=" ")
orient_df.columns = ['date', 'orient']
testtifsdates = [('_').join(x.split('-')[1][10:].split('_')[:2]) for x in spacenet_test_sar]
mines = np.load('productminesAllBoth.npy')
maxes = np.load('productmaxesAllBoth.npy')
if not os.path.exists(test_save_path):
createdir(test_save_path)
write_test_images(orient_df, test_save_path, spacenet_test_sar_path, testtifsdates, mines, maxes)
tmp = os.listdir(test_save_path)
print('nasios test images created', len(tmp))
else:
tmp = os.listdir(test_save_path)
print('nasios test images exist', len(tmp) )
shutil.rmtree(test_save_path)
createdir(test_save_path)
write_test_images(orient_df, test_save_path, spacenet_test_sar_path, testtifsdates, mines, maxes)
tmp = os.listdir(test_save_path)
print('nasios test images created', len(tmp))
#
# 2. Test on experiments
from experiments import infer_one, create_model_optimizer
from experiments import experiments as exps_vog
from nasios import experiments1 as exp_nas1
from nasios import experiments2 as exp_nas2
exp_nas = exp_nas1 + exp_nas2
from nasios import BuildingsDatasetBorders, get_preprocessing, get_validation_augmentation
test_ids = [x[:-4] for x in os.listdir(test_save_path)]
test_tiles = ['_'.join(x.split('_')[-4:-1]) for x in test_ids]
test_tiles_nums = [int(x.split('_')[-1]) for x in test_ids]
sortorder = np.argsort(test_tiles_nums)
test_ids = list((np.array(test_ids)[sortorder]))
test_ids2 = []
for untile in np.unique(test_tiles):
test_images_part = [x for x in test_ids if untile in x]
test_ids2.extend(test_images_part)
test_ids = test_ids2[:]
test_ids_jpg = [x + '.jpg' for x in test_ids]
test_ids_vog = ['_'.join(f.split('_')[-4:]) for f in test_ids]
pream = '_'.join(test_ids[0].split('_')[:-4]) + '_'
print('pream', pream)
test_tiles_nums_nums = []
for untile in np.unique(test_tiles):
num = 0
test_images_part = [x for x in test_ids if untile in x]
test_tiles_nums = np.array([int(x.split('_')[-1]) for x in test_images_part])
test_tiles_nums2 = ((test_tiles_nums - test_tiles_nums[0]) / 2).astype('int')
test_tiles_nums_nums.extend(list(test_tiles_nums2))
#pd.DataFrame(test_ids).to_csv("test_ids.csv")
#pd.DataFrame(test_tiles_nums).to_csv("test_tiles_nums.csv")
#pd.DataFrame(test_tiles_nums_nums).to_csv("test_tiles_nums_nums.csv")
#
# Accumulate all preds here
final_preds = np.zeros((len(test_ids), 900, 900), dtype='float32')
final_w = 0
final_preds_borders = np.zeros((len(test_ids), 900, 900), dtype='float32')
final_w_borders = 0
test_df = pd.DataFrame({'ImageId': test_ids_vog, 'FullImageId':test_ids_jpg})
test_df['date'] = test_df.apply(lambda row: row.ImageId.split("_")[0] + "_" + row.ImageId.split("_")[1], axis=1)
test_df['tile'] = test_df.apply(lambda row: row.ImageId.split("_")[-1], axis=1)
orient_df = | pd.read_csv(spacenet_out_dir + '/SAR_orientations.txt', header=None, sep=" ") | pandas.read_csv |
"""This module runs unit tests over functions in the get_sentiment_score
and analyze_comments_as_tblob modules"""
import os
import unittest
import pandas as pd
import movie_analysis as mv
class TestSentiment(unittest.TestCase):
"""This class runs unit tests over functions in the get_sentiment_score
and analyze_comments_as_tblob modules
"""
# test get_sentiment_score function
def test_oneshot(self):
"""This function tests if the data frame returned
by get_sentiment_score is not empty
"""
df = mv.get_sentiment_score()
self.assertFalse(df.empty)
def test_edge(self):
"""This function tests that an exception is raised
if the wrong path name is provided
"""
ANALYZED_PATH = "something"
        self.assertRaises(Exception, mv.get_sentiment_score)
def test_column_names(self):
"""This function tests if the returned dataframe has the correct
column names
"""
self.assertEqual(list(mv.get_sentiment_score()), ['movie_id', 'sentiment_score'])
# test find_csv_filenames function
def test_find_csv_oneshot(self):
"""This function tests that the list of filenames returned is not empty"""
self.assertFalse(len(mv.find_csv_filenames(mv.data_path, suffix=".csv")) == 0)
def test_find_csv_edge(self):
"""This function tests that an exception is raised if the function is passed
the wrong file extension
"""
        self.assertRaises(Exception, mv.find_csv_filenames, mv.data_path, ".html")
def test_find_csv_2(self):
"""This function tests that the function returns the corrcet filename"""
if not os.path.isdir("movie_analysis/tests/test1"):
os.mkdir("movie_analysis/tests/test1")
f = open("movie_analysis/tests/test1/testing.csv", "w+")
f.close()
        self.assertEqual(mv.find_csv_filenames("movie_analysis/tests/test1", ".csv"), ["testing.csv"])
#test add_row function
def test_add_row(self):
"""This function tests that the function adds a row to the dataframe with the
correct values
"""
test_df = | pd.DataFrame(columns=['movie_id', 'sentiment_score']) | pandas.DataFrame |
from datetime import datetime
from typing import Any, List, Union
import pandas as pd
from binance.client import Client
from binance.exceptions import BinanceAPIException
from yacht.data.markets.base import H5Market
from yacht.logger import Logger
class Binance(H5Market):
def __init__(
self,
get_features: List[str],
logger: Logger,
api_key,
api_secret,
storage_dir: str,
include_weekends: bool,
read_only: bool
):
super().__init__(get_features, logger, api_key, api_secret, storage_dir, 'binance.h5', include_weekends, read_only)
self.client = Client(api_key, api_secret)
def request(
self,
ticker: str,
interval: str,
start: datetime,
end: datetime = None
) -> Union[List[List[Any]], pd.DataFrame]:
if '-' not in ticker:
ticker = f'{ticker}USDT'
else:
ticker = ''.join(ticker.split('-'))
start = start.strftime('%d %b, %Y')
kwargs = {
'symbol': ticker,
'interval': interval,
'start_str': start
}
if end:
end = end.strftime('%d %b, %Y')
kwargs['end_str'] = end
try:
return self.client.get_historical_klines(**kwargs)
except BinanceAPIException as e:
self.logger.info(f'Binance does not support ticker: {ticker}')
raise e
def process_request(self, data: Union[List[List[Any]], pd.DataFrame], **kwargs) -> pd.DataFrame:
df = pd.DataFrame(
data,
columns=[
'Open time',
'Open',
'High',
'Low',
'Close',
'Volume',
'Close time',
'Quote asset volume',
'Number of trades',
'Taker buy base asset volume',
'Taker buy quote asset volume',
'Ignore'
])
df['Open time'] = pd.to_datetime(df['Open time'], unit='ms')
df['Open time'] = df['Open time']
df['Open'] = pd.to_numeric(df['Open'])
df['High'] = pd.to_numeric(df['High'])
df['Low'] = pd.to_numeric(df['Low'])
df['Close'] = | pd.to_numeric(df['Close']) | pandas.to_numeric |
# import Asclepius dependencies
from asclepius.instelling import GGZ, ZKH, HardCodedParameters
# import other dependencies
from pandas import read_excel, merge, isnull, DataFrame
from typing import Union
class TestFuncties:
def __init__(self):
pass
# DAILY AUDIT FUNCTIES
def wrangle_da(self, excel_a, excel_p) -> DataFrame:
# import excelsheets
data_a = read_excel(excel_a, index_col = None, header = 2)
data_p = read_excel(excel_p, index_col = None, header = 2)
if data_a.columns.values[0] == 'Controle/norm':
# rename columns
data_a = data_a.rename(columns = {'Controle/norm': 'controle', 'Aantal': 'aantal_a', 'Impact': 'impact_a'})
data_p = data_p.rename(columns = {'Controle/norm': 'controle', 'Aantal': 'aantal_p', 'Impact': 'impact_p'})
else:
# rename columns
data_a = data_a.rename(columns = {'Controle': 'controle', 'Aantal': 'aantal_a', 'Impact': 'impact_a'})
data_p = data_p.rename(columns = {'Controle': 'controle', 'Aantal': 'aantal_p', 'Impact': 'impact_p'})
        # outer join the two dataframes on 'controle'
wrangled_data = merge(data_a, data_p, how = 'outer', on = 'controle')
# drop impact_a and impact_p
wrangled_data = wrangled_data.drop(columns=['impact_a', 'impact_p'])
# fill NaN values
wrangled_data['aantal_a'] = wrangled_data['aantal_a'].fillna(0)
# add absolute and percentual difference
wrangled_data = wrangled_data.assign(diff_abs = wrangled_data['aantal_a'] - wrangled_data['aantal_p'])
wrangled_data = wrangled_data.assign(diff_pct = round((wrangled_data['diff_abs'] / wrangled_data['aantal_p']) * 100, 2))
# if P is NaN
wrangled_data['aantal_p'] = wrangled_data['aantal_p'].fillna(0)
wrangled_data['diff_abs'] = wrangled_data['diff_abs'].fillna(wrangled_data['aantal_a'])
wrangled_data['diff_pct'] = wrangled_data['diff_pct'].fillna(100.00)
return wrangled_data
def check_verschillen_da(self, wrangled_data: DataFrame, tolerantie_abs: int, tolerantie_pct: int):
verschillen = DataFrame(columns = list(wrangled_data.columns.values))
for i in range(len(wrangled_data)):
entry = wrangled_data.iloc[i, :]
if abs(entry['diff_abs']) > tolerantie_abs and abs(entry['diff_pct']) > tolerantie_pct:
verschillen = verschillen.append(entry)
return verschillen
def aantallencheck(self, instelling: Union[GGZ, ZKH], test: bool = False):
if test:
excel_a = instelling.excel_da_a_test
excel_p = instelling.excel_da_p_test
else:
excel_a = instelling.excel_da_a
excel_p = instelling.excel_da_p
actieaantallen = self.wrangle_da(excel_a, excel_p)
bevindingen = self.check_verschillen_da(actieaantallen, instelling.tolerantie_abs, instelling.tolerantie_pct)
if len(bevindingen) == 0:
            print('No significant differences found')
else:
pass
if test:
instelling.bevindingen_da_test = bevindingen
else:
instelling.bevindingen_da = bevindingen
return None
# PRESTATIEKAART FUNCTIES
def wrangle_pk(self, excel_a, excel_p):
# import excelsheets
data_a = read_excel(excel_a, index_col = None, header = 0)
data_p = read_excel(excel_p, index_col = None, header = 0)
# drop Verschil column
data_a = data_a.drop(columns=['Verschil'])
data_p = data_p.drop(columns=['Verschil'])
        # add index columns
data_a['index_a'] = data_a.index
data_p['index_p'] = data_p.index
        # rename columns
data_a = data_a.rename(columns = {'Titel': 'titel', 'Norm': 'norm_a', 'Realisatie': 'real_a', 'sectie': 'sectie_a'})
data_p = data_p.rename(columns = {'Titel': 'titel', 'Norm': 'norm_p', 'Realisatie': 'real_p', 'sectie': 'sectie_p'})
        # join the dataframes on 'titel'
wrangled_data = merge(data_a, data_p, how = 'outer', on = 'titel')
        # return the prestatiekaart data
return wrangled_data
def check_existence(self, prestatiekaart, bevindingen):
        # checks whether a title does not exist in A or does not exist in P
for i in range(len(prestatiekaart)):
if isnull(prestatiekaart['index_a'][i]):
new_row = {'Indicator': prestatiekaart['titel'][i], 'Norm/Realisatie': 'Beiden', 'A': '', 'P': '', 'Bevinding': "Indicator niet in A portaal."}
bevindingen = bevindingen.append(new_row, ignore_index = True)
prestatiekaart = prestatiekaart.drop(index = i)
elif isnull(prestatiekaart['index_p'][i]):
new_row = {'Indicator': prestatiekaart['titel'][i], 'Norm/Realisatie': 'Beiden', 'A': '', 'P': '', 'Bevinding': "Indicator niet in P portaal."}
bevindingen = bevindingen.append(new_row, ignore_index = True)
prestatiekaart = prestatiekaart.drop(index = i)
else:
pass
prestatiekaart = prestatiekaart.reset_index(drop = True)
return prestatiekaart, bevindingen
def check_empty(self, prestatiekaart, bevindingen):
        # checks whether a title is not empty in A and P
for i in range(len(prestatiekaart)):
if isnull(prestatiekaart['norm_a'][i]) and isnull(prestatiekaart['real_a'][i]) and | isnull(prestatiekaart['norm_p'][i]) | pandas.isnull |
# Copyright (c) 2016 <NAME>
import numpy as np
import pandas as pd
from sklearn import decomposition
import json
import math
import pickle
### Load data
loadPrefix = "import/input/"
# Bins 1, 2, 3 of Up are to be removed later on
dirmagUpA = np.genfromtxt(loadPrefix+"MLM_adcpU_dirmag.csv", skip_header=3, delimiter=",", comments="#", dtype=float, invalid_raise=True)
# Bin 1 of Down is to be removed later on
dirmagDownA = np.genfromtxt(loadPrefix+"MLM_adcpD_dirmag.csv", skip_header=3, delimiter=",", comments="#", dtype=float, invalid_raise=True)
openessA = np.genfromtxt(loadPrefix+"coral_frames2.csv", skip_header=2, delimiter=",", comments="#", dtype=float, invalid_raise=True)
with open(loadPrefix+"scalar_POS434-156_conservativeTemperature_215_original.json") as fp:
ctA = np.asarray(json.load(fp)["data"])
with open(loadPrefix+"scalar_POS434-156_absoluteSalinity_215_original.json") as fp:
saA = np.asarray(json.load(fp)["data"])
with open(loadPrefix+"scalar_POS434-156_potentialDensityAnomaly_215_original.json") as fp:
sigma0A = np.asarray(json.load(fp)["data"])
### Create time series date indices
dateOffset = np.datetime64("2012-06-01T00:00:01Z")
hiResIndexStart = 354185 # in [s]
hiResIndexEnd = 1343285 # in [s] --- shorter: 1342685 --- longer: 9332570
hiResIndexStep = 600 # in [s]
hiResIndex = dateOffset + np.arange(hiResIndexStart, hiResIndexEnd, hiResIndexStep).astype("timedelta64[s]")
ignoreBecauseOfLags = 7
loResIndex = dateOffset + openessA[ignoreBecauseOfLags:,0].astype("timedelta64[s]")
ctIndex = dateOffset + ctA[:,0].astype("timedelta64[s]")
saIndex = dateOffset + saA[:,0].astype("timedelta64[s]")
sigma0Index = dateOffset + sigma0A[:,0].astype("timedelta64[s]")
dirmagUpIndex = dateOffset + dirmagUpA[:,0].astype("timedelta64[s]")
dirmagDownIndex = dateOffset + dirmagDownA[:,0].astype("timedelta64[s]")
### Create original time series / data frames
ctOrig = pd.Series(ctA[:,1], index=ctIndex)
saOrig = pd.Series(saA[:,1], index=saIndex)
sigma0Orig = | pd.Series(sigma0A[:,1], index=sigma0Index) | pandas.Series |
import json
import matplotlib.pyplot as plt
import pandas as pd
with open('benchmark_results.json') as f:
data = json.load(f)
df = | pd.json_normalize(data['benchmarks']) | pandas.json_normalize |
from scipy import stats
import random
import numpy as np
import pandas as pd
import CleanData
import timeit
import PullDataPostgreSQL
# Conditional Parameter Aggregation (CPA) is one of the most important parts
# of the entire SDV paper. It is what allows the user to synthesize an entire
# database instead of a single table. there are a couple steps that will be taken
# in this paper in accordance with the SDV paper. Keep in mind that the end result
# is to create a series of extended tables that include each original table's data
# and metrics from all the associated children tables
# 1) Start the extended table by saving all the data from the original table.
# 2) Iteratively go through each value in the original table's primary key
# 2a) Go iteratively through each child table
# 2ai) Find all primary key value instances in all children tables
# 2aj) Perform Gaussian Copula and find feature covariance
# 2ak) find alpha and beta values for distribution
# 2al) save all covariance and alpha beta values into the extended table
def ConditionalParameterAggregaation(df, children):
# df is the information from the original table. This includes missing value indices
# and has all datetime values converted to EPOCH
#
# children is a list of all child tables
#
# cur is a database cursor object that will be used to pull data in the future
for childstr in children:
print(childstr)
child = pd.DataFrame.from_csv('%s.csv' % childstr)
child.fillna(value=np.nan, inplace=True)
# saves all data as categorical or not. ignores the primary key
logicalCategorical = CleanData.IdentifyCategorical(child)
# preallocates memory for points to be appended to in the future
df = MakeBlankDataFrame(df, child, childstr, logicalCategorical)
# iterate over all IDs in the primary key with the intent of finding and
# inputting data
for c in range(len(df[df.columns[0]])):
print(c)
ID = df[df.columns[0]][c]
# pulls all data in the child table corresponding to the specific ID
data = | pd.DataFrame(child[child[df.columns[0]] == ID]) | pandas.DataFrame |
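# Hedged sketch of the per-parent aggregation step outlined in the comments above
# (NOT the SDV implementation): for every primary-key value, gather its child rows
# and attach simple per-column distribution parameters to the parent. Mean/std are
# used here as a stand-in for the copula covariance and alpha/beta parameters.
def _example_extend_parent_with_child_params(parent, child, key):
    numeric_cols = child.select_dtypes(include=[np.number]).columns.drop(key, errors='ignore')
    rows = []
    for pk in parent[key]:
        child_rows = child.loc[child[key] == pk, numeric_cols]
        params = {key: pk}
        for col in numeric_cols:
            params['%s__mean' % col] = child_rows[col].mean()
            params['%s__std' % col] = child_rows[col].std()
        rows.append(params)
    return parent.merge(pd.DataFrame(rows), on=key, how='left')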
import os
import numpy as np
import pandas as pd
from solartf.core.pipeline import TFPipelineBase
from .generator import ClassifierDirectoryGenerator
class ClassificationPipeline(TFPipelineBase):
def inference(self, dataset_type='test'):
self.load_model().load_dataset()
results = []
for inputs, outputs in self.dataset[dataset_type]:
fnames = [os.path.basename(image_input.image_path)
for image_input in inputs]
image_arrays = np.stack([image_input.image_array
for image_input in inputs], axis=0)
predict_results = self.model.predict(image_arrays)
for fname, gt, predict in zip(fnames, outputs, predict_results):
result = {
'fname': fname,
'class_id': np.argmax(gt),
'label': np.argmax(predict)
}
for index, score in enumerate(predict):
result.update({f'class_{index}_score': score})
results.append(result)
return | pd.DataFrame(results) | pandas.DataFrame |
import typing
import collections
import pandas as pd
import glob
import re
from itertools import product
from pycoingecko import CoinGeckoAPI
from datetime import datetime, timedelta, timezone
from utils import load_json, save_json, json_serialize_datetime
cg = CoinGeckoAPI()
symbol_id_map = {}
def initialize_coingecko_api():
cg_coins_list = cg.get_coins_list()
for c in cg_coins_list:
symbol_id_map[c['symbol']] = c['id']
def tidy_up_prices(prices: typing.List[typing.List]) -> typing.List[typing.Tuple]:
"""
input [[timestamp_ms, price_avg], [..]...] (90d ago -> now, 180d ago -> 90d ago, 270d ago -> 180d ago)
output [(datetime(timestamp_s), price_avg), (..)...] (sorted by timestamp)
"""
p2 = [(datetime.fromtimestamp(int(p[0]/1000), timezone.utc), p[1]) for p in prices]
p2.sort(key=lambda p: p[0])
return p2
def get_90d_of_hourly_prices(coin_symbol: str, base: str, to_timestamp: datetime):
"""
Coingecko only gives us hourly data for 90d at a time, more than that and it's daily prices.
"""
coin_symbol = coin_symbol.lower()
base = base.lower()
coin_id = symbol_id_map[coin_symbol]
delta = timedelta(days=90)
res = cg.get_coin_market_chart_range_by_id(coin_id, base, int((to_timestamp-delta).timestamp()), int(to_timestamp.timestamp()))
return res['prices']
def get_complete_hourly_prices(coin_symbol: str, base: str):
"""
Request 90d at a time from Coingecko to keep getting hourly data for the entire life of the coin
"""
all_prices = []
delta = 0
while True:
answer = get_90d_of_hourly_prices(coin_symbol, base, datetime.now() - timedelta(days=delta))
if not answer:
break
delta += 90
all_prices.extend(answer)
return tidy_up_prices(all_prices)
def use_cached_price_feeds_or_download_prices(pool_address: str, initial_state: typing.Dict, fiat_currency: str):
tokens = initial_state['pool']['tokens']
price_feeds = glob.glob(f'./{pool_address}/coingecko-*.json')
answer = {token:feed_name for token, feed_name in list(product(tokens, price_feeds)) if token in feed_name}
# product returns every possible combination of the elements in the
# iterables you give it, so you don't have to write a double for loop. One
# step towards functional programming. Also this is a dict comprehension.
if len(answer.keys()) != len(tokens):
initialize_coingecko_api()
for token in tokens:
prices = get_complete_hourly_prices(token, fiat_currency)
path = save_coingecko_prices_json(pool_address, token, fiat_currency, prices)
answer[token] = path
return answer
def only_prices_at_and_after_initial_state(prices: typing.List[typing.Tuple], initial_state_datetime: str) -> typing.Tuple[typing.Tuple, typing.List[typing.Tuple]]:
initial_state_datetime = datetime.fromisoformat(initial_state_datetime)
for i, p in enumerate(prices):
if p[0] > initial_state_datetime:
return prices[i-1], prices[i:]
raise Exception("There is no price data available after the simulation's starting point")
def load_coingecko_prices_json(path: str):
prices = load_json(path)
prices = [(datetime.fromisoformat(p[0]), p[1]) for p in prices]
return prices
def save_coingecko_prices_json(pool_address: str, token: str, base: str, prices: typing.List[typing.Tuple]):
path = f'./{pool_address}/coingecko-{token.upper()}{base.upper()}.json'
save_json(prices, path, default=json_serialize_datetime)
return path
def dataframeize(prices: typing.List[typing.Tuple], column_name: str) -> pd.DataFrame:
df = | pd.DataFrame(prices) | pandas.DataFrame |
"""
ABSOLUTELY NOT TESTED
"""
import time
import os
import datetime
from collections import namedtuple
import numpy as np
import pandas as pd
import sklearn.preprocessing
import torch
import torch.nn as nn
import torch.optim as optim
from dateutil.relativedelta import relativedelta
from simple_ts_forecast.models import Model
SavedFit = namedtuple('SavedFit', 'filename date_test_start datetime_fit mape')
def r2_score(y_test, y_pred, torch_order=False):
if torch_order:
y_test, y_pred = y_pred, y_test
if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray):
return 1 - np.mean((y_test - y_pred) ** 2) / np.mean((y_test - np.mean(y_test)) ** 2)
elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return 1 - torch.mean((y_test - y_pred) ** 2).item() / torch.mean((y_test - torch.mean(y_test)) ** 2).item()
else:
raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}")
def mean_absolute_percent_error(y_test, y_pred, torch_order=False):
if torch_order:
y_test, y_pred = y_pred, y_test
if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray):
return np.mean(np.abs((y_test - y_pred) / y_test)) * 100
elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.mean(torch.abs((y_test - y_pred) / y_test)) * 100
else:
raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}")
class LSTM(Model):
"""Use this class as another classic simple_ts_forecast"""
class _Model(nn.Module):
"""PyTorch RNN model"""
def __init__(self, input_size, hidden_size, output_size, device):
super().__init__()
self.device = device
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.lstm_1 = nn.LSTMCell(self.input_size, self.hidden_size)
self.lstm_2 = nn.LSTMCell(self.hidden_size, self.hidden_size)
self.dropout_1 = nn.Dropout(p=0.5)
self.dropout_2 = nn.Dropout(p=0.1)
self.linear = nn.Linear(self.hidden_size, self.input_size)
self.out_linear = nn.Linear(self.input_size, self.output_size)
def forward(self, x, future=1):
x = x.to(self.device)
outputs = []
# reset the state of LSTM
# the state is kept till the end of the sequence
h_t1, c_t1 = self.init_hidden(x.size(0))
h_t2, c_t2 = self.init_hidden(x.size(0))
for input_t in x.split(1, dim=1):
h_t1, c_t1 = self.lstm_1(input_t.squeeze(1), (h_t1, c_t1))
h_t1 = self.dropout_1(h_t1)
h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2))
output = self.linear(self.dropout_2(h_t2))
outputs += [self.out_linear(output)]
for i in range(future - 1):
h_t1, c_t1 = self.lstm_1(output, (h_t1, c_t1))
h_t1 = self.dropout_1(h_t1)
h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2))
output = self.linear(self.dropout_2(h_t2))
outputs += [self.out_linear(output)]
outputs = torch.stack(outputs, 1).squeeze(2)
return outputs
def init_hidden(self, batch_size):
h_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device)
c_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device)
return h_t, c_t
def __init__(self, n=14, window=35, lr=0.005, sched_step_size=10, sched_gamma=0.5,
model_params=None, model_input_size=1, model_hidden_size=300, model_output_size=1, scaler=None,
device=None, gpu_num=0, train_set_prop=0.9, batch_size=175, n_epochs=30,
models_dir='lstm_saves/ts_mnpz/',
days_between_fits=31, n_fits=3, search_window=14, post_process_coef=0.75):
"""Init model
Args:
            n (int, optional): number of future days to predict. Defaults to 14.
            window (int, optional): number of past days used as model input. Defaults to 35.
lr (float, optional): learning rate of optimizer. Defaults to 0.005.
sched_step_size (int, optional): lr_scheduler.StepLR step size. Defaults to 10.
sched_gamma (float, optional): lr_scheduler.StepLR gamma. Defaults to 0.5.
model_params (dict, optional): dict of params = args to model init. Defaults to dict of 3 params below.
            model_input_size (int, optional): param of Model, number of input features. Defaults to 1.
model_hidden_size (int, optional): param of Model, size of hidden layers. Defaults to 300.
model_output_size (int, optional): param of Model, size of output. Defaults to 1.
scaler (sklearn.preprocessing.*Scaler, optional): class Scaler for features. Defaults to sklearn.preprocessing.StandardScaler.
device (torch.device, optional): device train on. Defaults to gpu, if available.
gpu_num (int, optional): gpu num in sys. Defaults to 0.
            train_set_prop (float, optional): if date_test_start is not provided, this fraction of the data is used for training. Defaults to 0.9.
batch_size (int, optional): batch size for train. Defaults to 175.
n_epochs (int, optional): number epochs for train. Defaults to 30.
            models_dir (str, optional): directory where model checkpoints are saved. Defaults to 'lstm_saves/ts_mnpz/'.
            days_between_fits (int, optional): days between refits in predict_for_report. Defaults to 31.
            n_fits (int, optional): number of fit attempts per training run; the best by MAPE is kept. Defaults to 3.
search_window (int, optional): search saved fit up to search_window days back. Defaults to 14.
post_process_coef (float, optional): in [0, 1]. Defaults to 0.75.
"""
super().__init__()
self.model_params = model_params or dict(input_size=model_input_size, hidden_size=model_hidden_size,
output_size=model_output_size)
self.device = device or torch.device(f'cuda:{gpu_num}' if torch.cuda.is_available() else 'cpu')
self.cpu_device = torch.device('cpu')
self.model = self._Model(**self.model_params, device=self.cpu_device)
self.loss_fn = nn.MSELoss()
self.lr = lr
self.sched_step_size = sched_step_size
self.sched_gamma = sched_gamma
self.Scaler = scaler or sklearn.preprocessing.StandardScaler
self.scalers = []
self.n_in = window
self.n_out = n
self.n_epochs = n_epochs
self.batch_size = batch_size
self.seeds = [0, 42, 1, 123, 1337, 2000, -1000, 300]
self.models_dir = models_dir
os.makedirs(self.models_dir, exist_ok=True)
self.days_between_fits = days_between_fits
self._filename_pattern = 'model_{date_test_start}_{datetime_fit}_{mape:.2f}_.pt'
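        # e.g. 'model_2020-01-01_2020-02-03 174455_2.41_.pt' (written by save_fit below)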
self.train_set_prop = train_set_prop
self.n_fits = n_fits
self.search_window = search_window
self.post_process_coef = post_process_coef
def fit(self, X, verbose=False, date_test_start=None, force_fit=False, load_from_filename=None, saving=True):
"""fit or load LSTM model
Args:
            X (pd.DataFrame): all series used for training (and testing the model), without NaN values
            verbose (bool, optional): if True, prints verbose information. Defaults to False.
            date_test_start (str or datetime): date of the first n_out-day prediction window. Defaults to the end of the first 90% of the data.
            force_fit (bool, optional): fit even if a saved model exists. Defaults to False.
            load_from_filename (str, optional): filename to load from (without the directory name). Defaults to None.
"""
ind = pd.to_datetime(X.index)
X = X.values
n_features = X.shape[1]
if date_test_start is None:
test_start = int(len(X) * self.train_set_prop)
date_test_start = pd.to_datetime(ind[test_start])
else:
test_start = ind.get_loc(date_test_start) + 1 - self.n_in - self.n_out
self._test_start = test_start
self.date_test_start = pd.to_datetime(date_test_start)
train = X[:test_start].reshape(-1, n_features)
test = X[test_start:].reshape(-1, n_features)
trains = []
tests = []
for i in range(n_features):
scaler = self.Scaler()
series = train[:, i].reshape(-1, 1)
scaler = scaler.fit(series)
            trains.append(scaler.transform(series))
tests.append(scaler.transform(test[:, i].reshape(-1, 1)))
self.scalers.append(scaler)
shift_size = self.n_in
train_arr = np.concatenate(trains, 1)
test_arr = np.concatenate(tests, 1)
x_train, y_train = self.series_to_supervised(train_arr, self.n_in, self.n_out, shift_size, for_new_arch=True)
self._x_train = x_train
self._y_train = y_train
x_test, y_test = self.series_to_supervised(test_arr, self.n_in, self.n_out, shift_size, for_new_arch=True)
self._x_test = x_test
self._y_test = y_test
if load_from_filename and not force_fit:
self.load_model(self.models_dir + load_from_filename)
elif force_fit:
self._n_fits(self.n_fits, verbose, saving)
else:
filename = self.find_nearest_save(self.date_test_start)
if filename:
self.load_model(self.models_dir + filename)
else:
self._n_fits(self.n_fits, verbose, saving)
def _n_fits(self, n_fits=3, verbose=False, saving=True):
info = []
min_mape = float('inf')
min_mape_i = 0
for i in range(n_fits):
if i < len(self.seeds):
torch.manual_seed(self.seeds[i])
else:
torch.seed()
self.model = self._Model(**self.model_params, device=self.device)
self.model.to(self.device)
self.loss_fn = nn.MSELoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=self.sched_step_size,
gamma=self.sched_gamma)
if verbose:
print(f'START fit {i}')
train_loss, val_loss, tttime, mape = self.train(self._x_train, self._y_train, self._x_test, self._y_test,
verbose=verbose)
if verbose:
print(f'MAPE on {i} fit = {mape:.4f}, last best = {min_mape:.4f}, elapsed {tttime / 60:.2f}min.\n')
if min_mape > mape:
min_mape = mape
min_mape_i = i
info.append((self.model, self.loss_fn, self.optimizer, self.scheduler))
self.model.to(self.cpu_device)
self.model.device = self.cpu_device
if verbose:
print(f'\nTHE BEST Model is {min_mape_i} with MAPE = {min_mape:.4f}\n')
self.model, self.loss_fn, self.optimizer, self.scheduler = info[min_mape_i]
self.mape_on_val = min_mape
if saving:
self.save_fit()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def predict(self, X, dates_from_predict=None, post_process=True):
"""
:param X: all series, same as in fit(), but with additional data at the end
:type X: pd.DataFrame or np.ndarray
:param dates_from_predict: indexes of days in df to predict
if None predicts for last date in df
        :return: pd.DataFrame of predictions for each date in dates_from_predict
"""
n_features = X.shape[1]
trains = []
for i in range(n_features):
scaler = self.scalers[i]
series = X.iloc[:, i:i + 1].values
trains.append(scaler.transform(series))
X = pd.DataFrame(np.concatenate(trains, 1), index=X.index)
ind = X.index
if dates_from_predict is None:
dates_from_predict = [ind[-1]]
to_predict = []
for date in dates_from_predict:
end_ind = ind.get_loc(date)
x = X.iloc[end_ind - self.n_in:end_ind, :].values
to_predict.append(x)
to_predict = np.array(to_predict)
x = torch.from_numpy(to_predict).float()
with torch.no_grad():
self.model.eval()
y_pred = self.model(x, future=self.n_out).cpu()
y_pred = y_pred[:, -self.n_out:].numpy()
predicted_scaled = self._scale_all_predictions(y_pred)
predicted_scaled = np.array(predicted_scaled).reshape(len(dates_from_predict), self.n_out)
columns = [f'n{i + 1}' for i in range(self.n_out)]
pred = pd.DataFrame(predicted_scaled, index=dates_from_predict, columns=columns)
if post_process:
ma = X.loc[pred.index].values[:, :1]
ppc = self.post_process_coef
pred = pred - predicted_scaled[:, :1] + (ma * ppc + predicted_scaled[:, :1] * (1 - ppc))
return pred
def predict_for_report(self, X, date_start, date_end, current_fit=False, force_fits=False, verbose=False,
saving=True, post_process=True):
date_start = pd.to_datetime(date_start)
date_end = pd.to_datetime(date_end)
columns = [f'n{i + 1}' for i in range(self.n_out)]
if current_fit:
predicted = self._evaluate_all(self._x_test, self._y_test)
start = date_start - relativedelta(days=self.n_out)
ind = pd.date_range(start, periods=len(predicted))
return pd.DataFrame(predicted, index=ind, columns=columns)
flag = False
preds = []
l_range = (date_end - date_start).days
for i in range(0, l_range, self.days_between_fits):
if l_range - (i + self.days_between_fits) < self.n_out:
flag = True
new_date_start = date_start + relativedelta(days=i)
new_end = new_date_start + relativedelta(days=self.days_between_fits - 1)
if flag:
new_end = date_end
if force_fits:
self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose,
saving=saving)
else:
saved_fit_fn = self.find_nearest_save(new_date_start)
if saved_fit_fn:
self.fit(X.loc[:new_end], date_test_start=new_date_start, load_from_filename=saved_fit_fn,
verbose=verbose, saving=saving)
else:
self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose,
saving=saving)
predicted = self._evaluate_all(self._x_test, self._y_test)
start = new_date_start - relativedelta(days=self.n_out)
ind = pd.date_range(start, periods=len(predicted))
preds.append(pd.DataFrame(predicted, index=ind, columns=columns))
if flag:
break
pred = pd.concat(preds)
if post_process:
predicted_scaled = pred.values
ma = X.loc[pred.index].values[:, :1]
ppc = self.post_process_coef
pred = pred - predicted_scaled[:, :1] + (ma * ppc + predicted_scaled[:, :1] * (1 - ppc))
return pred
def save_fit(self):
checkpoint = {
'model': self._Model(**self.model_params, device=self.cpu_device),
'date_test_start': self.date_test_start,
'state_dict': self.model.state_dict(),
'mape_on_val': self.mape_on_val
}
torch.save(checkpoint,
self.models_dir + self._filename_pattern.format(date_test_start=self.date_test_start.date(),
datetime_fit=datetime.datetime.now().strftime(
"%Y-%m-%d %H%M%S"),
mape=self.mape_on_val))
def load_model(self, filepath):
checkpoint = torch.load(filepath, map_location=self.cpu_device)
self.model = checkpoint['model']
self.model.load_state_dict(checkpoint['state_dict'])
self.model.eval()
self.model.to(self.cpu_device)
self.mape_on_val = checkpoint['mape_on_val']
self.date_test_start = checkpoint['date_test_start']
def list_saved_fits(self):
filenames = [fn for fn in os.listdir(self.models_dir) if fn.endswith('.pt')]
list_of_fits = []
for fn in filenames:
_, date_test_start, datetime_fit, mape, _ = fn.split('_')
            date_test_start = pd.to_datetime(date_test_start)
from aide_design.shared.units import unit_registry as u
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from pathlib import Path
def ftime(data_file_path, start, end=-1):
"""This function extracts the column of times from a ProCoDA data file.
Parameters
----------
data_file_path : string
File path. If the file is in the working directory, then the file name
is sufficient.
start : int or float
Index of first row of data to extract from the data file
end : int or float, optional
Index of last row of data to extract from the data
Defaults to -1, which extracts all the data in the file
Returns
-------
numpy array
Experimental times starting at 0 day with units of days.
Examples
--------
ftime(Reactor_data.txt, 0)
"""
if not isinstance(start, int):
start = int(start)
if not isinstance(end, int):
end = int(end)
df = pd.read_csv(data_file_path, delimiter='\t')
start_time = pd.to_numeric(df.iloc[start, 0])*u.day
day_times = pd.to_numeric(df.iloc[start:end, 0])
time_data = np.subtract((np.array(day_times)*u.day), start_time)
return time_data
def column_of_data(data_file_path, start, column, end="-1", units=""):
"""This function extracts a column of data from a ProCoDA data file.
Parameters
----------
data_file_path : string
File path. If the file is in the working directory, then the file name
is sufficient.
start : int
Index of first row of data to extract from the data file
end : int, optional
Index of last row of data to extract from the data
Defaults to -1, which extracts all the data in the file
column : int or string
int:
Index of the column that you want to extract. Column 0 is time.
The first data column is column 1.
string:
Name of the column header that you want to extract
units : string, optional
The units you want to apply to the data, e.g. 'mg/L'.
Defaults to "" which indicates no units
Returns
-------
numpy array
Experimental data with the units applied.
Examples
--------
column_of_data(Reactor_data.txt, 0, 1, -1, "mg/L")
"""
if not isinstance(start, int):
start = int(start)
if not isinstance(end, int):
end = int(end)
df = pd.read_csv(data_file_path, delimiter='\t')
if units == "":
if isinstance(column, int):
data = np.array(pd.to_numeric(df.iloc[start:end, column]))
else:
            data = np.array(pd.to_numeric(df[column][start:end]))
else:
if isinstance(column, int):
data = np.array(pd.to_numeric(df.iloc[start:end, column]))*u(units)
else:
            data = np.array(pd.to_numeric(df[column][start:end]))*u(units)
return data
def notes(data_file_path):
"""This function extracts any experimental notes from a ProCoDA data file.
Parameters
----------
data_file_path : string
File path. If the file is in the working directory, then the file name
is sufficient.
Returns
-------
dataframe
The rows of the data file that contain text notes inserted during the
experiment. Use this to identify the section of the data file that you
want to extract.
Examples
--------
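    notes(Reactor_data.txt)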
"""
df = pd.read_csv(data_file_path, delimiter='\t')
    text_row = df.iloc[0:-1, 0].str.contains('[a-zA-Z]')
text_row_index = text_row.index[text_row].tolist()
notes = df.loc[text_row_index]
return notes
def read_state(dates, state, column, units="", path="", extension=".xls"):
"""Reads a ProCoDA file and outputs the data column and time vector for
each iteration of the given state.
Parameters
----------
dates : string (list)
A list of dates or single date for which data was recorded, in
the form "M-D-Y"
state : int
The state ID number for which data should be extracted
column : int or string
int:
Index of the column that you want to extract. Column 0 is time.
The first data column is column 1.
string:
Name of the column header that you want to extract
units : string, optional
The units you want to apply to the data, e.g. 'mg/L'.
Defaults to "" which indicates no units
path : string, optional
Optional argument of the path to the folder containing your ProCoDA
files. Defaults to the current directory if no argument is passed in
extension : string, optional
The file extension of the tab delimited file. Defaults to ".xls" if
no argument is passed in
Returns
-------
time : numpy array
Times corresponding to the data (with units)
data : numpy array
Data in the given column during the given state with units
Examples
--------
time, data = read_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s")
"""
data_agg = []
day = 0
first_day = True
overnight = False
if not isinstance(dates, list):
dates = [dates]
for d in dates:
state_file = path + "statelog " + d + extension
data_file = path + "datalog " + d + extension
states = pd.read_csv(state_file, delimiter='\t')
data = pd.read_csv(data_file, delimiter='\t')
states = np.array(states)
data = np.array(data)
# get the start and end times for the state
state_start_idx = states[:, 1] == state
state_start = states[state_start_idx, 0]
state_end_idx = np.append([False], state_start_idx[0:(np.size(state_start_idx)-1)])
state_end = states[state_end_idx, 0]
if overnight:
state_start = np.insert(state_start, 0, 0)
state_end = np.insert(state_end, 0, states[0, 0])
if state_start_idx[-1]:
            state_end = np.append(state_end, data[-1, 0])
# get the corresponding indices in the data array
data_start = []
data_end = []
for i in range(np.size(state_start)):
add_start = True
for j in range(np.size(data[:, 0])):
if (data[j, 0] > state_start[i]) and add_start:
data_start.append(j)
add_start = False
if (data[j, 0] > state_end[i]):
data_end.append(j-1)
break
if first_day:
start_time = data[1, 0]
# extract data at those times
for i in range(np.size(data_start)):
t = data[data_start[i]:data_end[i], 0] + day - start_time
if isinstance(column, int):
c = data[data_start[i]:data_end[i], column]
else:
c = data[column][data_start[i]:data_end[i]]
if overnight and i == 0:
                data_agg[-1] = np.vstack((data_agg[-1], np.vstack((t, c)).T))
else:
data_agg.append(np.vstack((t, c)).T)
day += 1
if first_day:
first_day = False
if state_start_idx[-1]:
overnight = True
data_agg = np.vstack(data_agg)
if units != "":
return data_agg[:, 0]*u.day, data_agg[:, 1]*u(units)
else:
return data_agg[:, 0]*u.day, data_agg[:, 1]
def average_state(dates, state, column, units="", path="", extension=".xls"):
"""Outputs the average value of the data for each instance of a state in
the given ProCoDA files
Parameters
----------
dates : string (list)
A list of dates or single date for which data was recorded, in
the form "M-D-Y"
state : int
The state ID number for which data should be extracted
column : int or string
int:
Index of the column that you want to extract. Column 0 is time.
The first data column is column 1.
string:
Name of the column header that you want to extract
units : string, optional
The units you want to apply to the data, e.g. 'mg/L'.
Defaults to "" which indicates no units
path : string, optional
Optional argument of the path to the folder containing your ProCoDA
files. Defaults to the current directory if no argument is passed in
extension : string, optional
The file extension of the tab delimited file. Defaults to ".xls" if
no argument is passed in
Returns
-------
float list
A list of averages for each instance of the given state
Examples
--------
data_avgs = average_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s")
"""
data_agg = []
day = 0
first_day = True
overnight = False
if not isinstance(dates, list):
dates = [dates]
for d in dates:
state_file = path + "statelog " + d + extension
data_file = path + "datalog " + d + extension
states = pd.read_csv(state_file, delimiter='\t')
        data = pd.read_csv(data_file, delimiter='\t')
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby("a").mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame(
{"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
).set_index(["x", "y"])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
.groupby(level=level, sort=sort)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in
# the wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
gb = df.groupby("a")
result = gb.transform("mean")
gb2 = df2.groupby("a")
expected = gb2.transform("mean")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sort_column",
["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
"group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{
"int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
"string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
"ints": [8, 7, 4, 5, 2, 9, 1, 1],
"floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
"strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
}
)
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
tm.assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)[["Z"]]
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = pd.Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_pivot_table_values_key_error():
# This test is designed to replicate the error in issue #14938
df = pd.DataFrame(
{
"eventDate": pd.date_range(datetime.today(), periods=20, freq="M").tolist(),
"thename": range(0, 20),
}
)
df["year"] = df.set_index("eventDate").index.year
df["month"] = df.set_index("eventDate").index.month
with pytest.raises(KeyError, match="'badname'"):
df.reset_index().pivot_table(
index="year", columns="month", values="badname", aggfunc="count"
)
def test_empty_dataframe_groupby():
# GH8093
df = DataFrame(columns=["A", "B", "C"])
result = df.groupby("A").sum()
expected = DataFrame(columns=["B", "C"], dtype=np.float64)
expected.index.name = "A"
tm.assert_frame_equal(result, expected)
def test_tuple_as_grouping():
# https://github.com/pandas-dev/pandas/issues/18314
df = pd.DataFrame(
{
("a", "b"): [1, 1, 1, 1],
"a": [2, 2, 2, 2],
"b": [2, 2, 2, 2],
"c": [1, 1, 1, 1],
}
)
with pytest.raises(KeyError, match=r"('a', 'b')"):
df[["a", "b", "c"]].groupby(("a", "b"))
result = df.groupby(("a", "b"))["c"].sum()
expected = pd.Series([4], name="c", index=pd.Index([1], name=("a", "b")))
tm.assert_series_equal(result, expected)
def test_tuple_correct_keyerror():
# https://github.com/pandas-dev/pandas/issues/18798
df = pd.DataFrame(
1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]])
)
with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
# GH 21716
df = pd.DataFrame(
[[1], [1]],
columns=["foo"],
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
expected = pd.DataFrame(
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
columns=pd.MultiIndex.from_tuples(
(
("foo", "sum", "foo"),
("foo", "ohlc", "open"),
("foo", "ohlc", "high"),
("foo", "ohlc", "low"),
("foo", "ohlc", "close"),
)
),
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
result = df.groupby(pd.Grouper(freq="D")).agg(["sum", "ohlc"])
tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_nat():
# GH 9236
values = [
(pd.NaT, "a"),
(datetime(2012, 1, 2), "a"),
(datetime(2012, 1, 2), "b"),
(datetime(2012, 1, 3), "a"),
]
mi = pd.MultiIndex.from_tuples(values, names=["date", None])
ser = pd.Series([3, 2, 2.5, 4], index=mi)
result = ser.groupby(level=1).mean()
expected = pd.Series([3.0, 2.5], index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_groupby_empty_list_raises():
# GH 5289
values = zip(range(10), range(10))
df = DataFrame(values, columns=["apple", "b"])
msg = "Grouper and axis must be same length"
with pytest.raises(ValueError, match=msg):
df.groupby([[]])
def test_groupby_multiindex_series_keys_len_equal_group_axis():
# GH 25704
index_array = [["x", "x"], ["a", "b"], ["k", "k"]]
index_names = ["first", "second", "third"]
ri = pd.MultiIndex.from_arrays(index_array, names=index_names)
s = pd.Series(data=[1, 2], index=ri)
result = s.groupby(["first", "third"]).sum()
index_array = [["x"], ["k"]]
index_names = ["first", "third"]
ei = pd.MultiIndex.from_arrays(index_array, names=index_names)
expected = pd.Series([3], index=ei)
tm.assert_series_equal(result, expected)
def test_groupby_groups_in_BaseGrouper():
# GH 26326
# Test if DataFrame grouped with a pandas.Grouper has correct groups
mi = pd.MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"])
df = pd.DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi)
result = df.groupby([pd.Grouper(level="alpha"), "beta"])
expected = df.groupby(["alpha", "beta"])
assert result.groups == expected.groups
result = df.groupby(["beta", pd.Grouper(level="alpha")])
expected = df.groupby(["beta", "alpha"])
assert result.groups == expected.groups
@pytest.mark.parametrize("group_name", ["x", ["x"]])
def test_groupby_axis_1(group_name):
# GH 27614
df = pd.DataFrame(
np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
)
df.index.name = "y"
df.columns.name = "x"
results = df.groupby(group_name, axis=1).sum()
expected = df.T.groupby(group_name).sum().T
tm.assert_frame_equal(results, expected)
# test on MI column
iterables = [["bar", "baz", "foo"], ["one", "two"]]
mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"])
df = pd.DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)
results = df.groupby(group_name, axis=1).sum()
expected = df.T.groupby(group_name).sum().T
tm.assert_frame_equal(results, expected)
@pytest.mark.parametrize(
"op, expected",
[
(
"shift",
{
"time": [
None,
None,
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
None,
None,
]
},
),
(
"bfill",
{
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
]
},
),
(
"ffill",
{
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
]
},
),
],
)
def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
# GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill
tz = tz_naive_fixture
data = {
"id": ["A", "B", "A", "B", "A", "B"],
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
None,
None,
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
],
}
df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))
grouped = df.groupby("id")
result = getattr(grouped, op)()
expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
tm.assert_frame_equal(result, expected)
def test_ffill_missing_arguments():
# GH 14955
df = pd.DataFrame({"a": [1, 2], "b": [1, 1]})
with pytest.raises(ValueError, match="Must specify a fill"):
df.groupby("b").fillna()
def test_groupby_only_none_group():
# see GH21624
# this was crashing with "ValueError: Length of passed values is 1, index implies 0"
df = pd.DataFrame({"g": [None], "x": 1})
actual = df.groupby("g")["x"].transform("sum")
expected = pd.Series([np.nan], name="x")
tm.assert_series_equal(actual, expected)
def test_groupby_duplicate_index():
# GH#29189 the groupby call here used to raise
ser = pd.Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
gb = ser.groupby(level=0)
result = gb.mean()
expected = pd.Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
def test_bool_aggs_dup_column_labels(bool_agg_func):
# 21668
df = pd.DataFrame([[True, True]], columns=["a", "a"])
grp_by = df.groupby([0])
result = getattr(grp_by, bool_agg_func)()
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"idx", [ | pd.Index(["a", "a"]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 19:28:58 2020
@author: hcb
"""
import pandas as pd
import numpy as np
import lightgbm as lgb
import os
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from config import config
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import geohash
warnings.filterwarnings("ignore")
trn_path = config.train_dir
test_path = config.test_dir
def mode_mean(x):
return x.mode().mean()
def get_data(path):
df_list = []
for file in tqdm(sorted(os.listdir(path))):
file_path = os.path.join(path, file)
df = pd.read_csv(file_path)
df['time_id'] = list(range(len(df)))
df_list.append(df)
df = pd.concat(df_list)
return df
def get_latlng(df, precision=7):
tmp_df = pd.DataFrame()
tmp_df['lng'] = df['lon']
tmp_df['lat'] = df['lat']
tmp_df['code'] = tmp_df[[
'lng', 'lat'
]].apply(lambda x: geohash.encode(x['lat'], x['lng'],
precision=precision),
axis=1)
code = tmp_df['code'].values
return code
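# Usage sketch (an assumption about how this helper is meant to be called, not code from the
# original pipeline): df['geohash7'] = get_latlng(df, precision=7)
# At precision 7 each geohash cell is roughly 150 m x 150 m, so nearby pings share a code.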
def transform_day(df):
# 'time' is assumed to look like 'MMDD HH:MM:SS'
df['day'] = df['time'].apply(lambda x: int(x[2:4]))
df['month'] = df['time'].apply(lambda x: int(x[0:2]))
df['hour'] = df['time'].apply(lambda x: int(x[5:7]))
df['minute'] = df['time'].apply(lambda x: int(x[8:10]))
df['seconds'] = df['time'].apply(lambda x: int(x[11:13]))
df['time_transform'] = (df['month'] * 31 + df['day']) * 24 + df[
'hour'
] + df['minute'] / 60 + df['seconds'] / 3600
return df
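# Quick check of the assumed 'MMDD HH:MM:SS' layout with a made-up record:
# time '1021 06:30:00' -> month=10, day=21, hour=6, minute=30, seconds=0 and
# time_transform = (10*31 + 21)*24 + 6 + 30/60 + 0/3600 = 7950.5 (hours since a nominal origin).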
def get_feature(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', 'lat', 'lon', 'time_transform', 'new_id']].copy()
tmp_df.columns = ['渔船ID', 'x_1', 'y_1', 'time_transform_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['dis_path'] = np.sqrt((df['x_1'] - df['lat']) ** 2 +
(df['y_1'] - df['lon']) ** 2)
df['slope'] = np.abs((df['y_1'] - df['lon']) /
(df['x_1'] - df['lat'] + 0.001))
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['dis_path'].agg({
'max', 'median', 'mean', 'sum'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_max', 'dis_path_median',
'dis_path_mean', 'dis_path_sum']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_max', 'slope_median', 'slope_mean1']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['dis_path'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis_path_min2', 'dis_path_std2',
'dis_path_median2', 'dis_path_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['slope'].agg({
'min', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min', 'slope_median2', 'slope_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['slope'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', 'slope_min3', 'slope_std3', 'slope_median3',
'slope_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
df['time_delt'] = np.abs(df['time_transform_1'] - df['time_transform'])
df['dis/time'] = df['dis_path'] / df['time_delt']
tmp_df = df.groupby('渔船ID')['dis/time'].agg({
'mean', 'median'
}).reset_index()
tmp_df.columns = ['渔船ID', 'dis/time_mean', 'dis/time_median']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
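# How the self-merge in get_feature pairs consecutive GPS points (made-up values):
# a row of vessel 3 with time_id 7 gets new_id (3+1)*10000 + 7 = 40007; the renamed copy adds 1
# to its new_id, so the vessel's time_id 6 row also lands on 40007 and the merge attaches the
# previous position/time as x_1, y_1, time_transform_1. This assumes fewer than 10000 pings per vessel.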
def get_feature2(df2, train):
df = df2.copy()
df['new_id'] = (df['渔船ID'] + 1) * 10000 + df['time_id']
tmp_df = df[['渔船ID', '方向', '速度', 'new_id']].copy()
tmp_df.columns = ['渔船ID', '方向_1', '速度_1', 'new_id']
tmp_df['new_id'] = tmp_df['new_id'] + 1
df = df.merge(tmp_df, on=['渔船ID', 'new_id'], how='left')
df['方向_delt'] = np.abs(df['方向_1'] - df['方向'])
df['速度_delt'] = np.abs(df['速度_1'] - df['速度'])
df.dropna(inplace=True)
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_mmax', '方向_delt_median', '方向_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min2', '方向_delt_std2',
'方向_delt_median2', '方向_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['方向_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '方向_delt_min3', '方向_delt_std3',
'方向_delt_median3', '方向_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'max', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_max', '速度_delt_median', '速度_delt_mean']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min2', '速度_delt_std2',
'速度_delt_median2', '速度_delt_mean2']
train = train.merge(tmp_df, on='渔船ID', how='left')
tmp_df = df[df['速度'] > 0]
tmp_df = tmp_df.groupby('渔船ID')['速度_delt'].agg({
'min', 'std', 'median', 'mean'
}).reset_index()
tmp_df.columns = ['渔船ID', '速度_delt_min3', '速度_delt_std3',
'速度_delt_median3', '速度_delt_mean3']
train = train.merge(tmp_df, on='渔船ID', how='left')
return train
df_train = get_data(trn_path)
train_ = df_train[['渔船ID', 'type']].drop_duplicates()
df_train = transform_day(df_train)
train_ = get_feature(df_train, train_)
train_ = get_feature2(df_train, train_)
train_.drop(['type', 'slope_mean1', 'slope_mean2'], axis=1, inplace=True)
df_test = get_data(test_path)
test = df_test[['渔船ID']].drop_duplicates()
df_test = transform_day(df_test)
test = get_feature(df_test, test)
test = get_feature2(df_test, test)
test.drop(['slope_mean1', 'slope_mean2'], axis=1, inplace=True)
print('begin tfidf')
data = pd.concat((df_train, df_test))
"""This function will load the given data and continuosly interpet selected patients"""
import argparse
import pickle as pickle
import numpy as np
import pandas as pd
import tensorflow as tf
import keras.backend as K
from keras.models import load_model, Model
from keras.preprocessing import sequence
from keras.constraints import Constraint
from keras.utils.data_utils import Sequence
def import_model(path):
"""Import model from given path and assign it to appropriate devices"""
K.clear_session()
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.allow_growth = True
tfsess = tf.Session(config=config)
K.set_session(tfsess)
model = load_model(path, custom_objects={'FreezePadding':FreezePadding,
'FreezePadding_Non_Negative':FreezePadding_Non_Negative})
model_with_attention = Model(model.inputs, model.outputs +\
[model.get_layer(name='softmax_1').output,\
model.get_layer(name='beta_dense_0').output])
return model, model_with_attention
def get_model_parameters(model):
"""Extract model arguments that were used during training"""
class ModelParameters:
"""Helper class to store model parametesrs in the same format as ARGS"""
def __init__(self):
self.num_codes = None
self.numeric_size = None
self.use_time = None
self.emb_weights = None
self.output_weights = None
self.bias = None
params = ModelParameters()
names = [layer.name for layer in model.layers]
params.num_codes = model.get_layer(name='embedding').input_dim-1
params.emb_weights = model.get_layer(name='embedding').get_weights()[0]
params.output_weights, params.bias = model.get_layer(name='time_distributed_out').get_weights()
print('Model bias: {}'.format(params.bias))
if 'numeric_input' in names:
params.numeric_size = model.get_layer(name='numeric_input').input_shape[2]
#Add artificial embeddings for each numeric feature and extend the embedding weights
#Each numeric feature gets an artificial embedding that is just a 1 in its own extra dimension, which corresponds to taking the value as is
numeric_embeddings = np.zeros((params.numeric_size, params.emb_weights.shape[1]+params.numeric_size))
for i in range(params.numeric_size):
numeric_embeddings[i, params.emb_weights.shape[1]+i] = 1
#Extended embedding is original embedding extended to larger output size and numerics embeddings added
params.emb_weights = np.append(params.emb_weights,
np.zeros((params.num_codes+1, params.numeric_size)),
axis=1)
params.emb_weights = np.append(params.emb_weights, numeric_embeddings, axis=0)
else:
params.numeric_size = 0
if 'time_input' in names:
params.use_time = True
else:
params.use_time = False
return params
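# Shape bookkeeping for the embedding extension above (hypothetical sizes, not read from a model):
# with num_codes=942, an embedding width of 128 and numeric_size=3, emb_weights goes from
# (943, 128) to (943, 131) after zero-padding, and 3 one-hot rows are appended, giving a
# (946, 131) matrix whose last 3 columns pass numeric values through unchanged.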
class FreezePadding_Non_Negative(Constraint):
"""Freezes the last weight to be near 0 and prevents non-negative embeddings"""
def __call__(self, w):
other_weights = K.cast(K.greater_equal(w, 0)[:-1], K.floatx())
last_weight = K.cast(K.equal(K.reshape(w[-1, :], (1, K.shape(w)[1])), 0.), K.floatx())
appended = K.concatenate([other_weights, last_weight], axis=0)
w *= appended
return w
class FreezePadding(Constraint):
"""Freezes the last weight to be near 0."""
def __call__(self, w):
other_weights = K.cast(K.ones(K.shape(w))[:-1], K.floatx())
last_weight = K.cast(K.equal(K.reshape(w[-1, :], (1, K.shape(w)[1])), 0.), K.floatx())
appended = K.concatenate([other_weights, last_weight], axis=0)
w *= appended
return w
class SequenceBuilder(Sequence):
"""Generate Batches of data"""
def __init__(self, data, model_parameters, ARGS):
#Receive all appropriate data
self.codes = data[0]
index = 1
if model_parameters.numeric_size:
self.numeric = data[index]
index += 1
if model_parameters.use_time:
self.time = data[index]
self.num_codes = model_parameters.num_codes
self.batch_size = ARGS.batch_size
self.numeric_size = model_parameters.numeric_size
self.use_time = model_parameters.use_time
def __len__(self):
"""Compute number of batches.
Add extra batch if the data doesn't exactly divide into batches
"""
if len(self.codes)%self.batch_size == 0:
return len(self.codes) // self.batch_size
return len(self.codes) // self.batch_size+1
def __getitem__(self, idx):
"""Get batch of specific index"""
def pad_data(data, length_visits, length_codes, pad_value=0):
"""Pad data to desired number of visiits and codes inside each visit"""
zeros = np.full((len(data), length_visits, length_codes), pad_value)
for steps, mat in zip(data, zeros):
if steps != [[-1]]:
for step, mhot in zip(steps, mat[-len(steps):]):
#Populate the data into the appropriate visit
mhot[:len(step)] = step
return zeros
#Compute reusable batch slice
batch_slice = slice(idx*self.batch_size, (idx+1)*self.batch_size)
x_codes = self.codes[batch_slice]
#Max number of visits and codes inside the visit for this batch
pad_length_visits = max(map(len, x_codes))
pad_length_codes = max(map(lambda x: max(map(len, x)), x_codes))
#Number of elements in a batch (useful in case of partial batches)
length_batch = len(x_codes)
#Pad data
x_codes = pad_data(x_codes, pad_length_visits, pad_length_codes, self.num_codes)
outputs = [x_codes]
#Add numeric data if necessary
if self.numeric_size:
x_numeric = self.numeric[batch_slice]
x_numeric = pad_data(x_numeric, pad_length_visits, self.numeric_size, -99.0)
outputs.append(x_numeric)
#Add time data if necessary
if self.use_time:
x_time = sequence.pad_sequences(self.time[batch_slice],
dtype=np.float32, maxlen=pad_length_visits,
value=+99).reshape(length_batch, pad_length_visits, 1)
outputs.append(x_time)
return outputs
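# Usage sketch (assumes ARGS.batch_size is set and `model` comes from import_model above):
# generator = SequenceBuilder(data, model_parameters, ARGS)
# probabilities = model.predict_generator(generator, max_queue_size=15, verbose=1)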
def read_data(model_parameters, path_data, path_dictionary):
"""Read the data from provided paths and assign it into lists"""
data = pd.read_pickle(path_data)
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import yaml
from math import ceil
import collections
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from imblearn.metrics import geometric_mean_score
import pickle
with open('nsga2/config_file.yaml', 'r') as f:
config = yaml.safe_load(f)
def decode(var_range, **features):
"""
Decoding hyperparameters.
"""
features['criterion'] = round(features['criterion'], 0)
if features['max_depth'] is not None:
features['max_depth'] = int(round(features['max_depth']))
else:
features['max_depth'] = var_range[1][1]
features['min_samples_split'] = int(round(features['min_samples_split']))
#features['min_samples_leaf'] = int(round(features['min_samples_leaf']))
if features['max_leaf_nodes'] is not None:
features['max_leaf_nodes'] = int(round(features['max_leaf_nodes']))
else:
features['max_leaf_nodes'] = var_range[3][1]
if features['class_weight'] is not None:
features['class_weight'] = int(round(features['class_weight']))
hyperparameters = ['criterion', 'max_depth', 'min_samples_split', 'max_leaf_nodes', 'class_weight']
list_of_hyperparameters = [(hyperparameter, features[hyperparameter]) for hyperparameter in hyperparameters]
features = collections.OrderedDict(list_of_hyperparameters)
return features
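# Example of the rounding behaviour with made-up gene values:
# decode(var_range, criterion=0.3, max_depth=7.6, min_samples_split=2.2,
#        max_leaf_nodes=None, class_weight=3.4)
# -> OrderedDict(criterion=0.0, max_depth=8, min_samples_split=2,
#                max_leaf_nodes=var_range[3][1], class_weight=3)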
def read_data(df_name):
"""
Reads the dataset to work with.
"""
df = pd.read_csv(config['ROOT_PATH'] + '/data/' + df_name + '.csv', sep = ',')
return df
def score_text(v):
if v == 'Low':
return 0
elif v == 'Medium':
return 1
else:
return 2
def get_matrices(df_name, seed):
"""
Split dataframe into train, validation and test sets.
"""
df = read_data(df_name)
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
if(df_name == 'propublica_violent_recidivism'):
X = X[['sex', 'age', 'age_cat', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'c_charge_degree', 'c_charge_desc', 'decile_score', 'score_text']]
if(df_name == 'propublica_recidivism'):
X = X[['sex', 'age', 'age_cat', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'c_charge_degree', 'c_charge_desc', 'decile_score', 'score_text']]
le = preprocessing.LabelEncoder()
for column_name in X.columns:
if X[column_name].dtype == object:
X[column_name] = X[column_name].astype(str)
if(column_name == 'race' and df_name == 'adult'):
X[column_name] = np.where(X[column_name] == 'White', 0, 1)
elif(column_name == 'sex'):
X[column_name] = np.where(X[column_name] == 'Male', 0, 1)
elif(column_name == 'race' and (df_name == 'propublica_recidivism' or df_name == 'propublica_violent_recidivism')):
X[column_name] = np.where(X[column_name] == 'Caucasian', 0, 1)
elif(column_name == 'compas_screening_date' or column_name == 'screening_date' or column_name == 'dob'):
X[column_name] = pd.to_datetime(X[column_name])
X['year'] = X[column_name].dt.year
X['month'] = X[column_name].dt.month
X['day'] = X[column_name].dt.day
X.drop(column_name, inplace = True, axis = 1)
elif(column_name == 'Race'):
X[column_name] = np.where(X[column_name] == 'W', 0, 1)
elif(column_name == 'score_text'):
X[column_name] = X[column_name].map(score_text)
else:
X[column_name] = le.fit_transform(X[column_name])
elif(column_name == 'age' and df_name == 'german'):
X[column_name] = np.where(X[column_name] > 25, 0, 1)
else:
pass
# POSITIVE = 1
if(df_name == 'adult'):
y = np.where(y == '>50K', 1, 0)
elif(df_name == 'german'):
y = np.where(y == 1, 0, 1)
elif(df_name == 'propublica_recidivism' or df_name == 'propublica_violent_recidivism'):
c = X.select_dtypes(np.number).columns
X[c] = X[c].fillna(0)
X = X.fillna("")
y = np.where(y == 0, 0, 1)
elif(df_name == 'ricci'):
y = np.where(y >= 70.000, 0, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = seed)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, random_state = seed)
return X_train, X_val, X_test, y_train, y_val, y_test
def write_train_val_test(df_name, seed, X_train, X_val, X_test, y_train, y_val, y_test):
train = X_train
train['y'] = y_train.tolist()
train.to_csv('./data/train_val_test/' + df_name + '_train_seed_' + str(seed) + '.csv', index = False)
val = X_val
val['y'] = y_val.tolist()
val.to_csv('./data/train_val_test/' + df_name + '_val_seed_' + str(seed) + '.csv', index = False)
test = X_test
test['y'] = y_test.tolist()
test.to_csv('./data/train_val_test/' + df_name + '_test_seed_' + str(seed) + '.csv', index = False)
def print_tree(classifier, features):
dot_data = StringIO()
export_graphviz(classifier, out_file = dot_data, feature_names = features)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png("./results/trees/tree.png")
def print_properties_tree(learner):
depth = learner.get_depth()
leaves = learner.get_n_leaves()
return depth, leaves
def train_model(df_name, seed, **features):
"""
Train classifier.
"""
train = pd.read_csv('./data/train_val_test/' + df_name + '_train_seed_' + str(seed) + '.csv')
X_train = train.iloc[:, :-1]
y_train = train.iloc[:, -1]
if features['class_weight'] is not None:
if(features['criterion'] <= 0.5):
clf = DecisionTreeClassifier(criterion = 'gini', max_depth = features['max_depth'], min_samples_split = features['min_samples_split'], max_leaf_nodes = features['max_leaf_nodes'], class_weight = {0:features['class_weight'], 1:(10-features['class_weight'])}, presort = True)
else:
clf = DecisionTreeClassifier(criterion = 'entropy', max_depth = features['max_depth'], min_samples_split = features['min_samples_split'], max_leaf_nodes = features['max_leaf_nodes'], class_weight = {0:features['class_weight'], 1:(10-features['class_weight'])}, presort = True)
else:
if features['criterion'] <= 0.5:
clf = DecisionTreeClassifier(criterion = 'gini', max_depth = features['max_depth'], min_samples_split = features['min_samples_split'], max_leaf_nodes = features['max_leaf_nodes'], class_weight = features['class_weight'], presort = True)
else:
clf = DecisionTreeClassifier(criterion = 'entropy', max_depth = features['max_depth'], min_samples_split = features['min_samples_split'], max_leaf_nodes = features['max_leaf_nodes'], class_weight = features['class_weight'], presort = True)
learner = clf.fit(X_train, y_train)
return learner
def save_model(learner, dataset_name, seed, variable_name, num_of_generations, num_of_individuals, individual_id):
# save the model to disk
path = './results/models/' + dataset_name + '/'
filename = 'model_' + dataset_name + '_seed_' + str(seed) + '_gen_' + variable_name + '_indiv_' + str(num_of_generations) + '_' + str(num_of_individuals) + '_id_' + individual_id + '.sav'
pickle.dump(learner, open(path + filename, 'wb'))
return
def val_model(df_name, learner, seed):
"""
Evaluate the classifier on the validation split.
"""
val = pd.read_csv('./data/train_val_test/' + df_name + '_val_seed_' + str(seed) + '.csv')
X_val = val.iloc[:, :-1]
y_val = val.iloc[:, -1]
y_pred = learner.predict(X_val)
return X_val, y_val, y_pred
def test_model(df_name, learner, seed):
test = pd.read_csv('./data/train_val_test/' + df_name + '_test_seed_' + str(seed) + '.csv')
X_test = test.iloc[:, :-1]
y_test = test.iloc[:, -1]
y_pred = learner.predict(X_test)
return X_test, y_test, y_pred
def split_protected(X, y, pred, protected_variable, protected_value = 1):
"""
Split datasets into (white, black), (male, female), etc.
"""
df = pd.DataFrame({protected_variable: X[protected_variable], 'y_val': y, 'y_pred': pred})
import glob
import itertools
import os
from configparser import ConfigParser, MissingSectionHeaderError, NoSectionError, NoOptionError
from datetime import datetime
import numpy as np
import pandas as pd
from shapely import geometry
from shapely.geometry import Point
from simba.drop_bp_cords import getBpHeaders
from simba.rw_dfs import *
def roiAnalysis(inifile, inputcsv):
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
config.read(inifile)
## get dataframe column name
noAnimals = config.getint('ROI settings', 'no_of_animals')
projectPath = config.get('General settings', 'project_path')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
animalBodypartList = []
for bp in range(noAnimals):
animalName = 'animal_' + str(bp + 1) + '_bp'
animalBpName = config.get('ROI settings', animalName)
animalBpNameX, animalBpNameY = animalBpName + '_x', animalBpName + '_y'
animalBodypartList.append([animalBpNameX, animalBpNameY])
columns2grab = [item[0:2] for item in animalBodypartList]
columns2grab = [item for sublist in columns2grab for item in sublist]
try:
multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
multiAnimalIDList = multiAnimalIDList.split(",")
if multiAnimalIDList[0] != '':
multiAnimalStatus = True
print('Applying settings for multi-animal tracking...')
else:
multiAnimalStatus = False
for animal in range(noAnimals):
multiAnimalIDList.append('Animal_' + str(animal+1) + '_')
print('Applying settings for classical tracking...')
except NoSectionError:
multiAnimalIDList = []
for animal in range(noAnimals):
multiAnimalIDList.append('Animal_' + str(animal + 1) + '_')
multiAnimalStatus = False
print('Applying settings for classical tracking...')
logFolderPath = os.path.join(projectPath, 'logs')
vidInfPath = os.path.join(logFolderPath, 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
vidinfDf["Video"] = vidinfDf["Video"].astype(str)
csv_dir_in = os.path.join(projectPath, 'csv', inputcsv)
ROIcoordinatesPath = os.path.join(logFolderPath, 'measures', 'ROI_definitions.h5')
rectanglesInfo = pd.read_hdf(ROIcoordinatesPath, key='rectangles')
__author__ = "<NAME>"
import json
import pandas as pd
import sqlite3
import argparse
import os
def BrowserHistoryParse(f):
conn = sqlite3.connect(f)
cursor = conn.cursor()
BrowserHistoryTable = pd.read_sql_query("SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where tag_descriptions.tag_id = 1", conn)
payload = BrowserHistoryTable['payload'].values.tolist()
sid = BrowserHistoryTable['sid'].values.tolist()
payload_navigation_URL = []
payload_navigation_URL_time = []
payload_navigation_URL_date = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
if (temp['data'].__contains__("navigationUrl") == True) and len(temp['data']['navigationUrl']) > 0:
payload_navigation_URL.append(temp['data']['navigationUrl'])
true_sid.append(sid[i])
timestamp = (temp['data']['Timestamp']).replace("T", " ").replace("Z", "")
timestamp = timestamp.split(" ")
payload_navigation_URL_date.append(timestamp[0])
payload_navigation_URL_time.append(timestamp[1] + " UTC")
temp_dict = {'SID': true_sid,'Date': payload_navigation_URL_date, 'Time': payload_navigation_URL_time, 'VisitedURL': payload_navigation_URL}
return temp_dict
def SoftwareInventory(f):
conn = sqlite3.connect(f)
SoftwareInventoryTable = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 31 and events_persisted.full_event_name="Microsoft.Windows.Inventory.Core.InventoryApplicationAdd")""", conn)
payload = SoftwareInventoryTable['payload'].values.tolist()
sid = SoftwareInventoryTable['sid'].values.tolist()
Program_Name = []
Path = []
OSVersionAtInstallTime = []
InstallDate = []
AppVersion = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
Program_Name.append(temp['data']['Name'])
Path.append(temp['data']['RootDirPath'])
OSVersionAtInstallTime.append(temp['data']['OSVersionAtInstallTime'])
if len(temp['data']['InstallDate']) > 0:
InstallDate.append(temp['data']['InstallDate'] + " UTC")
else:
InstallDate.append("NULL")
AppVersion.append(temp['data']['Version'])
true_sid.append(sid[i])
SoftwareInventorydict = {'SID': true_sid, 'Program Name': Program_Name, 'Install Path': Path, 'Install Date': InstallDate, 'Program Version': AppVersion, 'OS Version at Install Time': OSVersionAtInstallTime}
return SoftwareInventorydict
def WlanScanResults(f):
conn = sqlite3.connect(f)
cursor = conn.cursor()
wlan_scan_results_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "WlanMSM.WirelessScanResults")""", conn)
payload = wlan_scan_results_table['payload'].values.tolist()
sid = wlan_scan_results_table['sid'].values.tolist()
ssid = []
mac_addr = []
time = []
true_sid = []
for i in range(len(payload)):
temp = json.loads(payload[i])
scan_results_list = temp['data']['ScanResults'].split('\n')
for j in range(len(scan_results_list) - 1):
temp_list = scan_results_list[j].split('\t')
ssid.append(temp_list[0])
mac_addr.append(temp_list[2])
time.append(temp['time'])
true_sid.append(sid[i])
WlanScanDict = {'SID': true_sid, 'Time': time, 'SSID': ssid, 'MAC Address': mac_addr}
return WlanScanDict
def UserDefault(f, file):
conn = sqlite3.connect(f)
user_default_table = pd.read_sql_query("""SELECT events_persisted.sid, events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Census.Userdefault")""", conn)
payload = user_default_table['payload'].values.tolist()
sid = user_default_table['sid'].values.tolist()
true_sid = []
temp_file = open(file, "w")
for i in range(len(payload)):
temp = json.loads(payload[i])
temp_file.write("Device Make: " + temp['ext']['protocol']['devMake'] + "\n")
temp_file.write("Device Model: "+ temp['ext']['protocol']['devModel']+ "\n")
temp_file.write("Timezone: "+ temp['ext']['loc']['tz'] + "\n")
true_sid.append(sid[i])
temp_file.write("Default Browser: "+ temp['data']['DefaultBrowserProgId'] + "\n")
temp_list = temp['data']['DefaultApp'].split('|')
for j in range(len(temp_list)):
temp_file.write(temp_list[j]+ "\n")
temp_file.write("----------------------------------\n\n")
return temp_file
def PhysicalDiskInfo(f, file):
conn = sqlite3.connect(f)
physicaldisk_info_table = pd.read_sql_query("""SELECT events_persisted.payload from events_persisted inner join event_tags on events_persisted.full_event_name_hash = event_tags.full_event_name_hash inner join tag_descriptions on event_tags.tag_id = tag_descriptions.tag_id where (tag_descriptions.tag_id = 11 and events_persisted.full_event_name = "Microsoft.Windows.Inventory.General.InventoryMiscellaneousPhysicalDiskInfoAdd")""", conn)
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib.pyplot as plt # for making plots,
import matplotlib as mpl # to get some basif functions, heping with plot mnaking
import tensorflow as tf
import tensorflow_hub as hub
import scipy.stats as stats # library for statistics and technical programming,
import tensorflow.keras as keras
from PIL import Image, ImageDraw
from IPython.display import display
from tensorflow.keras import backend as K # used for housekeeping of tf models,
import matplotlib.patches as mpatches
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Function ................................................................................
def create_augmented_images(*, external_generator, augm_img_nr=10, paramsforgenerator=""):
"""
Function that takes the pictures in a batch provided by a keras generator
and passes them through another generator for augmentation.
Secondly, this function can be used to create a dataframe with data on the images in the batch
if augm_img_nr is set to 0,
external_generator : iterator, based on keras image generator
the function was designed to work with all images in a given dataset
provided as one batch,
augm_img_nr : the number of augmented images that will be created
for each image, if augm_img_nr=0, no augmented images will be created,
but both array, and dataframe will be returned,
paramsforgenerator : dictionary, with parameters for image generator,
used for image augmentation,
Returns : numpy array with img batch, [?, pixel_size, pixel_size, 3]
pandas dataframe, with rows corresponding to each image in the batch,
and following columns:
class = foldername in data directory, imagename= original image name,
imgtype={'raw', 'aug'}, imgidnumber=0 for raw, >=1 for augmented images
"""
# extract one batch with all images in a given dataset
img_batch, batch_labels = next(external_generator)
#.. create df, with class, image and image type names
""" I will use this df, to create, new file with subdirectories,
and save raw and augmented images with proper names
"""
img_filenames = pd.Series(external_generator.filenames).str.split(pat="/", expand=True)
img_filenames = pd.concat([img_filenames, pd.Series(["raw"]*img_filenames.shape[0])], axis=1)
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin #gives fit_transform method for free
import pdb
from sklearn.base import TransformerMixin
from collections import defaultdict
####################################################################################################
class My_LabelEncoder(BaseEstimator, TransformerMixin):
"""
################################################################################################
###### The My_LabelEncoder class was developed by <NAME> for AutoViML #########
###### The My_LabelEncoder class works just like sklearn's Label Encoder but better! #######
##### It label encodes any cat var in your dataset. It also handles NaN's in your dataset! ####
## The beauty of this function is that it takes care of NaN's and unknown (future) values.#####
##################### This is the BEST working version - don't mess with it!! ##################
################################################################################################
Usage:
le = My_LabelEncoder()
le.fit_transform(train[column]) ## this will give your transformed values as an array
le.transform(test[column]) ### this will give your transformed values as an array
Usage in Column Transformers and Pipelines:
No. It cannot be used in pipelines since it needs to produce two columns for the next stage in the pipeline.
See my other module called My_LabelEncoder_Pipe() to see how it can be used in Pipelines.
"""
def __init__(self):
self.transformer = defaultdict(str)
self.inverse_transformer = defaultdict(str)
self.max_val = 0
def fit(self,testx, y=None):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
#### There is no way to transform dataframes since you will get a nested renamer error if you try ###
### But if it is a one-dimensional dataframe, convert it into a Series
if testx.shape[1] == 1:
testx = pd.Series(testx.values.ravel(),name=testx.columns[0])
else:
#### Since it is multi-dimensional, So in this case, just return the data as is
return self
ins = np.unique(testx.factorize()[1]).tolist()
outs = np.unique(testx.factorize()[0]).tolist()
#ins = testx.value_counts(dropna=False).index
if -1 in outs:
# it already has nan if -1 is in outs. No need to add it.
if not np.nan in ins:
ins.insert(0,np.nan)
self.transformer = dict(zip(ins,outs))
self.inverse_transformer = dict(zip(outs,ins))
return self
def transform(self, testx, y=None):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
#### There is no way to transform dataframes since you will get a nested renamer error if you try ###
### But if it is a one-dimensional dataframe, convert it into a Series
if testx.shape[1] == 1:
testx = pd.Series(testx.values.ravel(),name=testx.columns[0])
else:
#### Since it is multi-dimensional, So in this case, just return the data as is
return testx, y
### now convert the input to transformer dictionary values
new_ins = np.unique(testx.factorize()[1]).tolist()
missing = [x for x in new_ins if x not in self.transformer.keys()]
if len(missing) > 0:
for each_missing in missing:
self.transformer[each_missing] = int(self.max_val + 1)
self.inverse_transformer[int(self.max_val+1)] = each_missing
self.max_val = int(self.max_val+1)
else:
self.max_val = np.max(list(self.transformer.values()))
outs = testx.map(self.transformer).values.astype(int)
### To handle category dtype you must do the next step #####
testk = testx.map(self.transformer) ## this must be still a pd.Series
if testx.dtype not in [np.int16, np.int32, np.int64, float, bool, object]:
if testx.isnull().sum().sum() > 0:
fillval = self.transformer[np.nan]
testk = testk.cat.add_categories([fillval])
testk = testk.fillna(fillval)
testk = testk.astype(int)
return testk, y
else:
testk = testk.astype(int)
return testk, y
else:
return outs
def inverse_transform(self, testx, y=None):
### now convert the input to transformer dictionary values
if isinstance(testx, pd.Series):
outs = testx.map(self.inverse_transformer).values
elif isinstance(testx, np.ndarray):
outs = pd.Series(testx).map(self.inverse_transformer).values
else:
outs = testx[:]
return outs
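# Illustrative behaviour with hypothetical data (kept as comments so nothing runs at import time):
# le = My_LabelEncoder()
# codes = le.fit_transform(pd.Series(['a', 'b', np.nan, 'a']))  # NaN receives its own integer code
# le.transform(pd.Series(['a', 'c']))  # the unseen 'c' is assigned a brand-new code instead of raising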
#################################################################################
class My_LabelEncoder_Pipe(BaseEstimator, TransformerMixin):
"""
################################################################################################
###### The My_LabelEncoder_Pipe class was developed by <NAME> for Auto_TS #####
###### The My_LabelEncoder_Pipe class works just like sklearn's Label Encoder but better! #####
##### It label encodes any cat var in your dataset. But it can also be used in Pipelines! #####
## The beauty of this function is that it takes care of NaN's and unknown (future) values.#####
##### Since it produces an unused second column it can be used in sklearn's Pipelines. #####
##### But for that you need to add a drop_second_col() function to this My_LabelEncoder_Pipe ##
##### and then feed the whole pipeline to a Column_Transformer function. It is very easy. #####
##################### This is the BEST working version - don't mess with it!! ##################
################################################################################################
Usage in pipelines:
le = My_LabelEncoder_Pipe()
le.fit_transform(train[column]) ## this will give you two columns - beware!
le.transform(test[column]) ### this will give you two columns - beware!
Usage in Column Transformers:
def drop_second_col(Xt):
### This deletes the 2nd column. Hence col number=1 and axis=1 ###
return np.delete(Xt, 1, 1)
drop_second_col_func = FunctionTransformer(drop_second_col)
le_one = make_pipeline(le, drop_second_col_func)
ct = make_column_transformer(
(le_one, catvars[0]),
(le_one, catvars[1]),
(imp, numvars),
remainder=remainder)
"""
def __init__(self):
self.transformer = defaultdict(str)
self.inverse_transformer = defaultdict(str)
self.max_val = 0
def fit(self,testx, y=None):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
#### There is no way to transform dataframes since you will get a nested renamer error if you try ###
### But if it is a one-dimensional dataframe, convert it into a Series
if testx.shape[1] == 1:
testx = pd.Series(testx.values.ravel(),name=testx.columns[0])
else:
#### Since it is multi-dimensional, So in this case, just return the data as is
return self
ins = np.unique(testx.factorize()[1]).tolist()
outs = np.unique(testx.factorize()[0]).tolist()
#ins = testx.value_counts(dropna=False).index
if -1 in outs:
# it already has nan if -1 is in outs. No need to add it.
if not np.nan in ins:
ins.insert(0,np.nan)
self.transformer = dict(zip(ins,outs))
self.inverse_transformer = dict(zip(outs,ins))
return self
def transform(self, testx, y=None):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng - delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
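# comparisons involving NaT are elementwise False, except '!=' which is True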
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
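# _nat_new builds an all-NaT index with the same length, freq and name;
# with box=False it returns the underlying int64 iNaT values instead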
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = | pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D') | pandas.PeriodIndex |
# coding: utf8
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import abc
from clinicadl.tools.inputs.filename_types import FILENAME_TYPE
import os
import nibabel as nib
import torch.nn.functional as F
from scipy import ndimage
import socket
from utils import get_dynamic_image
from .batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, BrightnessTransform, \
GammaTransform, BrightnessGradientAdditiveTransform, LocalSmoothingTransform
from .batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform, RandomCropTransform, \
RandomShiftTransform
from .batchgenerators.transforms.noise_transforms import RicianNoiseTransform, GaussianNoiseTransform, \
GaussianBlurTransform
from .batchgenerators.transforms.spatial_transforms import Rot90Transform, MirrorTransform, SpatialTransform
from .batchgenerators.transforms.abstract_transforms import Compose
from .batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from .data_tool import hilbert_2dto3d_cut, hilbert_3dto2d_cut, hilbert_2dto3d, hilbert_3dto2d, linear_2dto3d_cut, \
linear_3dto2d_cut, linear_2dto3d, linear_3dto2d
#################################
# Datasets loaders
#################################
class MRIDataset(Dataset):
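# Subclasses must set self.elem_index and self.mode before calling
# super().__init__, and implement __getitem__ and num_elem_per_image.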
"""Abstract class for all derived MRIDatasets."""
def __init__(self, caps_directory, data_file,
preprocessing, transformations=None):
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {
'CN': 0,
'AD': 1,
'sMCI': 0,
'pMCI': 1,
'MCI': 2,
'unlabeled': -1}
self.preprocessing = preprocessing
self.num_fake_mri = 0
if not hasattr(self, 'elem_index'):
raise ValueError(
"Child class of MRIDataset must set elem_index attribute.")
if not hasattr(self, 'mode'):
raise ValueError(
"Child class of MRIDataset must set mode attribute.")
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument data_file is not of correct type.')
mandatory_col = {"participant_id", "session_id", "diagnosis"}
if self.elem_index == "mixed":
mandatory_col.add("%s_id" % self.mode)
if not mandatory_col.issubset(set(self.df.columns.values)):
raise Exception("The data file is not in the correct format. "
"Columns should include %s" % mandatory_col)
self.elem_per_image = self.num_elem_per_image()
def __len__(self):
return len(self.df) * self.elem_per_image
def _get_path(self, participant, session, mode="image", fake_caps_path=None):
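# Resolve the cached .pt tensor for one subject/session. When only the NIfTI
# file exists, it is converted to a tensor and saved for later runs; when
# fake_caps_path is given, synthetic images found there take precedence over
# the real CAPS.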
if self.preprocessing == "t1-linear":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# temp_path = path.join(self.caps_directory, 'subjects', participant, session,
# 't1_linear')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1_linear', participant + '_' + session
+ FILENAME_TYPE['cropped'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print(
'Cannot find {}, {} or {} in either the real or the fake CAPS folder'.format(image_path, fake_image_path,
fake_nii_path))
else:
if os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_linear')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-extensive":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_extensive',
participant + '_' + session
+ FILENAME_TYPE['skull_stripped'] + '.pt')
elif self.preprocessing == "t1-spm-graymatter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
temp_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space')
# for file in os.listdir(temp_path):
# if file.find('_run-01_') != '-1':
# new_name = file.replace('_run-01_', '_')
# os.rename(os.path.join(temp_path, file), os.path.join(temp_path, new_name))
# print('rename {} to {}'.format(os.path.join(temp_path, file), os.path.join(temp_path, new_name)))
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-graymatter'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
image_path = fake_image_path
elif os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print(
'Cannot find {}, {} or {} in either the real or the fake CAPS folder'.format(image_path, fake_image_path,
fake_nii_path))
else:
if os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Cannot find: {}'.format(image_path))
print('Cannot find: {}'.format(origin_nii_path))
elif self.preprocessing == "t1-spm-whitematter":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-whitematter'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
if os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
elif self.preprocessing == "t1-spm-csf":
image_path = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
origin_nii_path = path.join(self.caps_directory, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
if fake_caps_path is not None:
fake_image_path = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm',
participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.pt')
fake_nii_path = path.join(fake_caps_path, 'subjects', participant, session,
't1', 'spm', 'segmentation', 'normalized_space', participant + '_' + session
+ FILENAME_TYPE['segm-csf'] + '.nii.gz')
# first use fake image, because some image lacked in tsv but have in caps
if os.path.exists(fake_image_path):
image_path = fake_image_path
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(fake_nii_path):
image_array = nib.load(fake_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(fake_caps_path, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), fake_image_path)
image_path = fake_image_path
print('save fake image: {}'.format(fake_image_path))
self.num_fake_mri = self.num_fake_mri + 1
elif os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
else:
if os.path.exists(image_path): # the real .pt file already exists
pass
elif os.path.exists(origin_nii_path): # only the real .nii.gz file exists
image_array = nib.load(origin_nii_path).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
save_dir = path.join(self.caps_directory, 'subjects', participant, session,
'deeplearning_prepare_data', '%s_based' % mode, 't1_spm')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(image_tensor.clone(), image_path)
print('save {}'.format(image_path))
else:
print('Can not find:{}'.format(image_path))
return image_path
def _get_meta_data(self, idx):
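# Map a flat dataset index to (participant, session, element index, label):
# each image contributes self.elem_per_image consecutive indices and the
# diagnosis string is encoded through self.diagnosis_code.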
image_idx = idx // self.elem_per_image
participant = self.df.loc[image_idx, 'participant_id']
session = self.df.loc[image_idx, 'session_id']
if self.elem_index is None:
elem_idx = idx % self.elem_per_image
elif self.elem_index == "mixed":
elem_idx = self.df.loc[image_idx, '%s_id' % self.mode]
else:
elem_idx = self.elem_index
diagnosis = self.df.loc[image_idx, 'diagnosis']
label = self.diagnosis_code[diagnosis]
return participant, session, elem_idx, label
def _get_full_image(self):
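# Load one full image only to infer its spatial shape (used by
# num_elem_per_image); if nothing can be read, fall back to a zero tensor
# with the expected dimensions.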
from ..data.utils import find_image_path as get_nii_path
import nibabel as nib
if self.preprocessing in ["t1-linear", "t1-extensive"]:
participant_id = self.df.loc[0, 'participant_id']
session_id = self.df.loc[0, 'session_id']
try:
image_path = self._get_path(participant_id, session_id, "image")
image = torch.load(image_path)
except FileNotFoundError:
try:
image_path = get_nii_path(
self.caps_directory,
participant_id,
session_id,
preprocessing=self.preprocessing)
image_nii = nib.load(image_path)
image_np = image_nii.get_fdata()
image = ToTensor()(image_np)
except:
# when the CAPS folder only contains extracted slices/patches, the full image cannot be found, so fall back to a fixed-size dummy volume
# image_path = os.path.join(self.caps_directory,'sub-ADNI002S0295_ses-M00_T1w_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.nii.gz')
# image_nii = nib.load(image_path)
# image_np = image_nii.get_fdata()
# image = ToTensor()(image_np)
image = torch.zeros([169, 208, 179]) # fallback shape matching the cropped t1-linear images
elif self.preprocessing in ["t1-spm-graymatter", "t1-spm-whitematter", "t1-spm-csf"]:
image = torch.zeros([121, 145, 121]) # shape of the SPM segmentation maps in normalized space
return image
@abc.abstractmethod
def __getitem__(self, idx):
pass
@abc.abstractmethod
def num_elem_per_image(self):
pass
class MRIDatasetImage(MRIDataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(self, caps_directory, data_file,
preprocessing='t1-linear', transformations=None, crop_padding_to_128=False, resample_size=None,
fake_caps_path=None, roi=False, roi_size=32, model=None, data_preprocess='MinMax',
data_Augmentation=False, method_2d=None):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
resample_size (int, optional): Spatial size of the cached, resampled tensors.
fake_caps_path (string, optional): CAPS folder with synthetic images, preferred over the real ones when present.
roi (bool): If True (together with an ROI model), AAL-based ROI sub-volumes of edge roi_size are extracted.
data_Augmentation (bool): If True, transforms are applied on the fly instead of loading cached tensors.
method_2d (string, optional): Optional 3D-to-2D conversion ('hilbert_cut', 'linear_cut', 'hilbert_downsampling' or 'linear_downsampling').
"""
self.elem_index = None
self.mode = "image"
self.model = model
self.data_preprocess = data_preprocess
self.data_Augmentation = data_Augmentation
self.crop_padding_to_128 = crop_padding_to_128
self.resample_size = resample_size
self.fake_caps_path = fake_caps_path
self.roi = roi
self.roi_size = roi_size
self.method_2d = method_2d
# if self.roi:
# if socket.gethostname() == 'zkyd':
# aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_128.npy'
# elif socket.gethostname() == 'tian-W320-G10':
# aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_128.npy'
# self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
super().__init__(caps_directory, data_file, preprocessing, transformations)
print('crop_padding_to_128: {}'.format(self.crop_padding_to_128))
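# A minimal usage sketch for this class; the CAPS path, the tsv file and the
# transform pipeline below are assumptions for illustration, not values taken
# from a concrete experiment:
#
# transformations = Compose([...]) # must accept the begin_trans_indx keyword used in __getitem__
# dataset = MRIDatasetImage('/path/to/caps', '/path/to/train.tsv',
# preprocessing='t1-linear',
# transformations=transformations,
# resample_size=128)
# sample = dataset[0]
# image, label = sample['image'], sample['label']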
def __getitem__(self, idx):
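# Two regimes: without data_Augmentation the preprocessed tensors (resampled,
# ROI, 2D or model-specific) are read from disk and created/cached on the
# first access; with data_Augmentation the original tensor is loaded and
# transformed on the fly.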
participant, session, _, label = self._get_meta_data(idx)
image_path = self._get_path(participant, session, "image", fake_caps_path=self.fake_caps_path)
if self.preprocessing == 't1-linear':
ori_name = 't1_linear'
else:
ori_name = 't1_spm'
resampled_image_path = image_path.replace(ori_name, '{}_{}_resample_{}'.format(ori_name, self.data_preprocess,
self.resample_size))
CNN2020_DEEPCNN_image_path = image_path.replace(ori_name,
'{}_{}_model_{}'.format(ori_name, self.data_preprocess,
self.model))
roi_image_path = resampled_image_path.replace('image_based',
'AAL_roi_based_{}'.format(self.roi_size))
# delate_image_path = image_path.replace('image_based',
# 'AAL_roi_based_{}'.format(self.roi_size))
# if os.path.exists(delate_image_path):
# os.remove(delate_image_path)
# print('delating:{}'.format(delate_image_path))
if not self.data_Augmentation: # no augmentation: 1. try to load the cached tensor from disk; 2. otherwise process the data and save it to disk
if self.roi and 'ROI' in self.model:
# Get resampled_image
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
# Get roi image
if os.path.exists(roi_image_path):
try:
ROI_image = torch.load(roi_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(roi_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
data = self.roi_extract(data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
# sample = {'image': image, 'roi_image': ROI_image, 'label': label, 'participant_id': participant,
# 'session_id': session,
# 'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
dir, file = os.path.split(roi_image_path)
if not os.path.exists(dir):
os.makedirs(dir)
torch.save(ROI_image, roi_image_path)
print('Save roi image: {}'.format(roi_image_path))
sample = {'image': ROI_image, 'label': label, 'participant_id': participant,
'session_id': session, 'all_image': resampled_image,
'image_path': roi_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
if os.path.exists(CNN2020_DEEPCNN_image_path):
CNN2020_DEEPCNN_image_image = torch.load(CNN2020_DEEPCNN_image_path)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
data = image.squeeze() # [128, 128, 128]
try:
CNN2020_DEEPCNN_image_image = data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(image, CNN2020_DEEPCNN_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
if os.path.exists(path_2d):
try:
data_2d = torch.load(path_2d)
except:
print('Wrong file:{}'.format(path_2d))
else:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
# print('loading:{}'.format(roi_image_path))
except:
print('Wrong file:{}'.format(resampled_image_path))
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'hilbert_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
elif self.method_2d == 'linear_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
torch.save(data_2d.clone(), path_2d)
print('saving:{}'.format(path_2d))
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model not in [
"Conv5_FC3",
'DeepCNN',
'CNN2020',
'CNN2020_gcn',
'DeepCNN_gcn',
"Dynamic2D_net_Alex",
"Dynamic2D_net_Res34",
"Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16",
"Dynamic2D_net_Vgg11",
"Dynamic2D_net_Mobile",
'ROI_GCN']:
if os.path.exists(resampled_image_path):
try:
resampled_image = torch.load(resampled_image_path)
except:
raise FileExistsError('file error:{}'.format(resampled_image_path))
# if self.data_Augmentation and self.transformations:
# dict = {}
# dict['data'] = resampled_image
# begin_trans_indx = 0
# for i in range(len(self.transformations.transforms)):
# if self.transformations.transforms[i].__class__.__name__ in ['ItensityNormalizeNonzeorVolume',
# 'ItensityNormalizeNonzeorVolume',
# 'MinMaxNormalization']:
# begin_trans_indx = i + 1
# resampled_image = self.transformations(begin_trans_indx=begin_trans_indx, **dict)
else:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
try:
resampled_image = resampled_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(resampled_data, 0)
dir, file = os.path.split(resampled_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(resampled_image, resampled_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, resampled_image_path))
sample = {'image': resampled_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': resampled_image_path, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image = torch.load(image_path)
if self.transformations:
dict = {}
dict['data'] = image
resampled_image = self.transformations(begin_trans_indx=0, **dict)
resampled_data = resampled_image.squeeze() # [128, 128, 128]
image_np = np.array(resampled_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
return sample
else: # with data_Augmentation: load the original tensor and transform it on the fly
# 1. load the original image tensor
image = torch.load(image_path)
if self.transformations: # Augmentation
dict = {}
dict['data'] = image
image = self.transformations(begin_trans_indx=0, **dict)
augmentation_data = image.squeeze() # [128, 128, 128]
# print(self.transformations)
# print(self.transformations[0])
# if self.crop_padding_to_128 and image.shape[1] != 128:
# image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
# image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
# pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
# image = pad(image) # [1, 1, 128, 128, 128]
# image = image.squeeze(0) # [1, 128, 128, 128]
# if self.resample_size is not None:
# assert self.resample_size > 0, 'resample_size should be a int positive number'
# image = image.unsqueeze(0)
# image = F.interpolate(image,
# size=self.resample_size) # resize to resample_size * resample_size * resample_size
# print('resample before trans shape:{}'.format(image.shape))
# print('resample before trans mean:{}'.format(image.mean()))
# print('resample before trans std:{}'.format(image.std()))
# print('resample before trans max:{}'.format(image.max()))
# print('resample before trans min:{}'.format(image.min()))
# # image = self.transformations(image)
# # print('resample after trans shape:{}'.format(image.shape))
# # print('resample after trans mean:{}'.format(image.mean()))
# # print('resample after trans std:{}'.format(image.std()))
# # print('resample after trans max:{}'.format(image.max()))
# # print('resample after trans min:{}'.format(image.min()))
# image = image.squeeze(0)
#
# if self.model in ['DeepCNN', 'DeepCNN_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[49, 39, 38])
# image = image.squeeze(0)
# elif self.model in ['CNN2020', 'CNN2020_gcn']:
# image = image.unsqueeze(0)
# image = F.interpolate(image, size=[139, 177, 144])
# image = image.squeeze(0)
# # preprocessing data
# data = image.squeeze() # [128, 128, 128]
# # print(data.shape)
# input_W, input_H, input_D = data.shape
# if self.model not in ["ConvNet3D", "ConvNet3D_gcn", "VoxCNN", "Conv5_FC3", 'DeepCNN', 'CNN2020', 'CNN2020_gcn',
# "VoxCNN_gcn", 'DeepCNN_gcn', "ConvNet3D_v2", "ConvNet3D_ori", "Dynamic2D_net_Alex",
# "Dynamic2D_net_Res34", "Dynamic2D_net_Res18", "Dynamic2D_net_Vgg16",
# "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
# # drop out the invalid range
# # if self.preprocessing in ['t1-spm-graymatter', 't1-spm-whitematter', 't1-spm-csf']:
# data = self.__drop_invalid_range__(data)
# print('drop_invalid_range shape:{}'.format(data.shape))
# print('drop_invalid_range mean:{}'.format(data.mean()))
# print('drop_invalid_range std:{}'.format(data.std()))
# print('drop_invalid_range max:{}'.format(data.max()))
# print('drop_invalid_range min:{}'.format(data.min()))
# # resize data
# data = self.__resize_data__(data, input_W, input_H, input_D)
# print('resize_data shape:{}'.format(data.shape))
# print('resize_data mean:{}'.format(data.mean()))
# print('resize_data std:{}'.format(data.std()))
# print('resize_data max:{}'.format(data.max()))
# print('resize_data min:{}'.format(data.min()))
# # normalization datas
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data)
# print('itensity_normalize shape:{}'.format(data.shape))
# print('itensity_normalize mean:{}'.format(data.mean()))
# print('itensity_normalize std:{}'.format(data.std()))
# print('itensity_normalize max:{}'.format(data.max()))
# print('itensity_normalize min:{}'.format(data.min()))
# # if self.transformations and self.model in ["ConvNet3D", "VoxCNN"]:
# # data = self.transformations(data)
# data = torch.from_numpy(data)
# if self.model in ['CNN2020', 'CNN2020_gcn']:
# data = np.array(data)
# data = self.__itensity_normalize_one_volume__(data, normalize_all=True)
# data = torch.from_numpy(data)
if self.model in ["Dynamic2D_net_Alex", "Dynamic2D_net_Res34", "Dynamic2D_net_Res18",
"Dynamic2D_net_Vgg16", "Dynamic2D_net_Vgg11", "Dynamic2D_net_Mobile"]:
image_np = np.array(augmentation_data)
image_np = np.expand_dims(image_np, 0) # 0,w,h,d
image_np = np.swapaxes(image_np, 0, 3) # w,h,d,0
im = get_dynamic_image(image_np)
im = np.expand_dims(im, 0)
im = np.concatenate([im, im, im], 0)
im = torch.from_numpy(im)
im = im.float()
sample = {'image': im, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
if self.roi and 'ROI' in self.model:
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
augmentation_data = self.roi_extract(augmentation_data, roi_size=self.roi_size, sub_id=participant,
preprocessing=self.preprocessing,
session=session, save_nii=False)
ROI_image = augmentation_data.unsqueeze(dim=0) # [1, num_roi, 128, 128, 128]
sample = {'image': ROI_image, 'all_image': resampled_image, 'label': label,
'participant_id': participant,
'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
elif self.method_2d is not None:
path, file = os.path.split(resampled_image_path)
file_2d = file.split('.')[0] + '_' + self.method_2d + '.' + file.split('.')[1]
path_2d = os.path.join(path, file_2d)
try:
resampled_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
resampled_image = np.expand_dims(augmentation_data, 0)
if self.method_2d == 'hilbert_cut':
data_2d = hilbert_3dto2d_cut(resampled_image)
elif self.method_2d == 'linear_cut':
data_2d = linear_3dto2d_cut(resampled_image)
elif self.method_2d == 'hilbert_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = hilbert_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
elif self.method_2d == 'linear_downsampling':
data_low = self.__resize_data__(resampled_image.squeeze(), target_size=[64, 64, 64])
data_low = torch.from_numpy(data_low)
data_2d = linear_3dto2d(data_low)
data_2d = data_2d.unsqueeze(0) # [1,512,512]
sample = {'image': data_2d.squeeze(), 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': path_2d, 'num_fake_mri': self.num_fake_mri}
elif self.model in ["CNN2020", "DeepCNN"]:
try:
CNN2020_DEEPCNN_image_image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
CNN2020_DEEPCNN_image_image = np.expand_dims(augmentation_data, 0)
dir, file = os.path.split(CNN2020_DEEPCNN_image_path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
torch.save(image, CNN2020_DEEPCNN_image_path)
print('Save resampled {} image: {}'.format(self.resample_size, CNN2020_DEEPCNN_image_path))
sample = {'image': CNN2020_DEEPCNN_image_image, 'label': label, 'participant_id': participant,
'session_id': session,
'image_path': CNN2020_DEEPCNN_image_path, 'num_fake_mri': self.num_fake_mri}
else:
try:
image = augmentation_data.unsqueeze(dim=0) # [1, 128, 128, 128]
except:
image = np.expand_dims(augmentation_data, 0)
sample = {'image': image, 'label': label, 'participant_id': participant, 'session_id': session,
'image_path': image_path, 'num_fake_mri': self.num_fake_mri}
return sample
def __drop_invalid_range__(self, volume):
"""
Crop the volume to the bounding box of voxels that differ from the background value (taken from volume[0, 0, 0]).
"""
zero_value = volume[0, 0, 0]
# print('zero:{}'.format(zero_value))
non_zeros_idx = np.where(volume != zero_value)
# print('zero idx:{}'.format(non_zeros_idx))
try:
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
except:
print(zero_value)
print(non_zeros_idx)
raise # re-raise: the bounding box cannot be computed without non-background voxels
return volume[min_z:max_z + 1, min_h:max_h + 1, min_w:max_w + 1]
def __resize_data__(self, data, input_W=None, input_H=None, input_D=None, target_size=None):
"""
Resize the data to the requested spatial size, given either as three ints or as a target_size list (as used by the 2D downsampling branches).
"""
if target_size is not None:
input_W, input_H, input_D = target_size
[depth, height, width] = data.shape
scale = [input_W * 1.0 / depth, input_H * 1.0 / height, input_D * 1.0 / width]
data = ndimage.interpolation.zoom(data, scale, order=0)
return data
def __itensity_normalize_one_volume__(self, volume, normalize_all=False):
"""
normalize the intensity of an ND volume based on the mean and std of the non-zero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
if normalize_all:
pixels = volume
else:
pixels = volume[volume > 0]
mean = pixels.mean()
std = pixels.std()
out = (volume - mean) / std
if not normalize_all:
out_random = np.random.normal(0, 1, size=volume.shape)
out[volume == 0] = out_random[volume == 0]
return out
def num_elem_per_image(self):
return 1
def roi_extract(self, MRI, roi_size=32, sub_id=None, preprocessing=None, session=None, save_nii=False):
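# For each AAL atlas region (the first 90 regions are used), mask the resized
# MRI, crop the result to the region's bounding box and resize it to
# roi_size^3; returns a (num_roi, roi_size, roi_size, roi_size) float tensor.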
roi_data_list = []
roi_label_list = []
if 'slave' in socket.gethostname():
aal_mask_dict_dir = '/root/Downloads/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'tian-W320-G10':
aal_mask_dict_dir = '/home/tian/pycharm_project/MRI_GNN/atlas/aal_mask_dict_right.npy'
elif socket.gethostname() == 'zkyd':
aal_mask_dict_dir = '/data/fanchenchen/atlas/aal_mask_dict_right.npy'
self.aal_mask_dict = np.load(aal_mask_dict_dir, allow_pickle=True).item() # 116; (181,217,181)
for i, key in enumerate(self.aal_mask_dict.keys()):
# useful_data = self.__drop_invalid_range__(self.aal_mask_dict[key])
# useful_data = resize_data(useful_data, target_size=[128, 128, 128])
# useful_data = useful_data[np.newaxis, np.newaxis, :, :, :] # 1,1,128,128,128
# roi_batch_data = MRI.cpu().numpy() * useful_data # batch, 1, 128,128,128
mask = self.aal_mask_dict[key]
# print('mask min:{}'.format(mask.min()))
# print('mask max:{}'.format(mask.max()))
# print('mask:{}'.format(mask))
ww, hh, dd = MRI.shape
MRI = self.__resize_data__(MRI, 181, 217, 181)
# MRI = (MRI - MRI.min()) / (MRI.max() - MRI.min())
roi_data = MRI * mask.squeeze() # batch, 1, 128,128,128
# print('roi_data min:{}'.format(roi_data.min()))
# print('roi_data max:{}'.format(roi_data.max()))
roi_label_list.append(key)
# save nii to Visualization
# print(image_np.max())
# print(image_np.min())
# print(roi_data.shape)
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_ori_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
            try:
                roi_data = self.__drop_invalid_range__(roi_data)  # xx,xx,xx
            except Exception:
                # log the failing subject/session and keep the uncropped ROI
                print(sub_id)
                print(session)
# roi_data = self.__drop_invalid_range__(mask.squeeze()) # xx,xx,xx
if save_nii:
image_nii = nib.Nifti1Image(roi_data, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_drop_invalid_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
# print(roi_data.shape)
roi_data = self.__resize_data__(roi_data, roi_size, roi_size, roi_size) # roi_size, roi_size, roi_size
# print(roi_data.shape)
roi_data = torch.from_numpy(roi_data)
roi_data_list.append(roi_data) # roi_size, roi_size, roi_size
# save nii to Visualization
if save_nii:
image_np = roi_data.numpy()
image_nii = nib.Nifti1Image(image_np, np.eye(4))
MRI_path = '/data/fanchenchen/atlas/{}_{}_{}_resize_roi_{}.nii.gz'.format(sub_id, session,
preprocessing, i)
nib.save(image_nii, MRI_path)
            if i >= 89:
                # keep only the first 90 atlas regions
                break
roi_batch = torch.stack(roi_data_list).type(torch.float32) # num_roi, roi_size, roi_size, roi_size
return roi_batch
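    # Hypothetical usage sketch (added, not part of the original code): roi_extract resamples the MRI
    # to the 181x217x181 atlas grid, multiplies it by each AAL mask, crops and resizes every region to
    # roi_size**3 and stacks the first 90 regions, e.g.:
    #   rois = dataset.roi_extract(mri_volume, roi_size=32, sub_id='sub-0001', session='ses-M00')
    #   rois.shape  # -> torch.Size([90, 32, 32, 32])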
class MRIDatasetPatch(MRIDataset):
def __init__(self, caps_directory, data_file, patch_size, stride_size, transformations=None, prepare_dl=False,
patch_index=None, preprocessing="t1-linear"):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
patch_index (int, optional): If a value is given the same patch location will be extracted for each image.
else the dataset will load all the patches possible for one image.
patch_size (int): size of the regular cubic patch.
stride_size (int): length between the centers of two patches.
"""
self.patch_size = patch_size
self.stride_size = stride_size
self.elem_index = patch_index
self.mode = "patch"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, patch_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
patch_path = path.join(self._get_path(participant, session, "patch")[0:-7]
+ '_patchsize-' + str(self.patch_size)
+ '_stride-' + str(self.stride_size)
+ '_patch-' + str(patch_idx) + '_T1w.pt')
image = torch.load(patch_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_patch_from_mri(full_image, patch_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session, 'patch_id': patch_idx}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
image = self._get_full_image()
patches_tensor = image.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
num_patches = patches_tensor.shape[0]
return num_patches
def extract_patch_from_mri(self, image_tensor, index_patch):
patches_tensor = image_tensor.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1,
self.patch_size,
self.patch_size,
self.patch_size)
extracted_patch = patches_tensor[index_patch, ...].unsqueeze_(
0).clone()
return extracted_patch
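    # Illustrative sketch (added, not part of the original code): unfold() slides a cubic window of
    # patch_size with step stride_size along each spatial axis, giving
    # floor((dim - patch_size) / stride_size) + 1 patches per axis. For a hypothetical [1, 169, 208, 179]
    # image with patch_size=50 and stride_size=50:
    #   per_axis = [(169 - 50) // 50 + 1, (208 - 50) // 50 + 1, (179 - 50) // 50 + 1]  # [3, 4, 3]
    #   num_patches = 3 * 4 * 3  # 36, the value num_elem_per_image() would return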
class MRIDatasetRoi(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, prepare_dl=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
"""
self.elem_index = None
self.mode = "roi"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, roi_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
raise NotImplementedError(
'The extraction of ROIs prior to training is not implemented.')
else:
image_path = self._get_path(participant, session, "image")
image = torch.load(image_path)
patch = self.extract_roi_from_mri(image, roi_idx)
if self.transformations:
patch = self.transformations(patch)
sample = {'image': patch, 'label': label,
'participant_id': participant, 'session_id': session,
'roi_id': roi_idx}
return sample
def num_elem_per_image(self):
return 2
def extract_roi_from_mri(self, image_tensor, left_is_odd):
"""
:param image_tensor: (Tensor) the tensor of the image.
:param left_is_odd: (int) if 1 the left hippocampus is extracted, else the right one.
:return: Tensor of the extracted hippocampus
"""
if self.preprocessing == "t1-linear":
if left_is_odd == 1:
# the center of the left hippocampus
crop_center = (61, 96, 68)
else:
# the center of the right hippocampus
crop_center = (109, 96, 68)
else:
raise NotImplementedError("The extraction of hippocampi was not implemented for "
"preprocessing %s" % self.preprocessing)
crop_size = (50, 50, 50) # the output cropped hippocampus size
extracted_roi = image_tensor[
:,
crop_center[0] - crop_size[0] // 2: crop_center[0] + crop_size[0] // 2:,
crop_center[1] - crop_size[1] // 2: crop_center[1] + crop_size[1] // 2:,
crop_center[2] - crop_size[2] // 2: crop_center[2] + crop_size[2] // 2:
].clone()
return extracted_roi
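    # Illustrative note (added, not part of the original code): with crop_center=(61, 96, 68) and
    # crop_size=(50, 50, 50), the slicing above keeps voxels [36:86, 71:121, 43:93], i.e. a 50-voxel
    # cube centred on the left hippocampus; the right hippocampus uses crop_center=(109, 96, 68).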
class MRIDatasetSlice(MRIDataset):
def __init__(self, caps_directory, data_file, preprocessing="t1-linear",
transformations=None, mri_plane=0, prepare_dl=False,
discarded_slices=20, mixed=False):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
transformations (callable, optional): Optional transform to be applied on a sample.
prepare_dl (bool): If true pre-extracted patches will be loaded.
mri_plane (int): Defines which mri plane is used for slice extraction.
discarded_slices (int or list): number of slices discarded at the beginning and the end of the image.
If one single value is given, the same amount is discarded at the beginning and at the end.
mixed (bool): If True will look for a 'slice_id' column in the input DataFrame to load each slice
independently.
"""
# Rename MRI plane
self.mri_plane = mri_plane
self.direction_list = ['sag', 'cor', 'axi']
if self.mri_plane >= len(self.direction_list):
raise ValueError(
"mri_plane value %i > %i" %
(self.mri_plane, len(
self.direction_list)))
# Manage discarded_slices
if isinstance(discarded_slices, int):
discarded_slices = [discarded_slices, discarded_slices]
if isinstance(discarded_slices, list) and len(discarded_slices) == 1:
discarded_slices = discarded_slices * 2
self.discarded_slices = discarded_slices
if mixed:
self.elem_index = "mixed"
else:
self.elem_index = None
self.mode = "slice"
super().__init__(caps_directory, data_file, preprocessing, transformations)
self.prepare_dl = prepare_dl
def __getitem__(self, idx):
participant, session, slice_idx, label = self._get_meta_data(idx)
slice_idx = slice_idx + self.discarded_slices[0]
if self.prepare_dl:
# read the slices directly
slice_path = path.join(self._get_path(participant, session, "slice")[0:-7]
+ '_axis-%s' % self.direction_list[self.mri_plane]
+ '_channel-rgb_slice-%i_T1w.pt' % slice_idx)
image = torch.load(slice_path)
else:
image_path = self._get_path(participant, session, "image")
full_image = torch.load(image_path)
image = self.extract_slice_from_mri(full_image, slice_idx)
if self.transformations:
image = self.transformations(image)
sample = {'image': image, 'label': label,
'participant_id': participant, 'session_id': session,
'slice_id': slice_idx}
return sample
def num_elem_per_image(self):
if self.elem_index == "mixed":
return 1
image = self._get_full_image()
return image.size(self.mri_plane + 1) - \
self.discarded_slices[0] - self.discarded_slices[1]
def extract_slice_from_mri(self, image, index_slice):
"""
This is a function to grab one slice in each view and create a rgb image for transferring learning: duplicate the slices into R, G, B channel
:param image: (tensor)
:param index_slice: (int) index of the wanted slice
:return:
To note, for each view:
Axial_view = "[:, :, slice_i]"
Coronal_view = "[:, slice_i, :]"
Sagittal_view= "[slice_i, :, :]"
"""
image = image.squeeze(0)
simple_slice = image[(slice(None),) * self.mri_plane + (index_slice,)]
triple_slice = torch.stack((simple_slice, simple_slice, simple_slice))
return triple_slice
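    # Illustrative sketch (added, not part of the original code): the tuple-based indexing selects one
    # plane according to self.mri_plane and repeats it on three channels. For a hypothetical squeezed
    # image of shape [H, W, D] and mri_plane=1 (coronal):
    #   simple_slice = image[(slice(None),) * 1 + (index_slice,)]  # image[:, index_slice] -> [H, D]
    #   triple_slice = torch.stack((simple_slice,) * 3)            # [3, H, D]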
def return_dataset(mode, input_dir, data_df, preprocessing,
transformations, params, cnn_index=None):
"""
Return appropriate Dataset according to given options.
Args:
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
input_dir: (str) path to a directory containing a CAPS structure.
data_df: (DataFrame) List subjects, sessions and diagnoses.
preprocessing: (str) type of preprocessing wanted ('t1-linear' or 't1-extensive')
transformations: (transforms) list of transformations performed on-the-fly.
params: (Namespace) options used by specific modes.
cnn_index: (int) Index of the CNN in a multi-CNN paradigm (optional).
Returns:
(Dataset) the corresponding dataset.
"""
if cnn_index is not None and mode in ["image", "roi", "slice"]:
raise ValueError("Multi-CNN is not implemented for %s mode." % mode)
if params.model == "ROI_GCN":
use_roi = True
else:
use_roi = False
if mode == "image":
return MRIDatasetImage(
input_dir,
data_df,
preprocessing,
transformations=transformations,
crop_padding_to_128=params.crop_padding_to_128,
resample_size=params.resample_size,
fake_caps_path=params.fake_caps_path,
# only_use_fake=params.only_use_fake,
roi=use_roi,
roi_size=params.roi_size,
model=params.model,
data_preprocess=params.data_preprocess,
data_Augmentation=params.data_Augmentation,
method_2d=params.method_2d
)
if mode == "patch":
return MRIDatasetPatch(
input_dir,
data_df,
params.patch_size,
params.stride_size,
preprocessing=preprocessing,
transformations=transformations,
prepare_dl=params.prepare_dl,
patch_index=cnn_index
)
elif mode == "roi":
return MRIDatasetRoi(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations
)
elif mode == "slice":
return MRIDatasetSlice(
input_dir,
data_df,
preprocessing=preprocessing,
transformations=transformations,
mri_plane=params.mri_plane,
prepare_dl=params.prepare_dl,
discarded_slices=params.discarded_slices)
else:
raise ValueError("Mode %s is not implemented." % mode)
def compute_num_cnn(input_dir, tsv_path, options, data="train"):
transformations = get_transforms(options)
if data == "train":
example_df, _ = load_data(tsv_path, options.diagnoses, 0, options.n_splits, options.baseline)
elif data == "classify":
example_df = pd.read_csv(tsv_path, sep='\t')
else:
example_df = load_data_test(tsv_path, options.diagnoses)
full_dataset = return_dataset(options.mode, input_dir, example_df,
options.preprocessing, transformations, options)
return full_dataset.elem_per_image
##################################
# Transformations
##################################
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
from scipy.ndimage.filters import gaussian_filter
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
def __repr__(self):
return self.__class__.__name__ + '(sigma={})'.format(self.sigma)
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
def __repr__(self):
return self.__class__.__name__
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.min()) / (image.max() - image.min())
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeNonzeorVolume(object):
"""
normalize the itensity of an nd volume based on the mean and std of nonzeor region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze()
image = np.array(image)
pixels = image[image > 0]
mean = pixels.mean()
std = pixels.std()
out = (image - mean) / std
out_random = np.random.normal(0, 1, size=image.shape)
out[image == 0] = out_random[image == 0]
out = torch.from_numpy(out.copy())
data_dict['data'] = out.unsqueeze(0)
return data_dict
def __repr__(self):
return self.__class__.__name__
class ItensityNormalizeAllVolume(object):
"""
normalize the itensity of an nd volume based on the mean and std of all region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
def __call__(self, **data_dict):
image = data_dict['data']
image = (image - image.mean()) / image.std()
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class CropPpadding128(object):
"""
crop padding image to 128
"""
def __call__(self, **data_dict):
image = data_dict['data']
if image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
3] == 121:
image = image[:, :, 8:-9, :] # [1, 121, 128, 121]
image = image.unsqueeze(0) # [1, 1, 121, 128, 121]
pad = torch.nn.ReplicationPad3d((4, 3, 0, 0, 4, 3))
image = pad(image) # [1, 1, 128, 128, 128]
image = image.squeeze(0) # [1, 128, 128, 128]
elif image.shape[1] == 128 and image.shape[2] == 128 and image.shape[
3] == 128:
pass
else:
            assert image.shape[1] == 121 and image.shape[2] == 145 and image.shape[
                3] == 121, "image shape must be 1*121*145*121 or 1*128*128*128, but given shape:{}".format(image.shape)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__
class Resize(torch.nn.Module):
"""
Resize data to target size
"""
def __init__(self, resample_size):
super().__init__()
# assert resample_size > 0, 'resample_size should be a int positive number'
self.resample_size = resample_size
def forward(self, **data_dict):
image = data_dict['data']
image = image.unsqueeze(0)
image = F.interpolate(image, size=self.resample_size)
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(resample_size={0})'.format(self.resample_size)
class CheckDictSize(object):
    """
    Check that the dict entry 'data' has dim 5: 1, 1, 128, 128, 128
    """
    def __call__(self, **data_dict):
        image = data_dict['data']
        if len(image.shape) == 4:
            image = np.array(image.unsqueeze(0))
        elif len(image.shape) == 3:
            image = np.array(image.unsqueeze(0).unsqueeze(0))
        assert len(image.shape) == 5
        data_dict['data'] = image
        return data_dict
def __repr__(self):
return self.__class__.__name__
class DictToImage(object):
    """
    Extract the image tensor from the transform dict
    """
    def __call__(self, **data_dict):
        image = data_dict['data']
        if len(image.shape) == 5:
            image = image.squeeze(0)
        elif len(image.shape) == 3:
            image = image.unsqueeze(0)
        return image
def __repr__(self):
return self.__class__.__name__
class DropInvalidRange(torch.nn.Module):
"""
Cut off the invalid area
"""
def __init__(self, keep_size=True):
super().__init__()
self.keep_size = keep_size
def __call__(self, **data_dict):
image = data_dict['data']
image = image.squeeze(0)
zero_value = image[0, 0, 0]
z, h, w = image.shape
non_zeros_idx = np.where(image != zero_value)
[max_z, max_h, max_w] = np.max(np.array(non_zeros_idx), axis=1)
[min_z, min_h, min_w] = np.min(np.array(non_zeros_idx), axis=1)
image = image[min_z:max_z, min_h:max_h, min_w:max_w].unsqueeze(0)
if self.keep_size:
image = image.unsqueeze(0)
image = F.interpolate(image, size=[z, h, w])
image = image.squeeze(0)
data_dict['data'] = image
return data_dict
def __repr__(self):
return self.__class__.__name__ + '(keep_size={})'.format(self.keep_size)
def get_transforms(params, is_training=True):
if params.mode == 'image':
trans_list = []
trans_list.append(MinMaxNormalization())
if params.preprocessing != 't1-linear':
trans_list.append(CropPpadding128())
trans_list.append(DropInvalidRange(keep_size=True))
if params.resample_size is not None:
trans_list.append(Resize(params.resample_size))
if params.data_preprocess == 'MinMax':
trans_list.append(MinMaxNormalization())
elif params.data_preprocess == 'NonzeorZscore':
trans_list.append(ItensityNormalizeNonzeorVolume())
elif params.data_preprocess == 'AllzeorZscore':
trans_list.append(ItensityNormalizeAllVolume())
if is_training:
if params.ContrastAugmentationTransform > 0:
trans_list.append(CheckDictSize()) # for this code library, input data must be dim=5, 1,1,128,128,128
trans_list.append(ContrastAugmentationTransform((0.3, 3.), preserve_range=True,
p_per_sample=params.ContrastAugmentationTransform))
if params.BrightnessTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
BrightnessTransform(mu=0, sigma=1, per_channel=False, p_per_sample=params.BrightnessTransform))
if params.GammaTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
GammaTransform(gamma_range=(0.5, 2), invert_image=False, per_channel=False, retain_stats=False,
p_per_sample=params.GammaTransform))
if params.BrightnessGradientAdditiveTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(BrightnessGradientAdditiveTransform(scale=(5, 5),
p_per_sample=params.BrightnessGradientAdditiveTransform))
if params.LocalSmoothingTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(LocalSmoothingTransform(scale=(5, 5),
p_per_sample=params.LocalSmoothingTransform))
if params.RandomShiftTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RandomShiftTransform(shift_mu=0, shift_sigma=3, p_per_sample=params.RandomShiftTransform))
if params.RicianNoiseTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
RicianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.RicianNoiseTransform))
            if params.GaussianNoiseTransform > 0:
                trans_list.append(CheckDictSize())
                trans_list.append(
                    GaussianNoiseTransform(noise_variance=(0, 0.1), p_per_sample=params.GaussianNoiseTransform))
            if params.GaussianBlurTransform > 0:
                trans_list.append(CheckDictSize())
                trans_list.append(
                    GaussianBlurTransform(blur_sigma=(1, 5), different_sigma_per_channel=False,
                                          p_per_sample=params.GaussianBlurTransform))
if params.Rot90Transform > 0:
trans_list.append(CheckDictSize())
trans_list.append(Rot90Transform(num_rot=(1, 2, 3), axes=(0, 1, 2), p_per_sample=params.Rot90Transform))
if params.MirrorTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(MirrorTransform(axes=(0, 1, 2), p_per_sample=params.MirrorTransform))
if params.SpatialTransform > 0:
trans_list.append(CheckDictSize())
trans_list.append(
SpatialTransform(patch_size=(params.resample_size, params.resample_size, params.resample_size),
p_el_per_sample=params.SpatialTransform,
p_rot_per_axis=params.SpatialTransform,
p_scale_per_sample=params.SpatialTransform,
p_rot_per_sample=params.SpatialTransform))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['DeepCNN', 'DeepCNN_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[49, 39, 38]))
trans_list.append(DictToImage())
transformations = Compose(trans_list)
if params.model in ['CNN2020', 'CNN2020_gcn']:
trans_list = []
trans_list.append(MinMaxNormalization())
trans_list.append(Resize(resample_size=[139, 177, 144]))
trans_list.append(ItensityNormalizeAllVolume())
trans_list.append(DictToImage())
transformations = Compose(trans_list)
elif params.mode in ["patch", "roi"]:
if params.minmaxnormalization:
transformations = Compose([MinMaxNormalization(), DictToImage()])
else:
transformations = None
elif params.mode == "slice":
trg_size = (224, 224)
if params.minmaxnormalization:
transformations = transforms.Compose([MinMaxNormalization(),
transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
transformations = transforms.Compose([transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
else:
raise ValueError("Transforms for mode %s are not implemented." % params.mode)
    print('transformer:{}'.format(repr(transformations)))
return transformations
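# Hypothetical usage sketch (added, not part of the original code): in 'image' mode the returned
# object chains dict-based operators and ends with DictToImage. Assuming the Compose in use forwards
# keyword arguments the way the operators above expect, it is applied as:
#   transformations = get_transforms(params, is_training=True)
#   tensor_out = transformations(data=torch.rand(1, 121, 145, 121))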
################################
# tsv files loaders
################################
def load_data(train_val_path, diagnoses_list,
split, n_splits=None, baseline=True, fake_caps_path=None, only_use_fake=False):
train_df = pd.DataFrame()
valid_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update acled
------------
"""
from datetime import timedelta
import pandas as pd
import dateutil.relativedelta
from hdx.data.resource import Resource
from os.path import join
from hdx.location.country import Country
from src.helpers import OutputError, hxlate, drop_columns
def get_country_url_string(country_list):
return ':OR:iso='.join([str(Country.get_m49_from_iso3(Country.get_iso3_country_code(country))) for country in country_list])
def update_acled(today, base_url, countries_to_keep, valid_names, replace_values,
column_for_cannonicalization, output_path, admbin=0):
# configuration
columns_to_keep = ['EVENT_ID_CNTY', 'EVENT_DATE', 'EVENT_TYPE', 'ACTOR1', 'ASSOC_ACTOR_1',
'ACTOR2', 'ASSOC_ACTOR_2', 'COUNTRY', 'ADMIN1', 'ADMIN2',
'LOCATION', 'LATITUDE', 'LONGITUDE', 'SOURCE', 'NOTES', 'FATALITIES']
def cannonize_names(df, valid_names, column_for_cannonicalization, replace_values):
# get the list of desired admin names
cannon_column_name = 'Name'
names = | pd.DataFrame({cannon_column_name: valid_names}) | pandas.DataFrame |
# Package imports
import pandas as pd
import requests
import datetime
from unidecode import unidecode as UnicodeFormatter
import os
import bcolors
# Local imports
import path_configuration
import url_configuration
import progress_calculator
class GrandPrix(object):
Url = None
Path = None
Requests = None
def __init__(self):
self.Url = url_configuration.Url_builder()
self.Path = path_configuration.Path()
self.Requests = requests
def import_grand_prix(self):
content = os.listdir(self.Path.get_season_path())
content.sort()
"""for year in content:
DataFrame = pd.read_csv(Path.get_season_path()+year)
print(DataFrame)"""
DataFrame = pd.read_csv(self.Path.get_season_path()+'2019.csv')
Date = list(DataFrame['Date'])
GrandPrix = list(DataFrame['Grand Prix'])
Round = list(DataFrame['Round'])
Date_obj = []
# DATE OBJ
for date in Date:
Date_obj.append(datetime.datetime.strptime(date, '%Y-%m-%d'))
Progress = progress_calculator.ProgressBar(Round)
# WHILE - BY GPS OF THE YEAR
i = 0
        while i < len(Round):
# CHECK YEAR
if Date_obj[i] < datetime.datetime.now():
# METHOD CALLS
print(bcolors.PASS + 'STARTING EXTRACTOR, GETTING FROM', GrandPrix[i], 'DATE:', Date[i] + bcolors.END)
self.drivers_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.contructors_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.pitstops_times_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.result_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.by_lap_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.current_driver_standings(Round[i], Date_obj[i].year, GrandPrix[i])
self.status(Round[i], Date_obj[i].year, GrandPrix[i])
if Date_obj[i].year > 2017:
url = self.Url.f1_url(Date_obj[i].year, Date_obj[i].date(), GrandPrix[i])
self.load_data_from_f1(url, Date_obj[i].year, GrandPrix[i])
Progress.get_progress_bar()
i = i + 1
def drivers_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVERS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_driver(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['DriverTable']
Drivers = json['Drivers']
DriversID = []
DriversInitials = []
DriversName = []
YearsOld = []
for driver in Drivers:
DriversID.append(driver['driverId'])
DriversInitials.append(driver['code'])
DriversName.append(UnicodeFormatter(driver['givenName']+' '+driver['familyName']))
YearsOld.append(
datetime.datetime.now().year - datetime.datetime.strptime(driver['dateOfBirth'], '%Y-%m-%d').year
)
Drivers_Dict = {'Driver ID': DriversID, 'Driver Initials': DriversInitials,
'Driver Name': DriversName, 'Years Old': YearsOld}
Drivers_Data = pd.DataFrame(data=Drivers_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Drivers')
Drivers_Data.to_csv(Path)
def contructors_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING CONSTRUCTORS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_constructor(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['ConstructorTable']
Constructors = json['Constructors']
ConstructorID = []
ConstructorName = []
for constructor in Constructors:
ConstructorID.append(constructor['constructorId'])
ConstructorName.append(constructor['name'])
Constructors_Dict = {"Constructor ID": ConstructorID, "Constructor Name": ConstructorName}
Constructor_Data = pd.DataFrame(data=Constructors_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Constructors')
Constructor_Data.to_csv(Path)
def pitstops_times_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING PITSTOPS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_pitstops_time(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
PitStops = Race['PitStops']
DriverID = []
Corresponding_Lap = []
Driver_Stop_Number = []
PitStop_Time = []
for pitstop in PitStops:
DriverID.append(pitstop['driverId'])
Corresponding_Lap.append(pitstop['lap'])
Driver_Stop_Number.append(pitstop['stop'])
PitStop_Time.append(pitstop['duration'])
PitStop_Dict = {'Pit Stop Lap': Corresponding_Lap, 'Driver ID': DriverID, 'Pit Stop Number': Driver_Stop_Number,
'Pit Stop Time': PitStop_Time}
PitStop_Data = pd.DataFrame(data=PitStop_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'PitStop')
PitStop_Data.to_csv(Path)
def result_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING RESULT BY RACE...', gp_name + bcolors.END)
url = self.Url.url_results(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
Results = Race['Results']
DriverPosition = []
DriverGridPosition = []
DriverID = []
ConstructorID = []
TimeToLeader = []
RaceStatus = []
FastestLapRank = []
AverageSpeed = []
for result in Results:
# DRIVER POSITION
if result['positionText'] == 'R':
DriverPosition.append(None)
else:
DriverPosition.append(result['positionText'])
# GRID
DriverGridPosition.append(result['grid'])
# DRIVER ID
DriverID.append(result['Driver']['driverId'])
# CONSTRUCTOR ID
ConstructorID.append(result['Constructor']['constructorId'])
# TIME TO LEADER
if result['position'] == '1':
TimeToLeader.append("0")
elif result['status'] != 'Finished':
Check = result['status']
if Check[0] == '+':
TimeToLeader.append(result['status'])
else:
TimeToLeader.append(None)
else:
TimeToLeader.append(result['Time']['time'])
# RACE STATUS
if result['status'][0] == '+':
RaceStatus.append('Finished')
else:
RaceStatus.append(result['status'])
            # CASE WHERE THE DRIVER LEFT THE RACE WITHOUT COMPLETING A LAP
if 'FastestLap' not in result:
# RANK FASTEST LAP
FastestLapRank.append(None)
# AVERAGE SPEED
AverageSpeed.append(None)
else:
# RANK FASTEST LAP
FastestLapRank.append(result['FastestLap']['rank'])
# AVERAGE SPEED
AverageSpeed.append(result['FastestLap']['AverageSpeed']['speed'])
Initial_Ps_Dict = {'Positions': DriverGridPosition, 'DriverID': DriverID}
Initial_Ps_Data = pd.DataFrame(data=Initial_Ps_Dict)
Initial_Ps_Data = Initial_Ps_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'InitialPositions')
Initial_Ps_Data.to_csv(Path)
Result_Dict = {'Positions': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Time to Leader': TimeToLeader, 'Status': RaceStatus,
'Fastest Rank': FastestLapRank, 'Average Speed': AverageSpeed}
Result_Data = pd.DataFrame(data=Result_Dict)
Result_Data = Result_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'Result')
Result_Data.to_csv(Path)
def by_lap_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING LAP TIMES AND POSITIONS BY RACE...', gp_name + bcolors.END)
# Progress Calculator
Progress = progress_calculator.ProgressBar(True)
# URL
url_1, url_2 = self.Url.url_lapbylap(round, year)
# LAP COUNTER
Lap_Counter = 1
# LAP VALIDATOR
Lap_v = True
# DRIVER LIST
driver_list = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, 'Drivers'))['Driver ID'].values)
# DRIVERS DICT
Lap_Times_Dict = {}
Lap_Positions_Dict = {}
# START VALUES
Lap_Times_Dict['Driver ID'] = driver_list
Lap_Positions_Dict['Driver ID'] = driver_list
while Lap_v:
# PROGRESS
Progress.get_progress_counter(Lap_Counter)
# DRIVERS LIST
Lap_Times = []
Lap_Positions = []
page = self.Requests.get(url_1 + str(Lap_Counter) + url_2)
json = page.json()
json = json['MRData']
if int(json['total']) == 0:
Lap_v = False
else:
jtemp = json['RaceTable']
jtemp = jtemp['Races'][0]
jtemp = jtemp['Laps'][0]
Laps = jtemp['Timings']
for driver in driver_list:
Driver_Out_Checker = True
for lap in Laps:
if driver == lap['driverId']:
Driver_Out_Checker = False
Lap_Times.append(lap['time'])
Lap_Positions.append(lap['position'])
if Driver_Out_Checker:
Lap_Times.append(None)
Lap_Positions.append(None)
Lap_Times_Dict[Lap_Counter] = Lap_Times
Lap_Positions_Dict[Lap_Counter] = Lap_Positions
Lap_Counter = Lap_Counter + 1
Lap_Times_Data = pd.DataFrame(data=Lap_Times_Dict)
Lap_Times_Data = Lap_Times_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'TimesByLap')
Lap_Times_Data.to_csv(Path)
Lap_Positions_Data = pd.DataFrame(data=Lap_Positions_Dict)
Lap_Positions_Data = Lap_Positions_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'PositionsByLap')
Lap_Positions_Data.to_csv(Path)
def current_driver_standings(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVER STANDINGS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_driver_standings(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StandingsTable']
json = json['StandingsLists'][0]
DriverStandings = json['DriverStandings']
# STARTING LISTS
DriverPosition = []
DriverPoints = []
DriverWins = []
DriverID = []
ConstructorID = []
for driver in DriverStandings:
DriverPosition.append(driver['position'])
DriverPoints.append(driver['points'])
DriverWins.append(driver['wins'])
DriverID.append(driver['Driver']['driverId'])
ConstructorID.append(driver['Constructors'][-1]['constructorId'])
DriverStandingsDict = {'Position': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Wins': DriverWins, 'Points': DriverPoints}
DriverStandingsData = pd.DataFrame(data=DriverStandingsDict)
DriverStandingsData = DriverStandingsData.set_index('Position')
Path = self.Path.standings_path(year)
DriverStandingsData.to_csv(Path)
def status(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING STATUS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_status(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StatusTable']
Status = json['Status']
# STARTING LISTS
StatusID = []
StatusDescription = []
StatusOccurrences = []
for state in Status:
StatusID.append(state['statusId'])
StatusDescription.append(state['status'])
StatusOccurrences.append(state['count'])
StatusDict = {'StatusID': StatusID, 'Status Description': StatusDescription,
'Status Occurrences': StatusOccurrences}
StatusData = pd.DataFrame(data=StatusDict)
StatusData = StatusData.set_index('StatusID')
Path = self.Path.grandprix_path(year, gp_name, 'RaceStatus')
StatusData.to_csv(Path)
def load_data_from_f1(self, url, year, gp_name):
print(bcolors.ITALIC + 'GETTING SOME DATA FROM F1...', gp_name + bcolors.END)
page = requests.get(url)
json = page.json()
def for_loop_by_time(json):
Time = []
Something = []
i = 0
for value in json:
if i == 0:
Time.append(value)
i = 1
else:
Something.append(value)
i = 0
return Time, Something
def weather(json):
json = json['Weather']
json = json['graph']
weather_data = json['data']
def temperature(json):
def temp_df(json, description):
Time, Temp = for_loop_by_time(json)
TrackTempDict = {"Time": Time, description: Temp}
TrackTempData = pd.DataFrame(data=TrackTempDict)
TrackTempData = TrackTempData.set_index('Time')
return TrackTempData
def track_temp(json):
print(bcolors.ITALIC + 'GETTING TRACK TEMP FROM F1...', gp_name + bcolors.END)
json = json['pTrack']
TrackTempData = temp_df(json, "Track Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'TrackTemp')
TrackTempData.to_csv(Path)
def air_temp(json):
print(bcolors.ITALIC + 'GETTING AIR TEMP FROM F1...', gp_name + bcolors.END)
json = json['pAir']
TrackTempData = temp_df(json, "Air Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'AirTemp')
TrackTempData.to_csv(Path)
track_temp(json)
air_temp(json)
def is_raining(json):
print(bcolors.ITALIC + 'GETTING WEATHER FROM F1...', gp_name + bcolors.END)
json = json['pRaining']
Time, Raining = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Is Raining": Raining}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Raining')
TrackTempData.to_csv(Path)
def wind_speed(json):
print(bcolors.ITALIC + 'GETTING WIND SPEED FROM F1...', gp_name + bcolors.END)
json = json['pWind Speed']
Time, Wind_Speed = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Speed": Wind_Speed}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Speed')
TrackTempData.to_csv(Path)
def wind_direction(json):
print(bcolors.ITALIC + 'GETTING WIND DIRECTION FROM F1...', gp_name + bcolors.END)
json = json['pWind Dir']
Time, Wind_Direction = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Direction": Wind_Direction}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Direction')
TrackTempData.to_csv(Path)
def humidity(json):
print(bcolors.ITALIC + 'GETTING HUMIDITY FROM F1...', gp_name + bcolors.END)
json = json['pHumidity']
Time, Humidity = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Humidity": Humidity}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Humidity')
TrackTempData.to_csv(Path)
def air_pressure(json):
print(bcolors.ITALIC + 'GETTING AIR PRESSURE FROM F1...', gp_name + bcolors.END)
json = json['pPressure']
Time, Air_Pressure = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Air Pressure": Air_Pressure}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Air_Pressure')
TrackTempData.to_csv(Path)
temperature(weather_data)
is_raining(weather_data)
wind_speed(weather_data)
wind_direction(weather_data)
humidity(weather_data)
air_pressure(weather_data)
def track_status(json):
print(bcolors.ITALIC + 'GETTING TRACK STATUS FROM F1...', gp_name + bcolors.END)
json = json['Scores']
json = json['graph']
TrackStatusJson = json['TrackStatus']
TrackStatus = []
Laps = []
i = 0
for lap in TrackStatusJson:
if i == 1:
if lap == '':
TrackStatus.append(None)
elif lap == 'Y':
TrackStatus.append('YellowFlag')
elif lap == 'S':
TrackStatus.append('SafetyCar')
elif lap == 'R':
TrackStatus.append('RedFlag')
else:
TrackStatus.append(lap)
i = i - 1
else:
Laps.append(lap)
i = i + 1
TrackStatusDict = {"Lap": Laps, "Status": TrackStatus}
TrackStatusData = | pd.DataFrame(data=TrackStatusDict) | pandas.DataFrame |
import pandas as pd
import glob as glob
# **Introduction**
# <NAME>
#
# The dataset from MS Birka Stockholm is in .xls (Excel-97) format,
# and the data was gathered in several steps during three different trips.
# Some of the data overlaps in its time index, and the same headers (data points) exist in several files,
# so filtering and consolidating all the data has to be done in several steps.
# Because the Excel-97 format is limited to roughly 65k rows and a limited number of columns,
# the data had to be divided into several files.
#
# Some of the data is in Boolean format, and some data points are missing, but
# the majority should be in numerical format.
#
# In all Excel files the metadata of each data point (header) is in the first 14 rows.
# The first step is to pre-process the .xls files: filter out non-unicode characters,
# insert a split character between the metadata fields and join everything into the data header,
# while keeping the index in time-series format.
#
# In[7]:
csv_data_path = '/Users/fredde/Database/csv-1year/'
xls_data_path = '/Users/fredde/Database/data-files/'
database_path = '/Users/fredde/Database/'
df = pd.DataFrame()
all_data = pd.DataFrame()
# As there are non uni-code characters in the original headers file it needs be fixed..
# The following function was found here:
# http://stackoverflow.com/questions/20078816/replace-non-ascii-characters-with-a-single-space
# And replaces all non unicode chars with a space
def remove_non_ascii(text):
return ''.join([i if ord(i) < 128 else ' ' for i in text])
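# Example (added for illustration): every character with a code point >= 128 becomes a single space.
#   remove_non_ascii('Temp °C')  # -> 'Temp  C'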
# Clean up csv
#%%
# Doing some manual import of the passengers and crew-lists, so it can be included
# in the DB from a CSV.
df = pd.read_excel(xls_data_path+'2014_crew_passengers.xlsx',index_col=0)
df.index = pd.to_datetime(df.index)
df2 = df.resample('15min').pad()
df2.to_csv(csv_data_path+'passengers_2014.csv')
headers = open(database_path + '1_headers.csv','w')
a = list(df)
for item in a:
headers.write('\n' + item)
headers.close()
#%%
# Doing some manual import of the FO-flow meters and engine readings, so it can be included
# in the DB from a CSV.
# I am resampling it to 15-min before saving it in .csv
df = pd.read_excel(xls_data_path+'Mass_flowmeters.xlsx',index_col=0)
df.index = pd.to_datetime(df.index)
df2 = df.resample('15min').pad()
df2.to_csv(csv_data_path+'Mass_flowmeters.csv')
headers = open(database_path + '2_headers.csv','w')
a = list(df)
for item in a:
headers.write('\n' + item)
headers.close()
#%%
# Doing some manual import of the 2014 fw/gw and distance data (2014_fw_gw_distance.xlsx), so it can
# be included in the DB from a CSV.
df = | pd.read_excel(xls_data_path+'2014_fw_gw_distance.xlsx',index_col=0) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 00:05:11 2017
@author: kbui1993
"""
import sys
import queue
import os
from copy import deepcopy
import pandas as pd
from matplotlib.dates import strpdate2num
#CHANGE DIRECTORIES HERE
base_directory = "C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/base(cap_and_delay)/"
output_directory = "C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/"
current_DSA = ['ALOB', 'AROR', 'AZOB', 'CADN', 'CAGS', \
'CAOP', 'CASD', 'CORS', 'CTOP', 'DCTC',\
'FLFH', 'FLMP', 'FLUF', 'FLWC', 'GALL', \
'HIOP', 'IAOP', 'ILIP', 'INOP', 'KYDA', \
'LAOP', 'MAOB', 'MDPC', 'MIOP', 'MNOP', \
'MOMA', 'MSOP', 'MWOB', 'NCCM', 'NCNC', \
'NEOR', 'NJTO', 'NMOP', 'NVLV', 'NYAP', \
'NYFL', 'NYRT', 'NYWN', 'OHLB', 'OHLC', \
'OHLP', 'OHOV', 'OKOP', 'ORUO', 'PADV', \
'PATF', 'PRLL', 'SCOP', 'TNDS', 'TNMS', \
'TXGC', 'TXSA', 'TXSB', 'UTOP', 'VATB', \
'WALC', 'WIDN', 'WIUW']
#list of cases
cases = ['default',\
'SRTR',\
'Share29_Share15_0boost(8district)',\
'Share29_Share18_3boost(8district)',\
'Share29_Share20_5boost(8district)',\
'Share35_Share15_0boost(8district)',\
'Share35_Share15_3boost(8district)',\
'Share35_Share15_5boost(8district)',\
'Share35_Share18_3boost(8district)',\
'Share35_Share20_5boost(8district)',\
'Share29_Share15_0boost(11district)',\
'Share29_Share18_3boost(11district)',\
'Share29_Share20_5boost(11district)',\
'Share35_Share18_3boost(11district)',\
'Share35_Share20_5boost(11district)',\
'Share35_Share18_0boost(11district)',\
'Share35_Share20_0boost(11district)',\
'Share29_Share15_0boost(400mile)',\
'Share29_Share18_3boost(400mile)',\
'Share29_Share20_5boost(400mile)',\
'Share35_Share15_0boost(400mile)',\
'Share35_Share15_3boost(400mile)',\
'Share35_Share15_5boost(400mile)',\
'Share35_Share18_0boost(400mile)',\
'Share35_Share18_3boost(400mile)',\
'Share35_Share18_5boost(400mile)',\
'Share35_Share20_0boost(400mile)',\
'Share35_Share20_3boost(400mile)',\
'Share35_Share20_5boost(400mile)',\
'Share35_Share22_0boost(400mile)',\
'Share35_Share22_3boost(400mile)',\
'Share35_Share22_5boost(400mile)',\
'Share29_Share15_0boost(500mile)',\
'Share29_Share18_3boost(500mile)',\
'Share29_Share20_5boost(500mile)',\
'Share35_Share15_0boost(500mile)',\
'Share35_Share15_3boost(500mile)',\
'Share35_Share15_5boost(500mile)',\
'Share35_Share18_0boost(500mile)',\
'Share35_Share18_3boost(500mile)',\
'Share35_Share18_5boost(500mile)',\
'Share35_Share20_0boost(500mile)',\
'Share35_Share20_3boost(500mile)',\
'Share35_Share20_5boost(500mile)',\
'Share35_Share22_0boost(500mile)',\
'Share35_Share22_3boost(500mile)',\
'Share35_Share22_5boost(500mile)',\
'Share29_Share15_0boost(600mile)',\
'Share29_Share18_3boost(600mile)',\
'Share29_Share20_5boost(600mile)',\
'Share35_Share15_0boost(600mile)',\
'Share35_Share15_3boost(600mile)',\
'Share35_Share15_5boost(600mile)',\
'Share35_Share18_0boost(600mile)',\
'Share35_Share18_3boost(600mile)',\
'Share35_Share18_5boost(600mile)',\
'Share35_Share20_0boost(600mile)',\
'Share35_Share20_3boost(600mile)',\
'Share35_Share20_5boost(600mile)',\
'Share35_Share22_0boost(600mile)',\
'Share35_Share22_3boost(600mile)',\
'Share35_Share22_5boost(600mile)',\
'Share29_Share15_0boost(Constrained400mile)',\
'Share29_Share18_3boost(Constrained400mile)',\
'Share29_Share20_5boost(Constrained400mile)',\
'Share35_Share15_0boost(Constrained400mile)',\
'Share35_Share15_3boost(Constrained400mile)',\
'Share35_Share15_5boost(Constrained400mile)',\
'Share35_Share18_0boost(Constrained400mile)',\
'Share35_Share18_3boost(Constrained400mile)',\
'Share35_Share18_5boost(Constrained400mile)',\
'Share35_Share20_0boost(Constrained400mile)',\
'Share35_Share20_3boost(Constrained400mile)',\
'Share35_Share20_5boost(Constrained400mile)',\
'Share29_Share15_0boost(Constrained500mile)',\
'Share29_Share18_3boost(Constrained500mile)',\
'Share29_Share20_5boost(Constrained500mile)',\
'Share35_Share15_0boost(Constrained500mile)',\
'Share35_Share15_3boost(Constrained500mile)',\
'Share35_Share15_5boost(Constrained500mile)',\
'Share35_Share18_0boost(Constrained500mile)',\
'Share35_Share18_3boost(Constrained500mile)',\
'Share35_Share18_5boost(Constrained500mile)',\
'Share35_Share20_0boost(Constrained500mile)',\
'Share35_Share20_3boost(Constrained500mile)',\
'Share35_Share20_5boost(Constrained500mile)',\
'Share29_Share15_0boost(Constrained600mile)',\
'Share29_Share18_3boost(Constrained600mile)',\
'Share29_Share20_5boost(Constrained600mile)',\
'Share35_Share15_0boost(Constrained600mile)',\
'Share35_Share15_3boost(Constrained600mile)',\
'Share35_Share15_5boost(Constrained600mile)',\
'Share35_Share18_0boost(Constrained600mile)',\
'Share35_Share18_3boost(Constrained600mile)',\
'Share35_Share18_5boost(Constrained600mile)',\
'Share35_Share20_0boost(Constrained600mile)',\
'Share35_Share20_3boost(Constrained600mile)',\
'Share35_Share20_5boost(Constrained600mile)']
#list of files
files = ['C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/base(cap_and_delay)/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/SRTR/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/8district/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/Current/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share22_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share22_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(400)/Share35_Share22_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share22_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share22_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(500)/Share35_Share22_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share22_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share22_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/LivSim(600)/Share35_Share22_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(400)/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(500)/Share35_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share29_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share29_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share29_Share20_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share15_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share15_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share15_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share18_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share18_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share18_5boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share20_0boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share20_3boost/',\
'C:/Users/kbui1993/Desktop/New Results/Cap_and_Delay/ConstrainedLivSim(600)/Share35_Share20_5boost/']
#create a list to store the total number of transplant per DSA across replication-years for each case
ytransplant = []
#create a list to store the total MELD per DSA across replication-years for each case
ymeld = []
for file in files:
#read in transplant result
ytransplant_case = pd.read_csv(file+"RawOutput_ytransplants.csv")
#eliminate miscellaneous rows and columns
ytransplant_case = ytransplant_case.iloc[1:,3:]
#compute total transplant per DSA
ytransplant_case = ytransplant_case.sum(axis = 0)
#Add to the list
ytransplant.append(ytransplant_case)
#read in MELD result
ymeld_case = | pd.read_csv(file+"RawOutput_yMELD.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
def make_onehot(sequences, seq_length):
"""
Converts a sequence string into a one-hot encoded array
"""
fd = {'A': [1, 0, 0, 0], 'T': [0, 1, 0, 0], 'G': [0, 0, 1, 0],
'C': [0, 0, 0, 1], 'N': [0, 0, 0, 0]}
onehot = [fd[base] for seq in sequences for base in seq]
onehot_np = np.reshape(onehot, (-1, seq_length, 4))
return onehot_np
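# Illustrative sketch (added, not part of the original code): each base maps to a 4-element one-hot
# row and 'N' maps to all zeros, so two 4-mers give a (2, 4, 4) array:
#   make_onehot(['ACGT', 'ANNT'], 4).shape  # -> (2, 4, 4)
#   make_onehot(['A'], 1)[0, 0]             # -> array([1, 0, 0, 0])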
def load_data(seq_data_file, chromatin_data_files, window_size):
"""
Loads the input user-provided data.
"""
# Load data (without using a generator)
# Note: Switching to using Pandas instead of numpy loadtxt due to
# the faster pandas load time.
# sequence
seq = pd.read_csv(seq_data_file, header=None,
names=['seq_str'])
seq = seq['seq_str'].values
seq_dat_onehot = make_onehot(list(seq), window_size)
# prior chromatin (merge user provided chromatin data)
chromatin_data = []
for chromatin_data_file in chromatin_data_files:
dat = pd.read_csv(chromatin_data_file, delim_whitespace=True,
header=None)
chromatin_data.append(dat)
merged_chromatin_data = | pd.concat(chromatin_data, axis=1) | pandas.concat |
import os, sys, re, copy
import pandas as pd
import rdkit
from rdkit import Chem, RDLogger
from rdkit.Chem import rdChemReactions
RDLogger.DisableLog('rdApp.*')
sys.path.append('../')
from LocalTemplate.template_extractor import extract_from_reaction
from Extract_from_train_data import build_template_extractor, get_reaction_template, get_full_template
def get_edit_site(smiles):
mol = Chem.MolFromSmiles(smiles)
A = [a for a in range(mol.GetNumAtoms())]
B = []
for atom in mol.GetAtoms():
others = []
bonds = atom.GetBonds()
for bond in bonds:
atoms = [bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()]
other = [a for a in atoms if a != atom.GetIdx()][0]
others.append(other)
b = [(atom.GetIdx(), other) for other in sorted(others)]
B += b
V = []
for a in A:
V += [(a,b) for b in A if a != b and (a,b) not in B]
return V, B
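# Illustrative sketch (added, not part of the original code): B lists the directed pairs of bonded
# atoms ("real" edit sites) and V every remaining ordered atom pair ("virtual" edit sites), e.g. for
# ethanol:
#   V, B = get_edit_site('CCO')
#   B  # -> [(0, 1), (1, 0), (1, 2), (2, 1)]
#   V  # -> [(0, 2), (2, 0)]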
def labeling_dataset(args, split, template_dicts, template_infos, extractor):
if os.path.exists('../data/%s/preprocessed_%s.csv' % (args['dataset'], split)) and args['force'] == False:
print ('%s data already preprocessed...loaded data!' % split)
return pd.read_csv('../data/%s/preprocessed_%s.csv' % (args['dataset'], split))
rxns = {}
with open('../data/%s/%s.txt' % (args['dataset'], split), 'r') as f:
for i, line in enumerate(f.readlines()):
rxns[i] = line.split(' ')[0]
reactants = []
reagents = []
products = []
labels_sep = []
labels_mix = []
frequency = []
success = 0
for i, reaction in rxns.items():
reactant = reaction.split('>>')[0]
reagent = ''
rxn_labels_s = []
rxn_labels_m = []
try:
rxn, result = get_reaction_template(extractor, reaction, i)
template = result['reaction_smarts']
reactant = result['reactants']
product = result['products']
reagent = '.'.join(result['necessary_reagent'])
reactants.append(reactant)
reagents.append(reagent)
products.append(product)
if len(result['necessary_reagent']) == 0:
reactant_mix = reactant
else:
reactant_mix = '%s.%s' % (reactant, reagent)
edit_bonds = {edit_type: edit_bond[0] for edit_type, edit_bond in result['edits'].items()}
H_change, Charge_change = result['H_change'], result['Charge_change']
template_H = get_full_template(template, H_change, Charge_change)
if template_H not in template_infos.keys():
labels_sep.append(rxn_labels_s)
labels_mix.append(rxn_labels_m)
frequency.append(0)
continue
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
except Exception as e:
print (i, e)
labels_sep.append(rxn_labels_s)
labels_mix.append(rxn_labels_m)
frequency.append(0)
continue
edit_n = 0
for edit_type in edit_bonds:
if edit_type == 'C':
edit_n += len(edit_bonds[edit_type])/2
else:
edit_n += len(edit_bonds[edit_type])
if edit_n <= args['max_edit_n']:
virtual_sites_s, real_sites_s = get_edit_site(reactant)
virtual_sites_m, real_sites_m = get_edit_site(reactant_mix)
try:
success += 1
for edit_type in edit_bonds:
bonds = edit_bonds[edit_type]
for bond in bonds:
if edit_type != 'A':
rxn_labels_s.append(('r', real_sites_s.index(bond), template_dicts['real']['%s_%s' % (template_H, edit_type)]))
rxn_labels_m.append(('r', real_sites_m.index(bond), template_dicts['real']['%s_%s' % (template_H, edit_type)]))
else:
rxn_labels_s.append(('v', virtual_sites_s.index(bond), template_dicts['virtual']['%s_%s' % (template_H, edit_type)]))
rxn_labels_m.append(('v', virtual_sites_m.index(bond), template_dicts['virtual']['%s_%s' % (template_H, edit_type)]))
labels_sep.append(rxn_labels_s)
labels_mix.append(rxn_labels_m)
frequency.append(template_infos[template_H]['frequency'])
except Exception as e:
print (i, e)
labels_sep.append(rxn_labels_s)
labels_mix.append(rxn_labels_m)
frequency.append(0)
continue
if i % 100 == 0:
                print ('\rProcessing %s %s data... %s successful (%s/%s)' % (args['dataset'], split, success, i, len(rxns)), end='', flush=True)
else:
print ('\nReaction # %s has too many edits (%s)...may be wrong mapping!' % (i, edit_n))
labels_sep.append(rxn_labels_s)
labels_mix.append(rxn_labels_m)
frequency.append(0)
    print ('\nDerived templates cover %.3f of %s data reactions' % ((success/len(rxns)), split))
df = pd.DataFrame({'Reactants': reactants, 'Reagents': reagents, 'Products': products, 'Labels_sep': labels_sep, 'Labels_mix': labels_mix, 'Frequency': frequency})
df.to_csv('../data/%s/preprocessed_%s.csv' % (args['dataset'], split))
return df
def combine_preprocessed_data(train_data, val_data, test_data, args):
train_data['Split'] = ['train'] * len(train_data)
val_data['Split'] = ['valid'] * len(val_data)
test_data['Split'] = ['test'] * len(test_data)
all_data = train_data.append(val_data, ignore_index=True)
all_data = all_data.append(test_data, ignore_index=True)
all_data['Mask'] = [int(f>=args['min_template_n']) for f in all_data['Frequency']]
print ('Valid data size: %s' % len(all_data))
all_data.to_csv('../data/%s/labeled_data.csv' % args['dataset'], index = None)
return
def load_templates(args):
template_dicts = {}
for site in ['real', 'virtual']:
template_df = pd.read_csv('../data/%s/%s_templates.csv' % (args['dataset'], site))
template_dict = {template_df['Template'][i]:template_df['Class'][i] for i in template_df.index}
print ('loaded %s %s templates' % (len(template_dict), site))
template_dicts[site] = template_dict
template_infos = | pd.read_csv('../data/%s/template_infos.csv' % args['dataset']) | pandas.read_csv |
import os
import glob
import psycopg2
import pandas as pd
import json
from io import StringIO
import logging
import datetime
from postgre import Postgre
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def get_json_data(filepath):
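    """Walk `filepath` and yield the parsed contents of every *.json file found.

    Files that are not a single JSON document are re-read line by line
    (one JSON object per line); lines that still fail to parse yield None.
    """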
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for file in files :
with open(os.path.abspath(file), 'r') as f:
try:
yield json.load(f)
            except ValueError:
                # Not a single JSON document; fall back to line-delimited JSON.
with open(os.path.abspath(file), 'r') as f:
for line in f.readlines():
try:
yield json.loads(line)
                        except ValueError:
                            # Skip lines that are not valid JSON.
yield None
def df_rows_to_list(df:pd.DataFrame, number_of_rows:int=None):
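    """Return the first `number_of_rows` rows of `df` as a list of lists of cell
    values (all rows when `number_of_rows` is None)."""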
list_of_row_values = []
number_of_rows = number_of_rows if number_of_rows else df.shape[0]
for index in range(0, number_of_rows):
list_of_row_values.append(list(df.values[index]))
return list_of_row_values
def list_to_string_io(list_of_entries:list):
"""
Return file like object of the type StringIO from a given list of list of strings.
Argument:
- list_of_entries {list} - list of list of strings to transform to StringIO
Example:
[
['AR8IEZO1187B99055E', 'SOINLJW12A8C13314C', 'City Slickers', 2008, 149.86404],
['AR558FS1187FB45658', 'SOGDBUF12A8C140FAA', 'Intro', 2003, 75.67628]
]
Return:
{StringIO} - file type object with values in input list concatenated.
Example:
'AR8IEZO1187B99055E\tSOINLJW12A8C13314C\tCity Slickers\t2008\t149.86404\n
AR558FS1187FB45658\tSOGDBUF12A8C140FAA\tIntro\t2003\t75.67628'
"""
return StringIO('\n'.join(['\t'.join([str(entry) for entry in set_of_entries]) for set_of_entries in list_of_entries]))
def convert_timestamp(timestamp_miliseconds_list:list):
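    """Convert a list of epoch timestamps given in milliseconds to datetime.datetime objects."""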
series = []
for record in timestamp_miliseconds_list:
series.append(datetime.datetime.fromtimestamp(record/1000.0))
return series
class Config(object):
songs_table_name = "songs"
artists_table_name = "artists"
time_table_name = "time"
users_table_name = "users"
songsplay_table_name = 'songplays'
table_info ={
songs_table_name: {'artist_id': 'CHAR(18) NOT NULL',
'song_id': 'CHAR(18) NOT NULL',
'title': 'VARCHAR(100)',
'year': 'INTEGER',
'duration':'REAL'
},
artists_table_name: {'artist_id': 'CHAR(18) NOT NULL',
'artist_name': 'VARCHAR(100)',
'artist_location': 'VARCHAR(100)',
'artist_latitude': 'REAL',
'artist_longitude':'REAL'
},
time_table_name: {'day': 'INTEGER',
'hour': 'INTEGER',
'month': 'INTEGER',
'timestamp': 'TIMESTAMP(6)',
'week': 'INTEGER',
'weekday': 'INTEGER',
'year': 'INTEGER',
},
users_table_name: {'userId': 'INTEGER',
'firstName': 'VARCHAR(55)',
'lastName': 'VARCHAR(55)',
'gender': 'CHAR(1)',
'level': 'CHAR(10)'
},
songsplay_table_name:{
"ts":"BIGINT",
"userId":"INTEGER",
"level":"CHAR(10)",
"song_id":"CHAR(18)",
"artist_id":"CHAR(18)",
"sessionId":"INTEGER",
"location":"VARCHAR(255)",
"userAgent":"VARCHAR(255)",
}
}
def get_song_artist_id(postgre:Postgre, song_title:str, artist_name, song_duration:int, config:Config):
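    """Look up (song_id, artist_id) for a played song by joining the songs and artists
    tables on the song title (the duration and artist-name filters are currently
    commented out in the query). Returns the first matching pair, or a null/empty
    result when inputs are missing or nothing matches."""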
results = [None, None]
if all((song_title, artist_name, song_duration)):
song_select ="""SELECT DISTINCT a.song_id, a.artist_id
FROM {SONGS_TABLE_NAME} a
FULL JOIN {ARTISTS_TABLE_NAME} b ON a.artist_id = b.artist_id
WHERE a.title='{SONG_TITLE}'
-- AND a.duration={SONG_DURATION}
-- AND b.artist_name='{ARTIST_NAME}'
"""
results = postgre._execute_query(song_select.format(
SONGS_TABLE_NAME=config.songs_table_name,
ARTISTS_TABLE_NAME=config.artists_table_name,
SONG_TITLE=song_title.replace("'","''"),
SONG_DURATION=song_duration,
ARTIST_NAME=artist_name.replace("'","''") ),
results=True)
if results:
results = list(results[0].values())
return results
def main():
sparkifydb = 'sparkifydb'
Postgre('studentdb').create_database(sparkifydb)
postgre = Postgre(sparkifydb)
config = Config()
songs_filepath = "./data/song_data"
songs_data =[json_data for json_data in get_json_data(songs_filepath) if json_data]
songs_df = pd.DataFrame.from_dict(songs_data)
logs_filepath = "./data/log_data"
logs_data =[json_data for json_data in get_json_data(logs_filepath) if json_data]
logs_df = pd.DataFrame.from_dict(logs_data)
songs_column_names = list(config.table_info.get(config.songs_table_name).keys())
song_selected_df = songs_df[songs_column_names]
postgre.create_table(table_name=config.songs_table_name, columns_dict=config.table_info.get(config.songs_table_name))
artists_column_names = list(config.table_info.get(config.artists_table_name).keys())
artist_df = songs_df[artists_column_names]
postgre.create_table(table_name=config.artists_table_name, columns_dict=config.table_info.get(config.artists_table_name))
time_df = logs_df[logs_df['page'] == 'NextSong'].reset_index(drop=True)
time_df = time_df.apply(lambda x: convert_timestamp(x) if x.name == 'ts' else x)
time_data = (time_df.ts, time_df.ts.dt.hour, time_df.ts.dt.day, time_df.ts.dt.week, time_df.ts.dt.month, time_df.ts.dt.year, time_df.ts.dt.weekday)
column_labels = ('timestamp', 'hour', 'day', 'week', 'month', 'year', 'weekday')
time_dict = []
for index in range(0, len(time_data[0])):
time_dict.append({key:value for key, value in zip(column_labels, (entry[index] for entry in time_data))})
time_df = | pd.DataFrame(time_dict) | pandas.DataFrame |
import operator
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.tests.apply.common import frame_transform_kernels
from pandas.tests.frame.common import zip_frames
def unpack_obj(obj, klass, axis):
"""
Helper to ensure we have the right type of object for a test parametrized
over frame_or_series.
"""
if klass is not DataFrame:
obj = obj["A"]
if axis != 0:
pytest.skip(f"Test is only for DataFrame with axis={axis}")
return obj
def test_transform_ufunc(axis, float_frame, frame_or_series):
# GH 35964
obj = unpack_obj(float_frame, frame_or_series, axis)
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(obj)
# ufunc
result = obj.transform(np.sqrt, axis=axis)
expected = f_sqrt
tm.assert_equal(result, expected)
@pytest.mark.parametrize("op", frame_transform_kernels)
def test_transform_groupby_kernel(axis, float_frame, op, request):
# GH 35964
args = [0.0] if op == "fillna" else []
if axis == 0 or axis == "index":
ones = np.ones(float_frame.shape[0])
else:
ones = np.ones(float_frame.shape[1])
expected = float_frame.groupby(ones, axis=axis).transform(op, *args)
result = float_frame.transform(op, axis, *args)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ops, names",
[
([np.sqrt], ["sqrt"]),
([np.abs, np.sqrt], ["absolute", "sqrt"]),
(np.array([np.sqrt]), ["sqrt"]),
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
],
)
def test_transform_listlike(axis, float_frame, ops, names):
# GH 35964
other_axis = 1 if axis in {0, "index"} else 0
with np.errstate(all="ignore"):
expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
if axis in {0, "index"}:
expected.columns = MultiIndex.from_product([float_frame.columns, names])
else:
expected.index = MultiIndex.from_product([float_frame.index, names])
result = float_frame.transform(ops, axis=axis)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ops", [[], np.array([])])
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
obj = unpack_obj(float_frame, frame_or_series, 0)
with pytest.raises(ValueError, match="No transform functions were provided"):
obj.transform(ops)
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
# GH 35964
if axis == 0 or axis == "index":
e = float_frame.columns[0]
expected = float_frame[[e]].transform(np.abs)
else:
e = float_frame.index[0]
expected = float_frame.iloc[[0]].transform(np.abs)
result = float_frame.transform(box({e: np.abs}), axis=axis)
tm.assert_frame_equal(result, expected)
def test_transform_dictlike_mixed():
# GH 40018 - mix of lists and non-lists in values of a dictionary
df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
expected = DataFrame(
[[1.0, 1, 1.0], [2.0, 4, 2.0]],
columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ops",
[
{},
{"A": []},
{"A": [], "B": "cumsum"},
{"A": "cumsum", "B": []},
{"A": [], "B": ["cumsum"]},
{"A": ["cumsum"], "B": []},
],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
obj = unpack_obj(float_frame, frame_or_series, 0)
with pytest.raises(ValueError, match="No transform functions were provided"):
obj.transform(ops)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
# GH 35964
obj = unpack_obj(float_frame, frame_or_series, axis)
# transform uses UDF either via apply or passing the entire DataFrame
def func(x):
# transform is using apply iff x is not a DataFrame
if use_apply == isinstance(x, frame_or_series):
# Force transform to fallback
raise ValueError
return x + 1
result = obj.transform(func, axis=axis)
expected = obj + 1
tm.assert_equal(result, expected)
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
def test_transform_method_name(method):
# GH 19760
df = DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail]
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series):
# GH 35964
obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms
if frame_or_series is not DataFrame:
obj = obj["A"]
msg = "Transform function failed"
# tshift is deprecated
warn = None if op != "tshift" else FutureWarning
with tm.assert_produces_warning(warn):
with pytest.raises(ValueError, match=msg):
obj.transform(op)
with pytest.raises(ValueError, match=msg):
obj.transform([op])
with pytest.raises(ValueError, match=msg):
obj.transform({"A": op})
with pytest.raises(ValueError, match=msg):
obj.transform({"A": [op]})
@pytest.mark.parametrize("op", frame_kernels_raise)
def test_transform_partial_failure(op):
# GH 35964 & GH 40211
match = "Allowing for partial failure is deprecated"
# Using object makes most transform kernels fail
df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})
expected = df[["B"]].transform([op])
with | tm.assert_produces_warning(FutureWarning, match=match) | pandas._testing.assert_produces_warning |
import unittest
import pickle as pkl
import numpy as np
import pandas as pd
import os
from keras.preprocessing import image
from keras.applications import vgg16
from src.server.context import Context
from src.model.prediction import PredictionHandler
from src.model.encoder import Encoder
DATA_DIR = "tests/data"
EXPECTED_DIR = "tests/expected"
FILES = [
"predicted_categories_VSAM+FL_deepfashion.pkl",
"predicted_categories_probs_VSAM+FL_deepfashion.pkl",
"predicted_attributes_VSAM+FL_deepfashion.pkl",
"predicted_attributes_probs_VSAM+FL_deepfashion.pkl",
]
TOLERANCE = 1e-06
def load_encoder(file):
with open(file, 'rb') as fp:
params = pkl.load(fp)
encoder = Encoder(*params)
n_classes = len(encoder.encoder.classes_)
return encoder, n_classes
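# Fuzzy comparison helpers: two DataFrame rows/frames count as equal when their
# indices match and all values agree within TOLERANCE.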
def is_close_row(row1, row2):
index1, values1 = row1
index2, values2 = row2
return index1 == index2 and all(np.isclose(values1, values2, rtol=TOLERANCE, atol=TOLERANCE))
def is_close_df(df1, df2):
return all([
is_close_row(row1, row2) for row1, row2 in zip(df1.iterrows(), df2.iterrows())
])
class PredictionHandlerUT(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def setUp(self) -> None:
config = Context()
self.expected = []
model_dir: str = config.model_dir
categories_path: str = model_dir + '/' + config.ohe_categories
attributes_path: str = model_dir + '/' + config.ohe_attributes
weights_path: str = model_dir + '/' + config.weights
self.categories_encoder, n_categories = load_encoder(categories_path)
self.attributes_encoder, n_attributes = load_encoder(attributes_path)
self.service = PredictionHandler(weights_path, n_categories, n_attributes)
for file in FILES:
with open(EXPECTED_DIR + "/" + file, 'rb') as fp:
self.expected.append(pkl.load(fp))
def __test_single_image(self, file_name):
img = image.load_img(DATA_DIR + '/' + file_name, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = vgg16.preprocess_input(x)
heatmap = np.random.randn(224, 224)
heatmap = np.expand_dims(heatmap, axis=0)
heatmap = np.expand_dims(heatmap, axis=3)
result_x = self.service.predict([x, heatmap])
# for multiclass - category level
pred_cat2 = result_x[0]
max_pred_indx = np.argmax(pred_cat2)
prob_cat2 = np.max(pred_cat2)
np_zeros_ones_array_cat2 = np.zeros([pred_cat2.shape[1], 1], dtype=np.int8)
np_zeros_ones_array_cat2[max_pred_indx] = 1
np_diagonal = np.zeros(shape=(pred_cat2.shape[1], pred_cat2.shape[1]))
np.fill_diagonal(np_diagonal, 1)
columns = [x[0] for x in self.categories_encoder.decode(np_diagonal)]
y_pred_cat = pd.DataFrame(np.array(np_zeros_ones_array_cat2).reshape(1, -1), columns=columns).T
y_pred_cat_expected = pd.DataFrame(self.expected[0].loc[file_name])
self.assertTrue(is_close_df(y_pred_cat, y_pred_cat_expected))
y_pred_probs_cat = pd.DataFrame(pred_cat2.reshape(1, -1), columns=columns).T
y_pred_probs_cat_expected = pd.DataFrame(self.expected[1].loc[file_name])
self.assertTrue(is_close_df(y_pred_probs_cat, y_pred_probs_cat_expected))
# for multilabel - attributes level
multi_label_threshold = 0.5 # threshold used when the focal loss is applied
pred_att = result_x[1]
np_pred_att_boolean = np.array(pred_att) > multi_label_threshold
prob_att = pred_att[0][(np.where(np_pred_att_boolean)[1])]
np_zeros_ones_array_att = np_pred_att_boolean.astype(int)
np_diagonal = np.zeros(shape=(pred_att.shape[1], pred_att.shape[1]))
np.fill_diagonal(np_diagonal, 1)
columns = [x[0] for x in self.attributes_encoder.decode(np_diagonal)]
y_pred_att = | pd.DataFrame(np_zeros_ones_array_att, columns=columns) | pandas.DataFrame |
#!/usr/bin/env python
import pandas as pd
import sys
import os
from os.path import basename as bn
from snakemake.io import expand
# configuration
def prepost_string(config):
"""
Generate preprocess string based on configuration
:param config: Snakemake config dictionary
:return: PREPROCESS, POSTPROCESS tuple
"""
PREPROCESS = ""
POSTPROCESS = ""
preprocess_suffices = {"sortmerna": "", "trimming": "", "phixfilt": "",
"fastuniq": ""}
# SortMeRNA rRNA filtering
if config["preprocessing"]["sortmerna"]:
PREPROCESS += ".sortmerna"
preprocess_suffices["trimming"] = ".sortmerna"
# Trimming
if config["preprocessing"]["trimmomatic"]:
PREPROCESS += ".trimmomatic"
preprocess_suffices["phixfilt"] = preprocess_suffices[
"trimming"] + ".trimmomatic"
elif config["preprocessing"]["cutadapt"]:
PREPROCESS += ".cutadapt"
preprocess_suffices["phixfilt"] = preprocess_suffices[
"trimming"] + ".cutadapt"
else:
preprocess_suffices["phixfilt"] = preprocess_suffices["trimming"]
# Filtering
if config["preprocessing"]["phix_filter"]:
preprocess_suffices["fastuniq"] = preprocess_suffices[
"phixfilt"] + ".phixfilt"
PREPROCESS += ".phixfilt"
else:
preprocess_suffices["fastuniq"] = preprocess_suffices["phixfilt"]
# Deduplication
if config["preprocessing"]["fastuniq"]:
PREPROCESS += ".fastuniq"
if PREPROCESS != "":
config["run_preprocessing"] = True
else:
config["run_preprocessing"] = False
if config["remove_duplicates"]:
POSTPROCESS += ".markdup"
return PREPROCESS, POSTPROCESS, preprocess_suffices, config
def parse_samples(df, config, PREPROCESS):
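    """Parse the sample annotation table into two nested dicts.

    samples[sample][unit] maps to the raw fastq paths ('R1'/'R2', or 'se' for
    single-end data); assemblies[assembly_group][sample][unit] maps to the
    corresponding preprocessed fastq paths that are later fed to the assembler.
    """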
assemblies = {}
samples = {}
df.fillna("", inplace=True)
for i in list(df.index):
# Add sample to dict
sample = df.iloc[i]["sample"]
if sample not in samples.keys():
samples[sample] = {}
# Add unit to dict
unit = str(df.iloc[i]["unit"])
if unit not in samples[sample].keys():
samples[sample][unit] = {}
R1 = df.iloc[i]["fq1"]
groups = []
r2 = False
# Set preprocessed file paths
R1_p = config["paths"]["results"]+"/intermediate/preprocess/{}_{}_R1{}.fastq.gz".format(sample, unit, PREPROCESS)
R2_p = config["paths"]["results"]+"/intermediate/preprocess/{}_{}_R2{}.fastq.gz".format(sample, unit, PREPROCESS)
se_p = config["paths"]["results"]+"/intermediate/preprocess/{}_{}_se{}.fastq.gz".format(sample, unit, PREPROCESS)
# Initiate keys for all assembly group values
if "assembly" in df.columns:
assem_list = df.iloc[i]["assembly"].split(",")
assem_list = [a for a in assem_list if a != ""]
for a in assem_list:
if a not in assemblies.keys():
assemblies[a] = {}
else:
assem_list = []
        # Handling of paired and/or single end sequence files. If the sample
        # annotation file has an 'fq2' column, add the read files as 'R1' and
        # 'R2'
if "fq2" in df.columns and df.iloc[i]["fq2"]:
R2 = df.iloc[i]["fq2"]
r2 = True
samples[sample][unit]["R1"] = R1
samples[sample][unit]["R2"] = R2
# Add filepaths to preprocessed output files for each of the read
# files in each of the assemblies. This will be the initial
# input to the assembly rule
for a in assem_list:
if sample not in assemblies[a].keys():
assemblies[a][sample] = {unit: {}}
if unit not in assemblies[a][sample].keys():
assemblies[a][sample][unit] = {}
if r2:
assemblies[a][sample][unit]["R1"] = [R1_p]
assemblies[a][sample][unit]["R2"] = [R2_p]
else:
assemblies[a][sample][unit]["se"] = [se_p]
# If there is no 'fq2' column, add the single file path as 'se'
else:
samples[sample][unit]["se"] = R1
for a in assemblies:
if sample not in assemblies[a].keys():
assemblies[a][sample] = {unit: {}}
assemblies[a][sample][unit]["se"] = [se_p]
return samples, assemblies
def check_uppmax(config):
"""
Set specific params for running on uppmax
:param config: Snakemake config
:return: updated config dictionary
"""
import platform
hostname = platform.node()
if 'uppmax.uu.se' in hostname:
config["runOnUppMax"] = True
# Set temp to $TMPDIR
config["paths"]["temp"] = "$TMPDIR"
else:
config["runOnUppMax"] = False
return config
def check_annotation(config):
"""
Checks whether to run annotation/assembly
:param config: Snakemake config
:return: Updated config dict
"""
# Check whether to set annotation downstream of assembly
tools = [config["annotation"][key] for key in config["annotation"].keys()]
assems = [config["assembly"]["metaspades"], config["assembly"]["megahit"]]
config["run_assembly"] = False
if True in tools and True in assems:
config["run_annotation"] = True
# if True also assume the user wants assembly
config["run_assembly"] = True
# Set megahit as default unless metaspades is set
if not config["assembly"]["megahit"] and not config["assembly"][
"metaspades"]:
config["assembly"]["megahit"] = True
return config
def check_assembly(config, assemblies):
"""
Check assemblies and config settings
:param config: Snakemake config
:param assemblies: Assembly dictionary
    :return: Updated assembly dict (single-end-only assemblies removed when Metaspades is used)
"""
config["assembler"] = "Megahit" if config["megahit"] else "Metaspades"
if len(assemblies) > 0:
if config["assembly"]["metaspades"]:
# Remove single-end only assemblies
# that Metaspades won't be able to run
assemblies = filter_metaspades_assemblies(assemblies)
return assemblies
def check_classifiers(config):
"""
Set paths and params specific to classifiers
:param config: Snakemake config
:return: Updated config dict
"""
# Add read-based config info
config["centrifuge"]["index_path"] = ""
config["centrifuge"]["base"] = ""
config["centrifuge"]["dir"] = ""
if config["classification"]["centrifuge"]:
# Check if custom database exists
custom = expand("{b}.{i}.cf", b=config["centrifuge"]["custom"],
i=[1, 2, 3])
if list(set([os.path.exists(x) for x in custom]))[0]:
config["centrifuge"]["index_path"] = config["centrifuge"]["custom"]
# If not, use prebuilt default
else:
p = config["centrifuge"]["prebuilt"]
config["centrifuge"]["index_path"] = "resources/centrifuge/"+p
# Set centrifuge index config variables
index_path = config["centrifuge"]["index_path"]
config["centrifuge"]["dir"] = os.path.dirname(index_path)
config["centrifuge"]["base"] = bn(index_path)
config["kraken"]["index_path"] = ""
config["kraken"]["mem"] = ""
if config["classification"]["kraken"]:
# Check if custom database exists
custom = expand(config["kraken"]["custom"]+"/{n}.k2d",
n=["hash", "opts", "taxo"])
if list(set(os.path.exists(x) for x in custom))[0]:
config["kraken"]["index_path"] = config["kraken"]["custom"]
# If not, use prebuilt or standard
elif config["kraken"]["standard_db"]:
config["kraken"]["index_path"] = "resources/kraken/standard"
else:
config["kraken"]["index_path"] = "resources/kraken/prebuilt/"+config["kraken"]["prebuilt"]
if config["kraken"]["reduce_memory"]:
config["kraken"]["mem"] += "--memory-mapping"
return config
# preprocessing functions
def preprocessing_input(config):
if config["run_preprocessing"] or config["preprocessing"]["fastqc"]:
return config["paths"]["results"]+"/report/samples_report.html"
return []
def link(target, link_name):
"""
Generates symlinks with absolute paths
:param target:
:param link_name:
:return:
"""
src = os.path.abspath(target)
dst = os.path.abspath(link_name)
os.symlink(src, dst)
def is_pe(d):
if "R2" in d.keys():
return True
return False
def get_all_files(samples, directory, suffix="", nested=False):
"""
Returns a list of files based on samples and directory
:param samples: Samples dictionary
:param directory: Directory to find files in
:param suffix: Suffix of files to return
:param nested: If True look for files inside sample_run directory
:return:
"""
files = []
if type(directory) == list:
directory = directory[0]
for sample in samples:
for unit in samples[sample].keys():
if nested:
d = "{}/{}_{}".format(directory, sample, unit)
else:
d = "{}".format(directory)
if is_pe(samples[sample][unit]):
files.append(d+"/{}_{}_pe{}".format(sample, unit, suffix))
else:
files.append(d+"/{}_{}_se{}".format(sample, unit, suffix))
return files
def multiqc_input(samples, config):
files = []
pre, post, d, _ = prepost_string(config)
for sample in samples.keys():
for unit in samples[sample].keys():
if is_pe(samples[sample][unit]):
pairs = ["R1", "R2"]
seq_type = "pe"
else:
pairs = ["se"]
seq_type = "se"
files += get_fastqc_files(sample, unit, pairs, config, pre)
files += get_trim_logs(sample, unit, pairs, config, d)
files += get_filt_logs(sample, unit, seq_type, config, d)
files += get_sortmerna_logs(sample, unit, seq_type, config)
return files
def get_fastqc_files(sample, unit, pairs, config, pre):
"""Get all fastqc output"""
if config["preprocessing"]["fastqc"]:
files = expand(config["paths"]["results"]+"/intermediate/fastqc/{sample}_{unit}_{pair}{PREPROCESS}_fastqc.zip",
sample=sample, unit=unit, pair=pairs, PREPROCESS=pre)
return files
return []
def get_trim_logs(sample, unit, pairs, config, d):
if not config["preprocessing"]["trimmomatic"] and not \
config["preprocessing"]["cutadapt"]:
return []
if config["preprocessing"]["trimmomatic"]:
trimmer = "trimmomatic"
else:
trimmer = "cutadapt"
files = expand(config["paths"]["results"]+"/intermediate/preprocess/{sample}_{unit}_{pair}{s}.{trimmer}.log",
sample=sample, unit=unit, pair=pairs, s=d["trimming"],
trimmer=trimmer)
return files
def get_filt_logs(sample, unit, seq_type, config, d):
if not config["preprocessing"]["phix_filter"]:
return []
files = expand(config["paths"]["results"]+"/intermediate/preprocess/{sample}_{unit}_PHIX_{seq_type}{s}.log",
sample=sample, unit=unit, seq_type=seq_type, s=d["phixfilt"])
return files
def get_sortmerna_logs(sample, unit, seq_type, config):
if not config["preprocessing"]["sortmerna"]:
return []
files = expand(config["paths"]["results"]+"/intermediate/preprocess/{sample}_{unit}_{seq_type}.sortmerna.log",
sample=sample, unit=unit, seq_type=seq_type)
return files
def get_trimmomatic_string(seq_type, config):
"""
Generates trimsetting string for Trimmomatic
:param seq_type: PE or SE depending on sequencing type
:return: string
"""
trim_adapters = config["trimmomatic"]["trim_adapters"]
adapter_fasta_dir = "$CONDA_PREFIX/share/trimmomatic/adapters"
# Get params based on sequencing type
param_dict = config["trimmomatic"][seq_type]
# Set path to adapter
adapter = "{}/{}.fa".format(adapter_fasta_dir, param_dict["adapter"])
adapter_params = param_dict["adapter_params"]
pre_adapter_params = param_dict["pre_adapter_params"]
post_adapter_params = param_dict["post_adapter_params"]
trimsettings = pre_adapter_params
if trim_adapters:
trimsettings = " {} ILLUMINACLIP:{}:{}".format(pre_adapter_params,
adapter, adapter_params)
return "{} {}".format(trimsettings, post_adapter_params)
def get_sortmerna_ref_string(dbs):
"""
Constructs the SortMeRNA --ref string
:param dbs: Sortmerna databases from config
    :return: str, colon-separated '<db_fasta>,<db_fasta>' pairs for the SortMeRNA --ref option
"""
files = ["resources/rRNA_databases/{db}".format(db=db) for db in dbs]
ref_string = ":".join(["{},{}".format(f, f) for f in files])
return ref_string
# assembly functions
def filter_metaspades_assemblies(d):
"""
This function removes assemblies that contain only single-end samples
:param d: Assembly group dictionary
:return: Dictionary containing only assemblies with at least 1 paired sample
"""
se_only = []
for assembly in d.keys():
i = 0
for sample in d[assembly].keys():
for unit in d[assembly][sample].keys():
if is_pe(d[assembly][sample][unit]):
i += 1
break
if i == 0:
se_only.append(assembly)
for assembly in se_only:
del d[assembly]
# Quit and warn if all assemblies have been removed
if len(d) == 0:
sys.exit("""
WARNING: Metaspades requires paired-end data but all specified assemblies
only contain single-end data. Exiting...
""")
return d
def get_all_assembly_files(assembly_dict):
files = []
for sample in assembly_dict.keys():
for unit in assembly_dict[sample].keys():
for pair in assembly_dict[sample][unit].keys():
files.append(assembly_dict[sample][unit][pair][0])
return files
def get_bamfiles(g, assembly_dict, results_path, POSTPROCESS):
files = []
for sample in assembly_dict.keys():
for unit in assembly_dict[sample].keys():
if "R2" in assembly_dict[sample][unit].keys():
seq_type = "pe"
else:
seq_type = "se"
files.append(results_path+"/assembly/{}/mapping/{}_{}_{}{}.bam".format(g, sample, unit, seq_type, POSTPROCESS))
return files
def rename_records(f, fh, i):
"""
Prepends a number to read ids
:param f: Input fastq file (gzipped)
:param fh: Output filehandle
:param i: File index to prepend to reads
:return: Output filehandle
"""
from Bio import SeqIO
import gzip as gz
for record in SeqIO.parse(gz.open(f, 'rt'), 'fastq'):
record.id = "{}_{}".format(i, record.id)
SeqIO.write(record, fh, "fastq")
return fh
# binning functions
def binning_input(config, report=False):
"""
Generates input list for the binning part of the workflow
:param config: Snakemake config
:param report: Whether to gather input for the bin report rule
:return:
"""
bin_input = []
if len(get_binners(config)) > 0:
bin_input.append(config["paths"]["results"]+"/report/binning/binning_summary.tsv")
if config["binning"]["checkm"]:
bin_input.append(config["paths"]["results"]+"/report/checkm/checkm.stats.tsv")
# Don't include profile in report
if not report:
bin_input.append(config["paths"]["results"]+"/report/checkm/checkm.profiles.tsv")
if config["binning"]["gtdbtk"]:
bin_input.append(config["paths"]["results"]+"/report/gtdbtk/gtdbtk.summary.tsv")
bin_input.append(config["paths"]["results"]+"/report/bin_annotation/tRNA.total.tsv")
bin_input.append(config["paths"]["results"]+"/report/bin_annotation/rRNA.types.tsv")
config["fastani"]["ref_genomes"] = {}
if config["binning"]["fastani"]:
bin_input.append(config["paths"]["results"]+"/report/binning/genome_clusters.tsv")
# read list of genome references if path exists
if os.path.exists(config["fastani"]["ref_list"]):
_ = pd.read_csv(config["fastani"]["ref_list"], index_col=0,
sep="\t", header=None, names=["genome_id", "url"])
# filter genome list
_ = _.loc[(_["url"].str.contains("ftp")) | (
_["url"].str.contains("http"))].head()
config["fastani"]["ref_genomes"] = _.to_dict()["url"]
else:
config["fastani"]["ref_genomes"] = {}
return bin_input
def get_fw_reads(config, samples, p):
"""
MaxBin2 only uses unpaired reads for mapping with bowtie2.
    Here we iterate over all samples and collect their forward (R1) or single-end
    preprocessed reads, returning them as a single '-reads1 <file> -reads2 <file> ...'
    argument string.
"""
files = []
for sample in samples.keys():
for unit in samples[sample].keys():
if "R1" in samples[sample][unit].keys():
r="R1"
else:
r="se"
f = config["paths"]["results"]+"/intermediate/preprocess/{sample}_{unit}_{r}{p}.fastq.gz".format(sample=sample,
unit=unit, r=r, p=p)
files.append(f)
reads_string = ""
for i, f in enumerate(files, start=1):
reads_string += "-reads{i} {f} ".format(i=i, f=f)
return reads_string
def get_tree_settings(config):
"""
Return checkm parameter based on tree settings
:param config:
:return:
"""
if config["checkm"]["reduced_tree"]:
return "-r"
return ""
def get_binners(config):
"""
Return a list of binners used
:param config:
:return:
"""
binners = []
if config["binning"]["metabat"]:
binners.append("metabat")
if config["binning"]["concoct"]:
binners.append("concoct")
if config["binning"]["maxbin"]:
binners.append("maxbin")
return binners
def get_fields(f, index):
"""
Extract assembly, binner and length fields from file path
:param f: Input file
:param index: Path split index starting from right
:return:
"""
items = f.split("/")
return items[index - 2], items[index - 1], items[index]
def assign_fields(x, l, group, binner):
"""
Assign assembly, binner and length fields
:param x: pandas DataFrame
:param l: minimum contig length used
:param group: assembly group
:param binner: binner used
:return: updated pandas DataFrame
"""
rows = x.shape[0]
x = x.assign(binner=pd.Series([binner] * rows, index=x.index))
x = x.assign(min_contig_length=pd.Series([l] * rows, index=x.index))
x = x.assign(assembly=pd.Series([group] * rows, index=x.index))
return x
def concatenate(input, index):
"""
    Concatenate bin info dataframes from several binners/assemblies
    :param input: list of tab-separated bin info files
    :param index: path split index (from the right) passed to get_fields
    :return: concatenated pandas DataFrame
"""
df = pd.DataFrame()
for i, f in enumerate(input):
binner, group, l = get_fields(f, index)
_df = pd.read_csv(f, sep="\t", index_col=0)
rows = _df.shape[0]
if rows == 0:
continue
_df = assign_fields(_df, l, group, binner)
if i == 0:
df = _df.copy()
else:
_df = _df.loc[:, df.columns]
df = | pd.concat([df, _df]) | pandas.concat |
"""Main flashbang class
The Simulation object represents a single 1D FLASH model.
It can load model datafiles, manipulate/extract that data,
and plot it across various axes.
Expected model directory structure
----------------------------------
$FLASH_MODELS
│
└───<model_set>
| |
| └───<model>
| │ │ <run>.dat
| │ │ <run>.log
| │ │ ...
| │ │
| │ └───output
| │ │ <run>_hdf5_chk_0000
| │ │ <run>_hdf5_chk_0001
| │ │ ...
Nomenclature
-------------------
Setup arguments
---------------
model_set: name of directory containing the set of models
If <model> is directly below $FLASH_MODELS,
i.e. there is no <model_set> level, just use model_set=''.
model: name of the model directory
Typically corresponds to a particular compiled `flash4` executable.
run: sub-model label (the actual prefix used in filenames)
This can also be used to distinguish between multiple "runs"
executed under the same umbrella "model".
Data objects
---------------
dat: Integrated time-series quantities found in the `<run>.dat` file.
chk: Checkpoint data found in `chk` files.
profile: Radial profiles extracted from chk files.
log: Diagnostics printed during simulation, found in the `<run>.log` file.
tracers: Time-dependent trajectories/tracers for given mass shells.
Extracted from profiles for a chosen mass grid.
"""
import time
import numpy as np
import xarray as xr
import pandas as pd
from astropy import units
# flashbang
from . import load_save
from .plotting import plot_tools
from .plotting.plotter import Plotter
from .plotting.slider import FlashSlider
from .quantities import get_density_zone
from .paths import model_path
from .tools import ensure_sequence
from .config import Config
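# Minimal usage sketch (the run/model names below are placeholders, not shipped models):
#   sim = Simulation(run='run', model='my_model', model_set='my_model_set')
#   sim.dat        # integrated time-series quantities from <run>.dat
#   sim.bounce     # bounce properties
#   sim.chk_table  # per-checkpoint quantities built up from chk files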
class Simulation:
def __init__(self,
run,
model,
model_set,
config=None,
verbose=True,
load_all=True,
reload=False,
save=True,
load_tracers=False):
"""Object representing a 1D flash simulation
parameters
----------
run : str
Label used in chk and .dat filenames, e.g. 'run' for 'run.dat'
model : str
Name of simulation directory containing .dat files, etc.
model_set : str
Name of higher-level model collection (see module docstring)
config : str
Name of config file to use, e.g. 'stir' for 'config/stir.ini'
load_all : bool
Immediately load all model data (chk profiles, dat)
load_tracers : bool
Extract mass tracers/trajectories from profiles
reload : bool
Force reload from raw model files (don't load from cache/)
save : bool
Save extracted model data to temporary files (for faster loading)
verbose : bool
Print information to terminal
"""
t0 = time.time()
self.verbose = verbose
self.run = run
self.model = model
self.model_set = model_set
self.model_path = model_path(model, model_set=model_set)
self.dat = None # time-integrated data from .dat; see load_dat()
self.bounce = {} # bounce properties
self.trans_dens = None # transition densities (helmholtz models)
self.mass_grid = None # mass shells of tracers
self.chk_table = | pd.DataFrame() | pandas.DataFrame |
import sys, os
import argparse
import numpy as np
import pandas as pd
import json
import time
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (roc_curve, accuracy_score, log_loss,
balanced_accuracy_score, confusion_matrix,
roc_auc_score, make_scorer)
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from yattag import Doc
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description='sklearn LogisticRegression')
parser.add_argument('--train_vitals_csv', type=str,
help='Location of vitals data for training')
parser.add_argument('--test_vitals_csv', type=str,
help='Location of vitals data for testing')
parser.add_argument('--metadata_csv', type=str,
help='Location of metadata for testing and training')
parser.add_argument('--data_dict', type=str)
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--save', type=str, default='LRmodel.pt',
help='path to save the final model')
parser.add_argument('--report_dir', type=str, default='results',
help='dir in which to save results report')
parser.add_argument('--is_data_simulated', type=bool, default=False,
help='boolean to check if data is simulated or from mimic')
parser.add_argument('--output_filename_prefix', type=str, default='current_config', help='file to save the loss and validation over epochs')
args = parser.parse_args()
if not(args.is_data_simulated):
# extract data
train_vitals = pd.read_csv(args.train_vitals_csv)
test_vitals = | pd.read_csv(args.test_vitals_csv) | pandas.read_csv |
#!/usr/bin/env python
from __future__ import print_function
import argparse
from collections import Counter
from datetime import datetime
import logging
import re, sys
import os, pycurl, tarfile, zipfile, gzip, shutil
from pkg_resources import resource_filename
from sistr.version import __version__
from sistr.src.blast_wrapper import BlastRunner
from sistr.src.cgmlst import run_cgmlst
from sistr.src.logger import init_console_logger
from sistr.src.qc import qc
from sistr.src.serovar_prediction import SerovarPredictor, overall_serovar_call, serovar_table, SISTR_DB_URL, SISTR_DATA_DIR
def init_parser():
prog_desc = '''
SISTR (Salmonella In Silico Typing Resource) Command-line Tool
==============================================================
Serovar predictions from whole-genome sequence assemblies by determination of antigen gene and cgMLST gene alleles using BLAST.
Note about using the "--use-full-cgmlst-db" flag:
The "centroid" allele database is ~10% the size of the full set so analysis is much quicker with the "centroid" vs "full" set of alleles. Results between 2 cgMLST allele sets should not differ.
If you find this program useful in your research, please cite as:
The Salmonella In Silico Typing Resource (SISTR): an open web-accessible tool for rapidly typing and subtyping draft Salmonella genome assemblies.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
PLoS ONE 11(1): e0147101. doi: 10.1371/journal.pone.0147101
'''
parser = argparse.ArgumentParser(prog='sistr_cmd',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=prog_desc)
parser.add_argument('fastas',
metavar='F',
nargs='*',
help='Input genome FASTA file')
parser.add_argument('-i',
'--input-fasta-genome-name',
nargs=2,
metavar=('fasta_path', 'genome_name'),
action='append',
help='fasta file path to genome name pair')
parser.add_argument('-f',
'--output-format',
default='json',
help='Output format (json, csv, pickle)')
parser.add_argument('-o',
'--output-prediction',
help='SISTR serovar prediction output path')
parser.add_argument('-M',
'--more-results',
action='count',
default=0,
help='Output more detailed results (-M) and all antigen search blastn results (-MM)')
parser.add_argument('-p',
'--cgmlst-profiles',
help='Output CSV file destination for cgMLST allelic profiles')
parser.add_argument('-n',
'--novel-alleles',
help='Output FASTA file destination of novel cgMLST alleles from input genomes')
parser.add_argument('-a',
'--alleles-output',
help='Output path of allele sequences and info to JSON')
parser.add_argument('-T',
'--tmp-dir',
default='/tmp',
help='Base temporary working directory for intermediate analysis files.')
parser.add_argument('-K',
'--keep-tmp',
action='store_true',
help='Keep temporary analysis files.')
parser.add_argument('--use-full-cgmlst-db',
action='store_true',
help='Use the full set of cgMLST alleles which can include highly similar alleles. By default the smaller "centroid" alleles or representative alleles are used for each marker. ')
parser.add_argument('--no-cgmlst',
action='store_true',
help='Do not run cgMLST serovar prediction')
parser.add_argument('-m', '--run-mash',
action='store_true',
help='Determine Mash MinHash genomic distances to Salmonella genomes with trusted serovar designations. Mash binary must be in accessible via $PATH (e.g. /usr/bin).')
parser.add_argument('--qc',
action='store_true',
help='Perform basic QC to provide level of confidence in serovar prediction results.')
parser.add_argument('-t', '--threads',
type=int,
default=1,
help='Number of parallel threads to run sistr_cmd analysis.')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='Logging verbosity level (-v == show warnings; -vvv == show debug info)')
parser.add_argument('-V', '--version', action='version', version='%(prog)s {}'.format(__version__))
return parser
def run_mash(input_fasta):
from sistr.src.mash import mash_dist_trusted, mash_output_to_pandas_df, mash_subspeciation
mash_out = mash_dist_trusted(input_fasta)
df_mash = mash_output_to_pandas_df(mash_out)
if df_mash.empty:
logging.error('Could not perform Mash subspeciation!')
mash_result_dict = {
'mash_genome': '',
'mash_serovar': '',
'mash_distance': 1.0,
'mash_match': 0,
'mash_subspecies': '',
'mash_top_5': {},
}
return mash_result_dict
df_mash_top_5 = df_mash[['ref', 'dist', 'n_match', 'serovar']].head(n=5)
logging.debug('Mash top 5 results:\n{}\n'.format(df_mash_top_5))
mash_spp_tuple = mash_subspeciation(df_mash)
spp = None
if mash_spp_tuple is not None:
spp, spp_dict, spp_counter = mash_spp_tuple
logging.info('Mash spp %s (dist=%s; counter=%s)', spp, spp_dict, spp_counter)
else:
logging.error('Could not perform Mash subspeciation!')
for idx, row in df_mash_top_5.iterrows():
mash_genome = row['ref']
mash_serovar = row['serovar']
mash_distance = row['dist']
mash_match = row['n_match']
log_msg = 'Top serovar by Mash: "{}" with dist={}, # matching sketches={}, matching genome={}'
logging.info(log_msg.format(mash_serovar, mash_distance, mash_match, mash_genome))
mash_result_dict = {
'mash_genome': mash_genome,
'mash_serovar': mash_serovar,
'mash_distance': mash_distance,
'mash_match': mash_match,
'mash_subspecies': spp,
'mash_top_5': df_mash_top_5.to_dict(),
}
return mash_result_dict
def merge_mash_prediction(prediction, mash_prediction):
for k in mash_prediction:
prediction.__dict__[k] = mash_prediction[k]
return prediction
def merge_cgmlst_prediction(serovar_prediction, cgmlst_prediction):
serovar_prediction.cgmlst_distance = cgmlst_prediction['distance']
serovar_prediction.cgmlst_genome_match = cgmlst_prediction['genome_match']
serovar_prediction.serovar_cgmlst = cgmlst_prediction['serovar']
if 'found_loci' in cgmlst_prediction:
serovar_prediction.cgmlst_found_loci = cgmlst_prediction['found_loci']
else:
serovar_prediction.cgmlst_found_loci = 0
serovar_prediction.cgmlst_matching_alleles = cgmlst_prediction['matching_alleles']
serovar_prediction.cgmlst_subspecies = cgmlst_prediction['subspecies']
serovar_prediction.cgmlst_ST = cgmlst_prediction['cgmlst330_ST']
return serovar_prediction
def infer_o_antigen(prediction):
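    """Assign prediction.o_antigen from the serovar table: the most common O antigen
    among entries matching the predicted serovar, or '-' for multi-serovar ('|') calls
    and for serovars with no table entry."""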
df_serovar = serovar_table()
if '|' in prediction.serovar:
prediction.o_antigen = '-'
else:
predicted_serovars = [prediction.serovar]
series_o_antigens = df_serovar.O_antigen[df_serovar.Serovar.isin(predicted_serovars)]
if series_o_antigens.size == 0:
prediction.o_antigen = '-'
else:
counter_o_antigens = Counter(series_o_antigens)
prediction.o_antigen = counter_o_antigens.most_common(1)[0][0]
def download_to_file(url,file):
with open(file, 'wb') as f:
c = pycurl.Curl()
        # Set the request URL.
c.setopt(c.URL, url)
# Follow redirect.
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.WRITEDATA, f)
c.perform()
c.close()
def extract(fname,outdir):
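    """Extract a .tar.gz/.tar/.zip archive into outdir; a plain .gz file is instead
    decompressed into outdir and the compressed original removed."""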
if (fname.endswith("tar.gz")):
tar = tarfile.open(fname, "r:gz")
tar.extractall(outdir)
tar.close()
elif (fname.endswith("tar")):
tar = tarfile.open(fname, "r:")
tar.extractall(outdir)
tar.close()
elif(fname.endswith("zip")):
zip_ref = zipfile.ZipFile(fname, 'r')
zip_ref.extractall(outdir)
zip_ref.close()
elif(fname.endswith("gz")):
outfile = os.path.join(outdir,fname.replace('.gz',''))
with gzip.open(fname, 'rb') as f_in:
with open(outfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
os.remove(fname)
def setup_sistr_dbs():
tmp_file = resource_filename('sistr', 'data.tar.gz')
logging.info("Downloading needed SISTR databases from: {}".format(SISTR_DB_URL))
download_to_file(SISTR_DB_URL, tmp_file)
if os.path.isdir(resource_filename('sistr', 'data/')):
shutil.rmtree(resource_filename('sistr', 'data/'))
os.mkdir(resource_filename('sistr', 'data/'))
if (not os.path.isfile(tmp_file)):
logging.error('Downloading databases failed, please check your internet connection and retry')
sys.exit(-1)
else:
logging.info('Downloading databases successful')
f = open(resource_filename('sistr', 'dbstatus.txt'),'w')
f.write("DB downloaded on : {} from {}".format(datetime.today().strftime('%Y-%m-%d'),SISTR_DB_URL))
f.close()
extract(tmp_file, resource_filename('sistr', ''))
os.remove(tmp_file)
def sistr_predict(input_fasta, genome_name, tmp_dir, keep_tmp, args):
blast_runner = None
try:
assert os.path.exists(input_fasta), "Input fasta file '%s' must exist!" % input_fasta
if genome_name is None or genome_name == '':
genome_name = genome_name_from_fasta_path(input_fasta)
dtnow = datetime.now()
genome_name_no_spaces = re.sub(r'\W', '_', genome_name)
genome_tmp_dir = os.path.join(tmp_dir, dtnow.strftime("%Y%m%d%H%M%S") + '-' + 'SISTR' + '-' + genome_name_no_spaces)
blast_runner = BlastRunner(input_fasta, genome_tmp_dir)
logging.info('Initializing temporary analysis directory "%s" and preparing for BLAST searching.', genome_tmp_dir)
blast_runner.prep_blast()
logging.info('Temporary FASTA file copied to %s', blast_runner.tmp_fasta_path)
spp = None
mash_prediction = None
if args.run_mash:
mash_prediction = run_mash(input_fasta)
spp = mash_prediction['mash_subspecies']
cgmlst_prediction = None
cgmlst_results = None
if not args.no_cgmlst:
cgmlst_prediction, cgmlst_results = run_cgmlst(blast_runner, full=args.use_full_cgmlst_db)
spp = cgmlst_prediction['subspecies']
serovar_predictor = SerovarPredictor(blast_runner, spp)
serovar_predictor.predict_serovar_from_antigen_blast()
prediction = serovar_predictor.get_serovar_prediction()
prediction.genome = genome_name
prediction.fasta_filepath = os.path.abspath(input_fasta)
if cgmlst_prediction:
merge_cgmlst_prediction(prediction, cgmlst_prediction)
if mash_prediction:
merge_mash_prediction(prediction, mash_prediction)
overall_serovar_call(prediction, serovar_predictor)
infer_o_antigen(prediction)
logging.info('%s | Antigen gene BLAST serovar prediction: "%s" serogroup=%s %s:%s:%s',
genome_name,
prediction.serovar_antigen,
prediction.serogroup,
prediction.o_antigen,
prediction.h1,
prediction.h2)
logging.info('%s | Subspecies prediction: %s',
genome_name,
spp)
logging.info('%s | Overall serovar prediction: %s',
genome_name,
prediction.serovar)
if args.qc:
qc_status, qc_msgs = qc(blast_runner.tmp_fasta_path, cgmlst_results, prediction)
prediction.qc_status = qc_status
prediction.qc_messages = ' | '.join(qc_msgs)
finally:
if not keep_tmp:
logging.info('Deleting temporary working directory at %s', blast_runner.tmp_work_dir)
blast_runner.cleanup()
else:
logging.info('Keeping temp dir at %s', blast_runner.tmp_work_dir)
return prediction, cgmlst_results
def genome_name_from_fasta_path(fasta_path):
"""Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
"""
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename)
def write_cgmlst_profiles(fastas, cgmlst_results, output_path):
genome_marker_cgmlst_result = {}
for genome, res in zip(fastas, cgmlst_results):
tmp = {}
for marker, res_dict in res.items():
aname = res_dict['name']
tmp[marker] = int(aname) if aname is not None else None
genome_marker_cgmlst_result[genome] = tmp
import pandas as pd
df = | pd.DataFrame(genome_marker_cgmlst_result) | pandas.DataFrame |
import sqlite3
import pandas as pd
import geopandas as gpd
import os
def fix_badtext_in_litholog(filename,stateID = 'SA'):
"""fixes up bad text in the South Australia, QLD 'NGIS_LithologyLog.csv' file
    filename: string, .csv name of the bad file, e.g. 'NGIS_LithologyLog.csv'
    Do not use on NT lithology.csv files: it makes them worse.
    returns: None; writes the cleaned text to a new file named 'new_'+filename """
f1 = open(filename,encoding = 'utf-8')
str1 = str(f1.read())
f1.close()
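    # Strategy: protect the legitimate quote patterns (the '",SAGeodata' / '",QLD DNRM GWDB'
    # record terminators and the ',"' field opener) behind placeholder tokens, replace any
    # stray quotes with the word 'Quote' and apostrophes with ''', then restore
    # the placeholders and write the cleaned text out.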
if stateID == 'SA':
str2 = str1.replace('",SAGeodata','#$$d1')
elif stateID == 'QLD':
str2 = str1.replace('",QLD DNRM GWDB','#$$d1')
else:
        raise ValueError('Error: Only SA or QLD need to be put through this function')
str3 = str2.replace(',"','#&&d1')
str4 = str3.replace('"','Quote')
if stateID == 'SA':
str5 = str4.replace('#$$d1','",SAGeodata')
elif stateID == 'QLD':
str5 = str4.replace('#$$d1','",QLD DNRM GWDB')
str6 = str5.replace('#&&d1',',"')
str7 = str6.replace('\'',''')
f2 = open('new_'+filename,'w',encoding='utf-8')
f2.write(str7)
f2.close()
return
def clean_col_strings(df,col_name,chars = '/\,:%&()-=<>.–;?‘’+~'):
"""removes specified characters from dataframe (df) column (col_name)
Some characters were found to cause issues with folium"""
df[col_name]=df[col_name].astype(str)
for i in range(len(chars)):
df[col_name] = df[col_name].apply(lambda x: x.replace(chars[i],''))
df[col_name]=df[col_name].apply(lambda x: x.replace("'",''))
return df
def create_SQL_from_NGIS_bore_data(state_datainput='NSW',NGISdata_dir='',SQLdata_saveto_dir=''):
"""Script to create SQL database from bom databases
aims to rename certain columns for consistency (between states) and then save as a SQL db
input is the state code and two directory locations
    NGISdata_dir: link to a folder containing the different state folders which contain the NGIS csv, shp files"""
dir1 = os.getcwd()
os.chdir(NGISdata_dir)
#read data into pandas
print(' reading metadata ...')
if state_datainput == 'NSW' or state_datainput == 'VIC':
meta = gpd.read_file('NGIS_Bore.shp')
else:
meta = gpd.read_file('NGIS_Bore.shp')
meta = | pd.DataFrame(meta) | pandas.DataFrame |
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
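# one index length just below and one just above _SIZE_CUTOFF, so both
# index-engine code paths are exercised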
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check comparisons against scalar values when the index contains NaN / pd.NaT
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * | offsets.Nano() | pandas.offsets.Nano |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 17:22:51 2019
Workflow to obtain the TD products for use with ZWD (after download):
1) use fill_fix_all_10mins_IMS_stations() after copying the downloaded TD
2) use IMS_interpolating_to_GNSS_stations_israel(dt=None, start_year=2019 (latest))
3) use resample_GNSS_TD(path=ims_path) to resample all TD
@author: ziskin
"""
from PW_paths import work_yuval
from pathlib import Path
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
ims_10mins_path = ims_path / '10mins'
awd_path = work_yuval/'AW3D30'
axis_path = work_yuval/'axis'
cwd = Path().cwd()
# fill missing data:
#some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds)
#
#In [20]: filled = some_missing.groupby('time.month').fillna(climatology.tmin)
#
#In [21]: both = xr.Dataset({'some_missing': some_missing, 'filled': filled})
# kabr, nzrt, katz, elro, klhv, yrcm, slom do not have IMS stations close to them!
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124', 'nizn': 'EZUZ'}
ims_units_dict = {
'BP': 'hPa',
'NIP': 'W/m^2',
'Rain': 'mm',
'TD': 'deg_C',
'WD': 'deg',
'WS': 'm/s',
'U': 'm/s',
'V': 'm/s',
'G': ''}
def save_daily_IMS_params_at_GNSS_loc(ims_path=ims_path,
param_name='WS', stations=[x for x in gnss_ims_dict.keys()]):
import xarray as xr
from aux_gps import save_ncfile
param = xr.open_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(param_name))
ims_stns = [gnss_ims_dict.get(x) for x in stations]
param = param[ims_stns]
param = param.resample(time='D', keep_attrs=True).mean(keep_attrs=True)
inv_dict = {v: k for k, v in gnss_ims_dict.items()}
for da in param:
param = param.rename({da: inv_dict.get(da)})
filename = 'GNSS_{}_daily.nc'.format(param_name)
save_ncfile(param, ims_path, filename)
return param
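# Example usage (sketch): save_daily_IMS_params_at_GNSS_loc(param_name='TD')
# writes ims_path / 'GNSS_TD_daily.nc' with one daily-mean series per GNSS site name.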
def produce_bet_dagan_long_term_pressure(path=ims_path, rate='1H',
savepath=None, fill_from_jerusalem=True):
import xarray as xr
from aux_gps import xr_reindex_with_date_range
from aux_gps import get_unique_index
from aux_gps import save_ncfile
from aux_gps import anomalize_xr
# load manual old measurements and new 3 hr ones:
bd_man = xr.open_dataset(
path / 'IMS_hourly_03hr.nc')['BET-DAGAN-MAN_2520_ps']
bd_auto = xr.open_dataset(path / 'IMS_hourly_03hr.nc')['BET-DAGAN_2523_ps']
bd = xr.concat(
[bd_man.dropna('time'), bd_auto.dropna('time')], 'time', join='inner')
bd = get_unique_index(bd)
bd = bd.sortby('time')
bd = xr_reindex_with_date_range(bd, freq='1H')
# remove dayofyear mean, interpolate and reconstruct signal to fill it with climatology:
climatology = bd.groupby('time.dayofyear').mean(keep_attrs=True)
bd_anoms = anomalize_xr(bd, freq='DOY')
bd_inter = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='24H', keep_attrs=True)
# bd_inter = bd.interpolate_na('time', max_gap='3H', method='cubic')
bd_inter = bd_inter.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.reset_coords(drop=True)
# load 10-mins new measurements:
bd_10 = xr.open_dataset(path / 'IMS_BP_israeli_hourly.nc')['BET-DAGAN']
bd_10 = bd_10.dropna('time').sel(
time=slice(
'2019-06-30T00:00:00',
None)).resample(
time='1H').mean()
bd_inter = xr.concat([bd_inter, bd_10], 'time', join='inner')
bd_inter = get_unique_index(bd_inter)
bd_inter = bd_inter.sortby('time')
bd_inter.name = 'bet-dagan'
bd_inter.attrs['action'] = 'interpolated from 3H'
if fill_from_jerusalem:
print('filling missing gaps from 2018 with jerusalem')
jr_10 = xr.load_dataset(
path / 'IMS_BP_israeli_hourly.nc')['JERUSALEM-CENTRE']
climatology = bd_inter.groupby('time.dayofyear').mean(keep_attrs=True)
jr_10_anoms = anomalize_xr(jr_10, 'DOY')
bd_anoms = anomalize_xr(bd_inter, 'DOY')
bd_anoms = xr.concat(
[bd_anoms.dropna('time'), jr_10_anoms.dropna('time')], 'time', join='inner')
bd_anoms = get_unique_index(bd_anoms)
bd_anoms = bd_anoms.sortby('time')
bd_anoms = xr_reindex_with_date_range(bd_anoms, freq='5T')
bd_anoms = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='2H')
bd_anoms.name = 'bet-dagan'
bd_anoms.attrs['action'] = 'interpolated from 3H'
bd_anoms.attrs['filled'] = 'using Jerusalem-centre'
bd_anoms.attrs['long_name'] = 'Pressure Anomalies'
bd_anoms.attrs['units'] = 'hPa'
bd_inter = bd_anoms.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.resample(
time='1H', keep_attrs=True).mean(keep_attrs=True)
# if savepath is not None:
# yr_min = bd_anoms.time.min().dt.year.item()
# yr_max = bd_anoms.time.max().dt.year.item()
# filename = 'IMS_BD_anoms_5min_ps_{}-{}.nc'.format(
# yr_min, yr_max)
# save_ncfile(bd_anoms, savepath, filename)
# return bd_anoms
if savepath is not None:
# filename = 'IMS_BD_hourly_ps.nc'
yr_min = bd_inter.time.min().dt.year.item()
yr_max = bd_inter.time.max().dt.year.item()
filename = 'IMS_BD_hourly_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_inter, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY', units='std')
filename = 'IMS_BD_hourly_anoms_std_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY')
filename = 'IMS_BD_hourly_anoms_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
return bd_inter
def transform_wind_speed_direction_to_u_v(path=ims_path, savepath=ims_path):
import xarray as xr
import numpy as np
WS = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
WD = xr.load_dataset(path / 'IMS_WD_israeli_10mins.nc')
# change angles to math:
WD = 270 - WD
U = WS * np.cos(np.deg2rad(WD))
V = WS * np.sin(np.deg2rad(WD))
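# Note (assumption): IMS 'WD' is the meteorological wind direction (degrees the
# wind blows FROM, clockwise from north); 270 - WD converts it to the math
# convention (counter-clockwise from east, pointing TO), so e.g. a westerly
# wind (WD=270) gives U = +WS, V = 0.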
print('updating attrs...')
for station in WS:
attrs = WS[station].attrs
attrs.update(channel_name='U')
attrs.update(units='m/s')
attrs.update(field_name='zonal velocity')
U[station].attrs = attrs
attrs.update(channel_name='V')
attrs.update(field_name='meridional velocity')
V[station].attrs = attrs
if savepath is not None:
filename = 'IMS_U_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in U.data_vars}
U.to_netcdf(savepath / filename, 'w', encoding=encoding)
filename = 'IMS_V_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in V.data_vars}
V.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return
def perform_harmonic_analysis_all_IMS(path=ims_path, var='BP', n=4,
savepath=ims_path):
import xarray as xr
from aux_gps import harmonic_analysis_xr
from aux_gps import keep_iqr
ims = xr.load_dataset(path / 'IMS_{}_israeli_10mins.nc'.format(var))
sites = [x for x in gnss_ims_dict.values()]
ims_actual_sites = [x for x in ims if x in sites]
ims = ims[ims_actual_sites]
if var == 'NIP':
ims = xr.merge([keep_iqr(ims[x]) for x in ims])
max_nip = ims.to_array('site').max()
ims /= max_nip
dss_list = []
for site in ims:
da = ims[site]
da = keep_iqr(da)
print('performing harmonic analysis for IMS {} field at {} site:'.format(var, site))
dss = harmonic_analysis_xr(da, n=n, anomalize=True, normalize=False)
dss_list.append(dss)
dss_all = xr.merge(dss_list)
dss_all.attrs['field'] = var
dss_all.attrs['units'] = ims_units_dict[var]
if savepath is not None:
filename = 'IMS_{}_harmonics_diurnal.nc'.format(var)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in dss_all.data_vars}
dss_all.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return dss_all
def align_10mins_ims_to_gnss_and_save(ims_path=ims_path, field='G7',
gnss_ims_dict=gnss_ims_dict,
savepath=work_yuval):
import xarray as xr
d = dict(zip(gnss_ims_dict.values(), gnss_ims_dict.keys()))
gnss_list = []
for station, gnss_site in d.items():
print('loading IMS station {}'.format(station))
ims_field = xr.load_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(field))[station]
gnss = ims_field.load()
gnss.name = gnss_site
gnss.attrs['IMS_station'] = station
gnss_list.append(gnss)
gnss_sites = xr.merge(gnss_list)
if savepath is not None:
filename = 'GNSS_IMS_{}_israeli_10mins.nc'.format(field)
print('saving {} to {}'.format(filename, savepath))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in gnss_sites.data_vars}
gnss_sites.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return gnss_sites
def produce_10mins_gustiness(path=ims_path, rolling=5):
import xarray as xr
from aux_gps import keep_iqr
from aux_gps import xr_reindex_with_date_range
ws = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
stations = [x for x in ws.data_vars]
g_list = []
for station in stations:
print('processing station {}'.format(station))
attrs = ws[station].attrs
g = ws[station].rolling(time=rolling, center=True).std(
) / ws[station].rolling(time=rolling, center=True).mean()
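# i.e. gustiness here is the centered rolling coefficient of variation
# (std / mean) of the 10-min wind speed over `rolling` consecutive samples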
g = keep_iqr(g)
g = xr_reindex_with_date_range(g, freq='10min')
g.name = station
g.attrs = attrs
g_list.append(g)
G = xr.merge(g_list)
filename = 'IMS_G{}_israeli_10mins.nc'.format(rolling)
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in G.data_vars}
G.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return G
def produce_10mins_absolute_humidity(path=ims_path):
from sounding_procedures import wrap_xr_metpy_mixing_ratio
from aux_gps import dim_intersection
import xarray as xr
P = xr.load_dataset(path / 'IMS_BP_israeli_10mins.nc')
stations = [x for x in P.data_vars]
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')
T = T[stations].load()
RH = xr.open_dataset(path / 'IMS_RH_israeli_10mins.nc')
RH = RH[stations].load()
mr_list = []
for station in stations:
print('processing station {}'.format(station))
p = P[station]
t = T[station]
rh = RH[station]
new_time = dim_intersection([p, t, rh])
p = p.sel(time=new_time)
rh = rh.sel(time=new_time)
t = t.sel(time=new_time)
mr = wrap_xr_metpy_mixing_ratio(p, t, rh, verbose=True)
mr_list.append(mr)
MR = xr.merge(mr_list)
filename = 'IMS_MR_israeli_10mins.nc'
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in MR.data_vars}
MR.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return MR
def produce_wind_frequency_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
gustiness = ws.rolling(time=5).std() / ws.rolling(time=5).mean()
gustiness = keep_iqr(gustiness)
gustiness_anoms = gustiness.groupby(
'time.month') - gustiness.groupby('time.month').mean('time')
gustiness_anoms = gustiness_anoms.reset_coords(drop=True)
G = gustiness_anoms.groupby('time.hour').mean('time')
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.grid()
ax2 = ax.twinx()
G.plot.line(ax=ax2, color='k', marker='o')
ax2.axhline(0, color='k', linestyle='--')
ax2.legend(['{} Gustiness anomalies'.format(station)],
loc='upper right')
ax2.set_ylabel('Gustiness anomalies')
return
def produce_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', pw_station='tela', temp=False,
ax=None):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
from aux_gps import groupby_date_xr
from matplotlib.ticker import FixedLocator
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1-y2)/2, v2)
adjust_yaxis(ax1, (y2-y1)/2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny*(maxy+dy)/(miny+dy)
else:
nmaxy = maxy
nminy = maxy*(miny+dy)/(maxy+dy)
ax.set_ylim(nminy+v, nmaxy+v)
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
print('loading {} IMS station...'.format(station))
g = xr.open_dataset(path / 'IMS_G_israeli_10mins.nc')[station]
g.load()
g = g.sel(time=g['time.season'] == season)
date = groupby_date_xr(g)
# g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
g_anoms = g.groupby(date) - g.groupby(date).mean('time')
g_anoms = g_anoms.reset_coords(drop=True)
G = g_anoms.groupby('time.hour').mean('time')
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
G.plot(ax=ax, color='b', marker='o')
ax.set_title(
'Gustiness {} IMS station in {} season'.format(
station, season))
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw = pw.load().dropna('time')
pw = pw.sel(time=pw['time.season'] == season)
date = groupby_date_xr(pw)
pw = pw.groupby(date) - pw.groupby(date).mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
if temp:
axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s', color='m',
label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
axt.set_ylabel('Temperature anomalies [$C\degree$]')
return G
def produce_relative_frequency_wind_direction(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', with_weights=False,
pw_station='tela', temp=False,
plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')[station]
T.load()
T = T.groupby('time.month') - T.groupby('time.month').mean('time')
T = T.reset_coords(drop=True)
T = T.sel(time=T['time.season'] == season)
T = T.groupby('time.hour').mean('time')
if with_weights:
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
ws.name = 'WS'
wind = xr.merge([ws, wd])
wind = wind.dropna('time')
all_Q = wind['WD'].groupby('time.hour').count()
Q1 = wind['WS'].where(
(wind['WD'] >= 0) & (wind['WD'] < 90)).dropna('time')
Q2 = wind['WS'].where(
(wind['WD'] >= 90) & (wind['WD'] < 180)).dropna('time')
Q3 = wind['WS'].where(
(wind['WD'] >= 180) & (wind['WD'] < 270)).dropna('time')
Q4 = wind['WS'].where(
(wind['WD'] >= 270) & (wind['WD'] < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_ratio = (Q.groupby('time.hour').count() / all_Q)
Q_mean = Q.groupby('time.hour').mean() / Q.groupby('time.hour').max()
Q_freq = 100 * ((Q_mean * Q_ratio) / (Q_mean * Q_ratio).sum('Q'))
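# speed-weighted "frequency": each quadrant's hourly occurrence fraction is
# weighted by its normalized mean wind speed, then the four quadrants are
# renormalized so they sum to 100% per hour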
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.set_xticks(np.arange(0, 24, step=1))
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw = pw.load().dropna('time')
pw = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
if temp:
axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s',
color='m', label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
axt.set_ylabel('Temperature anomalies [$C\degree$]')
return Q_freq
def plot_closest_line_from_point_to_israeli_coast(point, ax=None, epsg=None,
path=gis_path, color='k',
ls='-', lw=1.0):
"""Plot the shortest line from `point` to the Israeli coast line and return its length in km."""
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from pyproj import Geod
coast_gdf = get_israeli_coast_line(path=path, epsg=epsg)
coast_pts = coast_gdf.geometry.unary_union
point_in_coast = get_closest_point_from_a_line_to_a_point(point, coast_pts)
AB = LineString([point_in_coast, point])
if ax is None:
fig, ax = plt.subplots()
ax.plot(*AB.xy, color=color, linestyle=ls, linewidth=lw)
geod = Geod(ellps="WGS84")
distance = geod.geometry_length(AB) / 1000.0
return distance
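# Hedged usage sketch (assumes gis_path contains 'ne_10m_coastline.shp' and that
# `point` is a shapely Point given as lon/lat):
#   from shapely.geometry import Point
#   dist_km = plot_closest_line_from_point_to_israeli_coast(Point(34.8, 32.1))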
def get_closest_point_from_a_line_to_a_point(point, line):
from shapely.ops import nearest_points
p1, p2 = nearest_points(point, line)
return p2
def get_israeli_coast_line(path=gis_path, minx=34.0, miny=30.0, maxx=36.0,
maxy=34.0, epsg=None):
"""use epsg=2039 to return in meters"""
from shapely.geometry import box
import geopandas as gpd
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# read world coast lines:
coast = gpd.read_file(gis_path / 'ne_10m_coastline.shp')
# clip:
gdf = gpd.clip(coast, bbox)
if epsg is not None:
gdf = gdf.to_crs('epsg:{}'.format(epsg))
return gdf
def clip_raster(fp=awd_path/'Israel_Area.tif',
out_tif=awd_path/'israel_dem.tif',
minx=34.0, miny=29.0, maxx=36.5, maxy=34.0):
def getFeatures(gdf):
"""Function to parse features from GeoDataFrame in such a manner that
rasterio wants them"""
import json
return [json.loads(gdf.to_json())['features'][0]['geometry']]
import rasterio
from rasterio.plot import show
from rasterio.plot import show_hist
from rasterio.mask import mask
from shapely.geometry import box
import geopandas as gpd
from fiona.crs import from_epsg
import pycrs
print('reading {}'.format(fp))
data = rasterio.open(fp)
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# insert the bbox into a geodataframe:
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))
# re-project with the same projection as the data:
geo = geo.to_crs(crs=data.crs.data)
# get the geometry coords:
coords = getFeatures(geo)
# clipping is done with mask:
out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)
# copy meta data:
out_meta = data.meta.copy()
# parse the epsg code:
epsg_code = int(data.crs.data['init'][5:])
# update the meta data:
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": pycrs.parse.from_epsg_code(epsg_code).to_proj4()})
# save to disk:
print('saving {} to disk.'.format(out_tif))
with rasterio.open(out_tif, "w", **out_meta) as dest:
dest.write(out_img)
print('Done!')
return
def create_israel_area_dem(path):
"""merge the raw DSM tif files from AW3D30 model of Israel area togather"""
from aux_gps import path_glob
import rasterio
from rasterio.merge import merge
src_files_to_mosaic = []
files = path_glob(path, '*DSM*.tif')
for fp in files:
src = rasterio.open(fp)
src_files_to_mosaic.append(src)
mosaic, out_trans = merge(src_files_to_mosaic)
out_meta = src.meta.copy()
out_meta.update({"driver": "GTiff",
"height": mosaic.shape[1],
"width": mosaic.shape[2],
"transform": out_trans,
"crs": src.crs
}
)
with rasterio.open(path/'Israel_Area.tif', "w", **out_meta) as dest:
dest.write(mosaic)
return
def parse_cv_results(grid_search_cv):
"""parse cv_results from a GridSearchCV object"""
from aux_gps import process_gridsearch_results
# only supports neg_mean_absolute_error scoring with LeaveOneOut cv
from sklearn.model_selection import LeaveOneOut
if (isinstance(grid_search_cv.cv, LeaveOneOut)
and grid_search_cv.scoring == 'neg_mean_absolute_error'):
cds = process_gridsearch_results(grid_search_cv)
cds = - cds
return cds
def IMS_interpolating_to_GNSS_stations_israel(dt='2013-10-19T22:00:00',
stations=None,
lapse_rate='auto',
method='okrig',
variogram='spherical',
n_neighbors=3,
start_year='1996',
cut_days_ago=3,
plot=False,
verbose=False,
savepath=ims_path,
network='soi-apn',
axis_path=axis_path,
ds_td=None):
"""interpolate the IMS 10 mins field(e.g., TD) to the location
of the GNSS sites in ISRAEL(use dt=None for this). other dt is treated
as datetime str and will give the "snapshot" for the field for just this
datetime"""
from pykrige.rk import Krige
import pandas as pd
from aux_gps import path_glob
import xarray as xr
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
from sklearn.neighbors import KNeighborsRegressor
from axis_process import read_axis_stations
# import time
def pick_model(method, variogram, n_neighbors):
if method == 'okrig':
if variogram is not None:
model = Krige(method='ordinary', variogram_model=variogram,
verbose=verbose)
else:
model = Krige(method='ordinary', variogram_model='linear',
verbose=verbose)
elif method == 'knn':
if n_neighbors is None:
model = KNeighborsRegressor(n_neighbors=5, weights='distance')
else:
model = KNeighborsRegressor(
n_neighbors=n_neighbors, weights='distance')
else:
raise Exception('{} is not supported yet...'.format(method))
return model
def prepare_Xy(ts_lr_neutral, T_lats, T_lons):
import numpy as np
df = ts_lr_neutral.to_frame()
df['lat'] = T_lats
df['lon'] = T_lons
# df = df.dropna(axis=0)
c = np.linspace(
df['lat'].min(),
df['lat'].max(),
df['lat'].shape[0])
r = np.linspace(
df['lon'].min(),
df['lon'].max(),
df['lon'].shape[0])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(ts_lr_neutral)
X = np.column_stack([rr[vals, vals], cc[vals, vals]])
# rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
# y = da_scaled.values[vals]
y = ts_lr_neutral[vals]
return X, y
def neutrilize_t(ts_vs_alt, lapse_rate):
ts_lr_neutral = (ts_vs_alt +
lapse_rate *
ts_vs_alt.index /
1000.0)
return ts_lr_neutral
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
# try:
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
# except TypeError as e:
# print('{}, dt: {}'.format(e, dt))
# print(ts_vs_alt)
# return
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# import time
dt = pd.to_datetime(dt)
# read Israeli GNSS sites coords:
if network == 'soi-apn':
df = pd.read_csv(
cwd /
'israeli_gnss_coords.txt',
delim_whitespace=True,
header=0)
elif network == 'axis':
df = read_axis_stations(path=axis_path)
# use station=None to pick all stations, otherwise pick one...
if stations is not None:
if isinstance(stations, str):
stations = [stations]
df = df.loc[stations, :]
print('selected only {} stations'.format(stations))
else:
print('selected all {} stations.'.format(network))
# prepare lats and lons of gnss sites:
gps_lats = np.linspace(df.lat.min(), df.lat.max(), df.lat.values.shape[0])
gps_lons = np.linspace(df.lon.min(), df.lon.max(), df.lon.values.shape[0])
gps_lons_lats_as_cols = np.column_stack([gps_lons, gps_lats])
# load IMS temp data:
if ds_td is None:
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
else:
ds = ds_td
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice(start_year, None)})
years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
if cut_days_ago is not None:
# use cut_days_ago to drop last x days of data:
# this is vital because towards the newest data TD becomes scarce, since
# not all of the stations' data exists yet...
n = cut_days_ago * 144  # 144 ten-minute samples per day
tdf.drop(tdf.tail(n).index, inplace=True)
print('last date to be handled is {}'.format(tdf.index[-1]))
# use this to solve for a specific datetime:
if dt is not None:
dt_col = dt.strftime('%Y-%m-%d %H:%M')
# t0 = time.time()
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(
tdf, dt, T_alts, lapse_rate)
if plot:
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'b'}, ax=ax_lapse)
suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]')
ax_lapse.set_ylabel('Temperature [degC]')
ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
transform=ax_lapse.transAxes, fontsize=12, color='k',
fontweight='bold')
ax_lapse.grid()
ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
# neutralize the lapse rate effect:
ts_lr_neutral = neutrilize_t(ts_vs_alt, lapse_rate)
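# ts_lr_neutral is the station temperature reduced to a common reference by
# adding lapse_rate * altitude / 1000, so the horizontal interpolation is not
# biased by station elevation; the correction is removed again at the GNSS
# altitudes further below ("fix for lapse rate").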
# prepare the regressors(IMS stations coords) and the
# target(IMS temperature at the coords):
X, y = prepare_Xy(ts_lr_neutral, T_lats, T_lons)
# pick the model and params:
model = pick_model(method, variogram, n_neighbors)
# fit the model:
model.fit(X, y)
# predict at the GNSS stations coords:
interpolated = model.predict(
gps_lons_lats_as_cols).reshape((gps_lats.shape))
# add prediction to df:
df[dt_col] = interpolated
# fix for lapse rate:
df[dt_col] -= lapse_rate * df['alt'] / 1000.0
# concat gnss stations and Tloc DataFrames:
Tloc_df = | pd.DataFrame(T_lats, index=tdf.columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from concurrent.futures import ProcessPoolExecutor
import re
import os
import time
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def gender_age_percentage(df_name, df):
df=df.rename(columns={'NUMBER_OF_PATIENTS':'register'})
practices_ids=list(set(df['ORG_CODE']))
print('number of practices', len(practices_ids))
new_headers=['ORG_CODE', 'SEX', 'AGE_GROUP_5', 'register','percentage','End_date']
new_month_df= | pd.DataFrame(columns=new_headers) | pandas.DataFrame |
"""Purpose: generate profiles for obs and models at multiple leadtimes.
Author: <NAME>
Date: 04/05/2022.
"""
# Standard library
from pprint import pprint
# Third-party
import matplotlib.pyplot as plt
import pandas as pd
# First-party
from plot_profile.utils.stations import sdf
from plot_profile.utils.utils import get_cubehelix_colors
from plot_profile.utils.utils import linestyle_dict
from plot_profile.utils.utils import save_fig
from plot_profile.utils.variables import vdf
# from ipdb import set_trace
def create_mult_plot(
data_dict,
variable,
leadtimes,
date_ref,
location,
xlims,
ylims,
grid,
datatypes,
outpath,
verbose=False,
):
# get location dataframe
loc = sdf[location]
# get devices
devices = data_dict.keys()
# get ymin, ymax
ymin = ylims[0]
ymax = ylims[1]
# determine ymin_dynamic from data & apply if ymin=None
ymin_dynamic = None
# prepare figure
fig, ax = plt.subplots(1, 1, figsize=(5, 8), tight_layout=True)
if grid:
ax.grid(which="major", color="#DDDDDD", linewidth=0.8)
ax.grid(which="minor", color="#EEEEEE", linestyle=":", linewidth=0.5)
ax.minorticks_on()
# xlims = (xmin, xmax) & xmin = (xmin1, xmin2) & xmax = (xmax1, xmax2)
if xlims:
xmins = xlims[0]
xmaxs = xlims[1]
if len(xmins) == len(xmaxs):
if len(xmins) == 1:
ax.set_xlim(xmins[0], xmaxs[0])
if len(xmins) == 2: # have xmins for two x-axes
ax.set_xlim(xmins[0], xmaxs[0])
else:
print(
f"Check xmin/xmax values again. Got {len(xmins)} x-min values and {len(xmaxs)} x-max values."
)
print(f"Warning: No x-limits have been applied.")
first_unit = None
# variable informations
variable = vdf[variable]
unit = variable.unit
var_long = variable.long_name
# define color list (one color per leadtime)
colors = get_cubehelix_colors(len(leadtimes), 0.1, 0.8)
device_namelist = []
device_title = None
dev_index = -1
# plotting
for i, device in enumerate(devices):
if "~" in device:
device_name = device.split("~")[0]
device_lt = device.split("~")[1]
else:
device_name, device_title = device, device
device_lt = None
if device_name not in device_namelist:
device_namelist.append(device_name)
dev_index += 1
# 1) retrieve df
try:
df = data_dict[device]
except KeyError:
print(f"! no data for: {device_name} at leadtime: {device_lt} :(")
# sys.exit(1)
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[6]:
import pandas as pd
import io
import requests
import time
import random
# In[3]:
# gets the hidden API keys
api_key = pd.read_csv('secrets.csv').api_key.to_string().split()[1]
# In[124]:
# gets data using user's parameters
def get_data(symbol, interval):
"""
Signature: get_data(symbol, interval) -> 'DataFrame'
Docstring:
Retrieves market data for the selected symbol and period.
Parameters
----------
symbol : str
The name of the equity of your choice. For example: symbol=GOOGL.
interval : str
Time interval between two consecutive data points in the time series.
The following values are supported: 1min, 5min, 15min, 30min, 60min.
Returns
-------
DataFrame
Examples
--------
>>> get_data('GOOGL', '60min')
"""
# main url or alphavantage and selection of features from user
BASE_URL = 'https://www.alphavantage.co/query?'
q = {
'function':'TIME_SERIES_INTRADAY_EXTENDED',
'symbol':symbol,
'interval':interval,
'slice':'year1month1',
'apikey':'<KEY>'
}
df=pd.DataFrame()
for y in range(1,3):
for m in range(1,13):
# create 'slices' of 1 month each. has to do with how the api functions
q['slice'] = f'year{y}month{m}'
# concatenate all user's selected values into one string
q_str = "".join([i for i in [str(i) + "=" + str(q[i]) + "&" for i in q]])[:-1]
# concatenate the base alphavantage url with the user's query
url = BASE_URL + q_str
print(url)
# GET url
response = requests.get(url)
# read data into a pandas dataframe
df=pd.concat([df, pd.read_csv(io.StringIO(response.content.decode('utf-8')))], axis=0)
# because the free api has a limit of 5 calls per minute, we need to wait
time.sleep(60/5)
# returns a dataframe
return(df)
# In[125]:
# auto complete function for stocks
def auto_complete_stocks(x):
"""
Signature: auto_complete_stocks(str) -> 'json'
Docstring:
Makes use of the auto-completion function of Alpha Vantage API.
It takes the user's input and returns a json with the coincidences.
Parameters
----------
symbol : str
A string containing part of the symbol or description of the equity.
For example 'amaz' would return the symbol and description for AMZN stocks, etc.
Returns
-------
json
"""
BASE_URL = 'https://www.alphavantage.co/query?'
url = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={x}&datatype=json&apikey={api_key}'
response = requests.get(url).json()
return(response)
# In[ ]:
# to fetch all updated stocks and ETFs supported
def get_supported_stocks():
"""
Signature: get_supported_stocks() -> 'DataFrame'
Docstring:
Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
See https://www.alphavantage.co/
Returns
-------
DataFrame
Examples
--------
>>> get_supported_stocks()
"""
BASE_URL = 'https://www.alphavantage.co/query?'
url = f'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey={api_key}'
response = requests.get(url)
x=pd.read_csv(io.StringIO(response.content.decode('utf-8')))
return(x)
# In[ ]:
# to fetch all updated stocks and ETFs supported
# static version loading from .csv previously downloaded
def get_supported_stocks_static():
"""
Signature: get_supported_stocks() -> 'DataFrame'
Docstring:
Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
This 'static' version loads the list from a .csv file.
Returns
-------
DataFrame
Examples
--------
>>> get_supported_stocks()
"""
x = | pd.read_csv('data/stocks_etfs_list.csv') | pandas.read_csv |