| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
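Each row that follows pairs a code prompt with the pandas call that completes it; the completion and the fully qualified API name are appended to the prompt, separated by | markers. A minimal loading sketch, assuming a hypothetical JSON-lines export named api_completions.jsonl with these three columns:

import pandas as pd

# Hypothetical export of the (prompt, completion, api) records shown below.
records = pd.read_json("api_completions.jsonl", lines=True)
# e.g. the completion "tm.rands(10)" maps to the api "pandas.util.testing.rands"
print(records[["completion", "api"]].head())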
# -*- coding: utf-8 -*-
import os
import locale
import codecs
import sys
from uuid import uuid4
from collections import OrderedDict
import pytest
from pandas.compat import intern
from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
from pandas.util._decorators import deprecate_kwarg
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs,
validate_bool_kwarg)
import pandas.util.testing as tm
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestDecorators(object):
def setup_method(self, method):
@deprecate_kwarg('old', 'new')
def _f1(new=False):
return new
@deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
def _f2(new=False):
return new
@deprecate_kwarg('old', 'new', lambda x: x + 1)
def _f3(new=0):
return new
self.f1 = _f1
self.f2 = _f2
self.f3 = _f3
def test_deprecate_kwarg(self):
x = 78
with tm.assert_produces_warning(FutureWarning):
result = self.f1(old=x)
assert result is x
with tm.assert_produces_warning(None):
self.f1(new=x)
def test_dict_deprecate_kwarg(self):
x = 'yes'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result
def test_missing_deprecate_kwarg(self):
x = 'bogus'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result == 'bogus'
def test_callable_deprecate_kwarg(self):
x = 5
with tm.assert_produces_warning(FutureWarning):
result = self.f3(old=x)
assert result == x + 1
with pytest.raises(TypeError):
self.f3(old='hello')
def test_bad_deprecate_kwarg(self):
with pytest.raises(TypeError):
@deprecate_kwarg('old', 'new', 0)
def f4(new=None):
pass
def test_rands():
r = | tm.rands(10) | pandas.util.testing.rands |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from validada.slicers import iloc
import validada.functions.raising as ck
import validada.decorators.raising as dc
import datetime as dt
def _add_one(df):
return df + 1
def _safe_add_one(df):
return df.fillna(0.0) + 1
def _noop(df):
return df
def test_is_in_index():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
df = pd.DataFrame(data = list(range(6)), index=dr)
d = dt.date(2015,1,3)
result = ck.has_in_index(df, obj=d)
tm.assert_frame_equal(df, result)
result = dc.has_in_index(obj=d)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
result = ck.has_in_index(df, obj=d, try_ix=True)
result = ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
result = ck.has_in_index(df, obj=d, check_na=True)
def test_is_in_index_raises():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
da = list(range(6))
da[2] = pd.np.nan
df = pd.DataFrame(data = da, index=dr)
d = dt.date(2015,1,12)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d)
with pytest.raises(AssertionError):
dc.has_in_index(obj=d)(_add_one)(df)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, try_ix=True)
ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
d = dt.datetime(2015,1,3)
ck.has_in_index(df, obj=d)
ck.has_in_index(df, obj=d, check_na=False)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, check_na=True)
def test_equal_columns_sum():
df = pd.DataFrame({'A': [1,2,3,4,5], 'B': [1,2,3,4,5]})
result = ck.equal_columns_sum(df)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
def test_equal_columns_sum_raises_slice():
df = pd.DataFrame({'A': [None,2,3,4,0], 'B': [1,2,3,4,None]})
with pytest.raises(AssertionError):
ck.equal_columns_sum(df)
with pytest.raises(AssertionError):
dc.equal_columns_sum()(_add_one)(df)
s = iloc[-3:]
result = ck.equal_columns_sum(df, s)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum(s)(_safe_add_one)(df)
tm.assert_frame_equal(result, _safe_add_one(df))
def test_none_missing():
df = pd.DataFrame(np.random.randn(5, 3))
result = ck.none_missing(df)
tm.assert_frame_equal(df, result)
result = dc.none_missing()(_add_one)(df)
| tm.assert_frame_equal(result, df + 1) | pandas.util.testing.assert_frame_equal |
# Run from HOME
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import pandas as pd
import os
import httplib2
from geopy.geocoders import GoogleV3
from Dicc_Tipo_Danhos import camb_tipos
import tqdm
import datetime
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'creds/secreto_cliente.json'
APPLICATION_NAME = 'Temblor'
geolocator = GoogleV3(api_key=os.environ.get('GM_KEY'))
# The address must be of the form "Number Street City"
def dir_correct(calle, numero, ciudad, estado):
k = []
k.append('Calle ' + calle + ' ' + numero)
k.append(ciudad)
k.append(estado + ', ' + 'MX')
dirr = ', '.join(k)
return dirr
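# Illustrative call with a made-up address (not from the source data):
# dir_correct('Reforma', '123', 'CDMX', 'Ciudad de Mexico') returns
# 'Calle Reforma 123, CDMX, Ciudad de Mexico, MX', the comma-separated string
# that is later passed to the geocoder.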
def obtain_latlong(dirr):
try:
location = geolocator.geocode(dirr, region='MX')
lat = location.latitude
lon = location.longitude
except:
lat = ''
lon = ''
return lat, lon
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(
credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def get_Data_temblor():
"""Shows basic usage of the Sheets API.
Creates a Sheets API service object and prints the names and majors of
students in a sample spreadsheet:
https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets',
'v4',
http=http,
discoveryServiceUrl=discoveryUrl)
# DAÑOS Y DERRUMBES VERIFICADOS (verified damage and collapses)
# To download other sheets, change the name in the range field
result = service.spreadsheets().values().get(
spreadsheetId='1i__c44wIg760LmxZcM8oTjDR0cGFVdL9YrjbCcb9Op0',
range='Form Responses 1!A1:AH10000').execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
return values
def insert_Data_temblor(datos):
"""Shows basic usage of the Sheets API.
Creates a Sheets API service object and prints the names and majors of
students in a sample spreadsheet:
https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets',
'v4',
http=http,
discoveryServiceUrl=discoveryUrl)
result = service.spreadsheets().values().get(
spreadsheetId='1wLHf5ITtTsfErWoPHwhu7Vfy-96eQKKxZO2AmZbP9XY',
range='Datos!A1:H1000').execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print(values)
def estructura_sheet(listas):
columnas = listas[0]
info = | pd.DataFrame() | pandas.DataFrame |
import pytest
from pandas import DataFrame
import pandas._testing as tm
class TestCopy:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_copy_cache(self):
# GH#31784 _item_cache not cleared on copy causes incorrect reads after updates
df = DataFrame({"a": [1]})
df["x"] = [0]
df["a"]
df.copy()
df["a"].values[0] = -1
tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))
df["y"] = [0]
assert df["a"].values[0] == -1
tm.assert_frame_equal(df, | DataFrame({"a": [-1], "x": [0], "y": [0]}) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
| assert_series_equal(cp, exp) | pandas.util.testing.assert_series_equal |
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
@pytest.mark.parametrize(
"data, index, drop_labels, axis, expected_data, expected_index",
[
# Unique Index
([1, 2], ["one", "two"], ["two"], 0, [1], ["one"]),
([1, 2], ["one", "two"], ["two"], "rows", [1], ["one"]),
([1, 1, 2], ["one", "two", "one"], ["two"], 0, [1, 2], ["one", "one"]),
# GH 5248 Non-Unique Index
([1, 1, 2], ["one", "two", "one"], "two", 0, [1, 2], ["one", "one"]),
([1, 1, 2], ["one", "two", "one"], ["one"], 0, [1], ["two"]),
([1, 1, 2], ["one", "two", "one"], "one", 0, [1], ["two"]),
],
)
def test_drop_unique_and_non_unique_index(
data, index, axis, drop_labels, expected_data, expected_index
):
s = Series(data=data, index=index)
result = s.drop(drop_labels, axis=axis)
expected = Series(data=expected_data, index=expected_index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data, index, drop_labels, axis, error_type, error_desc",
[
# single string/tuple-like
(range(3), list("abc"), "bc", 0, KeyError, "not found in axis"),
# bad axis
(range(3), list("abc"), ("a",), 0, KeyError, "not found in axis"),
(range(3), list("abc"), "one", "columns", ValueError, "No axis named columns"),
],
)
def test_drop_exception_raised(data, index, drop_labels, axis, error_type, error_desc):
ser = Series(data, index=index)
with pytest.raises(error_type, match=error_desc):
ser.drop(drop_labels, axis=axis)
def test_drop_with_ignore_errors():
# errors='ignore'
s = Series(range(3), index=list("abc"))
result = s.drop("bc", errors="ignore")
tm.assert_series_equal(result, s)
result = s.drop(["a", "d"], errors="ignore")
expected = s.iloc[1:]
tm.assert_series_equal(result, expected)
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 3]])
@pytest.mark.parametrize("drop_labels", [[], [1], [3]])
def test_drop_empty_list(index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
series = pd.Series(index=index, dtype=object).drop(drop_labels)
expected = pd.Series(index=expected_index, dtype=object)
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
"data, index, drop_labels",
[
(None, [1, 2, 3], [1, 4]),
(None, [1, 2, 2], [1, 4]),
([2, 3], [0, 1], [False, True]),
],
)
def test_drop_non_empty_list(data, index, drop_labels):
# GH 21494 and GH 16877
dtype = object if data is None else None
ser = | pd.Series(data=data, index=index, dtype=dtype) | pandas.Series |
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = pd.Series([0, 0, 0, 1, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(
ser, *flag_invert)
assert (flags & quality_mapping.LATEST_VERSION_FLAG).all()
@pytest.fixture()
def ignore_latest_version(mocker):
mocker.patch(
'solarforecastarbiter.validation.quality_mapping.LATEST_VERSION_FLAG',
0)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([mask, mask, 0, mask, 0]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_no_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, False)
assert_series_equal(flags, pd.Series([0, 0, mask, 0, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return | pd.Series([True, True, False, False]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# Predictions based on mean probabilities by zone
# In[1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Read input data files
labels = pd.read_csv('../input/stage1_labels.csv')
# split the Id to create a column for zones and person ('subject')
new_list = []
for i,r in labels.iterrows():
subject = r['Id'].split('_')[0]
zone = r['Id'].split('_')[1][4:]
prob = r['Probability']
new_list.append({'Id': r['Id'], 'subject': subject, 'zone': zone, 'prob':prob})
df = pd.DataFrame(new_list)
# In[2]:
# get mean probabilities by zone
zone_means = df.groupby(['zone'])['prob'].mean()
zone_means.plot.bar()
# write the csv
sample = pd.read_csv('../input/stage1_sample_submission.csv')
output = []
for i,r in sample.iterrows():
zone = r['Id'].split('_')[1][4:]
prob = zone_means[zone]
output.append({'Id': r['Id'],'Probability':prob})
op_csv = | pd.DataFrame(output) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This function creates kymographs from a stack of images.
By <NAME> 2020
"""
from skimage import io, measure
import matplotlib.pyplot as plt
import numpy as np
from cell_segmentation import cell_seg_no_cell_crop
import statistics
from matplotlib import gridspec
import pandas as pd
import os
def kymo_generator(image, fname, save_data, interval, pixel_size, bit_depth, small_obj = 1000, save_destination = os.path.dirname(__file__)):
"""
This function takes an image, generates four kymographs, and analyze them.
Parameters
----------
image : array
An input image.
fname : string
The filename.
save_data : boolean
Whether to save the data.
interval : integer
The interval at which images were acquired (e.g. every 5 seconds)
pixel_size : integer
The pixel size of the image.
bit_depth : integer
The bit depth of the image.
small_obj : integer, optional
The smallest object allowed. The default is 1000 pixels.
save_destination : string, optional
The saving directory. The default is os.path.dirname(__file__).
Returns
-------
A confirmation note "done".
"""
all_cell_masks, all_cell_props = cell_seg_no_cell_crop(image, filename = fname, DEPTH = bit_depth, small_obj = small_obj,
show_img = False, save_contour = False)
y, x = all_cell_props[0][-1].centroid
y = int(y)
x = int(x)
kymo_1 = np.empty((y+1,all_cell_masks[0].shape[0]))
kymo_2 = np.empty((all_cell_masks[0].shape[1]-y,all_cell_masks[0].shape[0]))
kymo_3 = np.empty((x+1,all_cell_masks[0].shape[0]))
kymo_4 = np.empty((all_cell_masks[0].shape[2]-x,all_cell_masks[0].shape[0]))
width = 3
all_kymos = []
for slice_number in range (all_cell_masks[0].shape[0]):
profile_line_1 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(0, x), linewidth=width, mode='constant')
kymo_1[:,slice_number] = np.flip(profile_line_1, axis=0)
profile_line_2 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(all_cell_masks[0][slice_number, :, :].shape[0]-1, x), linewidth=width, mode='constant')
kymo_2[:,slice_number] = np.flip(profile_line_2, axis=0)
profile_line_3 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(y, 0), linewidth=width, mode='constant')
kymo_3[:,slice_number] = np.flip(profile_line_3, axis=0)
profile_line_4 = measure.profile_line(all_cell_masks[0][slice_number, :, :], src=(y, x), dst=(y, all_cell_masks[0][slice_number, :, :].shape[1]-1), linewidth=width, mode='constant')
kymo_4[:,slice_number] = np.flip(profile_line_4, axis=0)
all_kymos.append(kymo_1)
all_kymos.append(kymo_2)
all_kymos.append(kymo_3)
all_kymos.append(kymo_4)
del kymo_1, kymo_2, kymo_3, kymo_4 # to save memory
from kymo_to_coords import kymo_to_coords
all_normalized_coords = []
all_filtered_coords = []
for n in range(len(all_kymos)):
normalized, filtered_coords = kymo_to_coords(all_kymos[n], thres=15, pixel_length = 0.1833333)
all_normalized_coords.append(normalized)
all_filtered_coords.append(filtered_coords)
################################dividing line###########################################
from measure_protrusions import measure_protrusions
all_plateau_idx = []
all_minimas = []
all_retraction_rate = []
all_avg_speed = []
all_lowest_point_idx = []
print(fname + ' results')
print('----------------------------------------')
for n in range(len(all_normalized_coords)):
lowest_point_idx, plateau_idx, minima, retraction_rate, avg_speed = measure_protrusions(normalized_coords = all_normalized_coords[n], frame_rate = interval)
all_plateau_idx.append(plateau_idx)
all_minimas.append(minima)
all_retraction_rate.append(retraction_rate)
all_avg_speed.append(avg_speed)
all_lowest_point_idx.append(lowest_point_idx)
all_avg_speed_avg = statistics.mean(all_avg_speed)
all_avg_speed_stdev = statistics.stdev(all_avg_speed)
all_retraction_rate_avg = statistics.mean(all_retraction_rate)
all_retraction_rate_stdev = statistics.stdev(all_retraction_rate)
print('----------------------------------------')
print('Average retraction rate of all kymos = ' + str(round(all_retraction_rate_avg, 3))+ ' ± ' + str(round(all_retraction_rate_stdev,2)))
print('Average protrusion speed of all kymos = ' + str(round(all_avg_speed_avg, 2))+ ' ± ' + str(round(all_avg_speed_stdev,2)))
################################dividing line###########################################
color_1 = '#003f5c'
color_2 = '#7a5195'
color_3 = '#ef5675'
color_4 = '#ffa600'
fig = plt.figure(figsize=(20, 10)) # 20 in x and 10 in y
gs = gridspec.GridSpec(2, 4) # 2 in x and 4 in y
axes0 = plt.subplot(gs[:,0:2])
axes0.imshow(image[-1,:,:], cmap='Greys')
axes0.plot([x, x], [y, 0], color_1, [x, x], [y, all_cell_masks[0][slice_number, :, :].shape[0]-1], color_2,
[x, 0], [y, y], color_3, [x, all_cell_masks[0][slice_number, :, :].shape[1]-1], [y, y], color_4, linewidth = width, linestyle='dashed')
axes0.axis('off')
###################
axes1 = plt.subplot(gs[0,2])
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[0])-1)*interval), num = len(all_normalized_coords[0]))
axes1.plot(x_axis, all_normalized_coords[0], 'k')
last_slope_point_0 = x_axis[all_plateau_idx[0]]
axes1.plot([x_axis[all_lowest_point_idx[0]], last_slope_point_0], [all_normalized_coords[0][all_lowest_point_idx[0]],
all_normalized_coords[0][all_plateau_idx[0]]], color_1, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
# plot retraction points
axes1.scatter(all_minimas[0]*interval, [all_normalized_coords[0][n] for n in all_minimas[0]], s=20, c='r', label='Retraction')
axes1.legend(loc="lower right")
for spine in axes1.spines.values():
spine.set_edgecolor(color_1)
spine.set_linewidth(3)
axes1.set_ylabel('Distance (µm)')
axes1.set_ylim(top = int(np.max(all_normalized_coords)+2)) #limit y axis to be the maximum of all the numbers
###################
axes2 = plt.subplot(gs[0,3], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[1])-1)*interval), num = len(all_normalized_coords[1]))
axes2.plot(x_axis, all_normalized_coords[1], 'k')
last_slope_point_1 = x_axis[all_plateau_idx[1]]
axes2.plot([x_axis[all_lowest_point_idx[1]], last_slope_point_1], [all_normalized_coords[1][all_lowest_point_idx[1]],
all_normalized_coords[1][all_plateau_idx[1]]], color_2, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes2.scatter(all_minimas[1]*interval, [all_normalized_coords[1][n] for n in all_minimas[1]], s=20, c='r', label='Retraction')
axes2.legend(loc="lower right")
for spine in axes2.spines.values():
spine.set_edgecolor(color_2)
spine.set_linewidth(3)
###################
axes3 = plt.subplot(gs[1,2], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[2])-1)*interval), num = len(all_normalized_coords[2]))
axes3.plot(x_axis, all_normalized_coords[2], 'k')
last_slope_point_2 = x_axis[all_plateau_idx[2]]
axes3.plot([x_axis[all_lowest_point_idx[2]], last_slope_point_2], [all_normalized_coords[2][all_lowest_point_idx[2]],
all_normalized_coords[2][all_plateau_idx[2]]], color_3, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes3.scatter(all_minimas[2]*interval, [all_normalized_coords[2][n] for n in all_minimas[2]], s=20, c='r', label='Retraction')
axes3.legend(loc="lower right")
for spine in axes3.spines.values():
spine.set_edgecolor(color_3)
spine.set_linewidth(3)
axes3.set_xlabel('Time (s)')
axes3.set_ylabel('Distance (µm)')
###################
axes4 = plt.subplot(gs[1,3], sharex=axes1, sharey=axes1)
x_axis = np.linspace(start = 0, stop = int((len(all_normalized_coords[3])-1)*interval), num = len(all_normalized_coords[3]))
axes4.plot(x_axis, all_normalized_coords[3], 'k')
last_slope_point_3 = x_axis[all_plateau_idx[3]]
axes4.plot([x_axis[all_lowest_point_idx[3]], last_slope_point_3], [all_normalized_coords[3][all_lowest_point_idx[3]],
all_normalized_coords[3][all_plateau_idx[3]]], color_4, linewidth = width/2, linestyle='dashed', label='Protrusion speed')
axes4.scatter(all_minimas[3]*interval, [all_normalized_coords[3][n] for n in all_minimas[3]], s=20, c='r', label='Retraction')
axes4.legend(loc="lower right")
for spine in axes4.spines.values():
spine.set_edgecolor(color_4)
spine.set_linewidth(3)
axes4.set_xlabel('Time (s)')
plt.show()
################################dividing line###########################################
if save_data:
df = pd.DataFrame()
df[fname + ' Kymo_1'] = pd.Series(all_normalized_coords[0])
df[fname + ' Kymo_1' + ' retraction pts'] = pd.Series(all_minimas[0]*interval)
df[fname + ' Kymo_1' + ' plateau idx'] = | pd.Series(all_plateau_idx[0]) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
'''
Compute the return ratio.
Input: V, C
Output: return ratio
'''
def returnRatio(V, C=100000.0):
return V/C-1.0
'''
Compute return ratios.
Input: array of V values, C
Output: array of return ratios
'''
def returnRatioArr(VArr, C=100000.0):
arr = []
for v in VArr: arr.append(v/C-1.0)
return arr
'''
Compute the number of valid (invested) days.
Input: buys df, sells df, simulated investment result perf (the df index is time)
Output: number of valid investment days
'''
def validInvestDays(buys, sells, perf):
days = 0
for i in range(len(sells)):
days += (sells.index[i]-buys.index[i]).days
if len(buys)>len(sells):
days += (perf.index[-1]-buys.index[-1]).days
return days
'''
Compute the annualized return ratio.
Input: array of return ratios, T, D
Output: annualized return ratio
'''
def annualizedReturnRatio(returnRatioArr, T=250.0, D=250.0):
import math
tmp = 1
for r in returnRatioArr: tmp *= (r+1)
return math.pow(tmp, D/T)-1
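# Worked example with illustrative figures (not from the source): a portfolio
# worth V=110000 that started from C=100000 gives returnRatio(110000) == 0.10.
# Compounding per-period returns [0.10, -0.05] yields 1.10 * 0.95 = 1.045, so
# with T == D (e.g. both 250 trading days) annualizedReturnRatio([0.10, -0.05])
# == 0.045; for T != D the product is rescaled by the exponent D/T before 1 is
# subtracted.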
'''
Compute moving averages (MA).
Input: close-price Series
Output: DataFrame
'''
def MA(closeSeries, shortWin=5, longWin=20):
shortMA = pd.rolling_mean(closeSeries, window=shortWin)
longMA = pd.rolling_mean(closeSeries, window=longWin)
return pd.DataFrame({'Close': closeSeries, str(shortWin)+'MA':shortMA, str(longWin)+'MA': longMA})
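# Usage note: MA(df['Close']) returns a DataFrame with columns 'Close', '5MA'
# and '20MA' for the default windows. pd.rolling_mean is the pre-0.18 pandas
# API; in later versions the equivalent is closeSeries.rolling(window).mean().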
'''
Compute Bollinger Bands.
Input: close-price Series
Output: DataFrame
'''
def BollingerBand(closeSeries, win=20):
MA = | pd.rolling_mean(closeSeries, window=win) | pandas.rolling_mean |
from collections import deque
import pandas as pd
import datetime
import os
import numpy as np
from torch_rl.utils import Parameters, prRed, Callback, timestamp
import glob
import shutil
class TrainingStatsCallback(Callback):
"""
Keeps training statistics, writes them to a file and loads them.
"""
def __init__(self, episode_window=10, step_window=10,
sample_rate_episodes=1, sample_rate_steps=None, save_rate=10,
save_destination=None, hyperparameters=None, stepwise=False, episodewise=True):
super(TrainingStatsCallback, self).__init__(episodewise=episodewise, stepwise=stepwise)
self.episode_window = episode_window
self.episode_reward_buffer = deque(maxlen=episode_window)
self.step_rewards = []
self.episode_rewards = []
self.sample_rate_episodes=sample_rate_episodes
self.sample_rate_steps = sample_rate_steps
self.rewards = []
self.moving_average_rewards = []
self.save_rate = save_rate
self.hyperparameters = hyperparameters
if save_destination is None:
self.save_destination = 'training_stats_' + timestamp()
else:
self.save_destination = os.path.join(save_destination, "training_stats")
if os.path.isdir(self.save_destination):
prRed(self.save_destination + " is a directory already, delete for new training data? y/n")
res = input()
res = res.lower()
if res == 'y':
shutil.rmtree(self.save_destination)
else:
raise Exception("Start training with another save destination name.")
os.makedirs(self.save_destination)
# Save hyperparameters to a file
self.save_hyperparameters()
# Pandas data frames
self.episode_data = None
self.step_data = None
def save_hyperparameters(self):
if self.hyperparameters:
if not isinstance(self.hyperparameters, Parameters):
raise Exception("User Parameters from torch_rl.utils.Parameters to store parameters")
df = pd.DataFrame.from_records(self.hyperparameters.__dict__)
df.to_pickle(os.path.join(self.save_destination, "parameters.cfg"))
def _step(self, episode, step, reward,**kwargs):
kwargs["reward"] = reward
kwargs['episode'] = episode
kwargs['step'] = step
df = pd.DataFrame.from_records([kwargs], index=['step'])
if not self.step_data is None:
self.step_data = pd.concat([self.step_data, df])
else:
self.step_data = df
if episode % self.save_rate == 0:
self.save()
def _episode_step(self, **kwargs):
self.episode_reward_buffer.append(kwargs['episode_reward'])
episode = kwargs['episode']
kwargs["mvavg_reward"] = np.mean(self.episode_reward_buffer)
df = pd.DataFrame.from_records([kwargs], index=['episode'])
if not self.episode_data is None:
self.episode_data = pd.concat([self.episode_data, df])
else:
self.episode_data = df
if episode % self.save_rate == 0:
self.save()
def save(self):
time_stamp = str(datetime.datetime.now())
path = self.save_destination
print(" #Saving data to", path)
if not self.episode_data is None:
name = time_stamp + "_episode.stats"
self.episode_data.to_pickle(os.path.join(path, name))
if not self.step_data is None:
name = time_stamp + "_step.stats"
self.step_data.to_pickle(os.path.join(path, name))
self.step_data = None
self.episode_data = None
@staticmethod
def load(path="./training_stats"):
files = glob.glob(path + "/*.stats")
files = sorted(files)
data = pd.read_pickle(os.path.join(path, files[0]))
if len(files) > 1:
for f in files[1:]:
d = pd.read_pickle(os.path.join(path, f))
data = | pd.concat([data, d]) | pandas.concat |
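# Illustrative usage sketch for the callback above (paths and trainer wiring
# are assumptions, not part of the source):
#   stats = TrainingStatsCallback(episode_window=10, save_destination='./runs')
# Pass `stats` to the training loop as a callback; after training, the pickled
# frames can be reloaded with TrainingStatsCallback.load('./runs/training_stats').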
import pandas as pd
from abc import ABC, abstractmethod
from .tempstate import TempState
class UtilizationMetric:
"""
Keeps, updates, and aggregates a particular utilization metric in
a time series format.
"""
def __init__(self, metric_name : str, utilization : dict = None):
self.metric_name = metric_name
self.utilization = {'datetime': list(), 'value': list()} if utilization is None else utilization
self.tmp_state = TempState()
def __add__(self, other : 'UtilizationMetric'):
df_1 = pd.DataFrame({'datetime': self.utilization['datetime'], 'value': self.utilization['value']}).set_index('datetime')
df_2 = pd.DataFrame({'datetime': other.utilization['datetime'], 'value': other.utilization['value']}).set_index('datetime')
df_res = df_1.add(df_2, fill_value = 0) / 2
result = self.__class__(self.metric_name, {'datetime': df_res.index.to_list(), 'value': df_res.value.to_list()})
result.tmp_state += other.tmp_state
return result
def update(self, cur_ts : pd.Timestamp, cur_val : float, averaging_interval : pd.Timedelta):
"""
Updates the utilization metric with help of the temporary state.
The temporary state bufferizes observations to aggregate them later on
using the moving average.
"""
util = self.tmp_state.update_and_get(cur_ts, cur_val, averaging_interval)
self.utilization['datetime'].append(cur_ts)
self.utilization['value'].append(util)
def get(self, interval : pd.Timedelta):
"""
Returns the most recent utilization metric values that fall into the
specified interval. If interval is 0, returns all the values.
"""
utilization = | pd.DataFrame(self.utilization) | pandas.DataFrame |
#!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
"""Describes global network structure and computes
centralities for all nodes of a given network.
"""
from glob import glob
from operator import itemgetter
from os.path import basename, splitext
import networkx as nx
import pandas as pd
from scipy.stats import spearmanr
from _200_build_networks import year_name, write_stats
NETWORK_FOLDER = "./200_yearly_networks/"
TARGET_FOLDER = "./205_centralities/"
OUTPUT_FOLDER = "./990_output/"
def compute_centralities(H, G):
"""Return DataFrame with node-wise network measures."""
df = pd.DataFrame(index=sorted(H.nodes()))
df['giant'] = df.index.map(lambda x: int(str(x) in G))
try:
df["in_degree"] = pd.Series(dict(H.in_degree))
df["out_degree"] = pd.Series(dict(H.out_degree))
except AttributeError: # Undirected network
df["degree"] = pd.Series(dict(H.degree))
df["num_2nd_neighbors"] = pd.Series(
{n: num_sec_neigh(n, H) for n in H.nodes()})
df["betweenness"] = pd.Series(
nx.betweenness_centrality(G.to_undirected(), weight="weight"))
df['closeness'] = pd.Series(nx.closeness_centrality(G))
df["eigenvector"] = pd.Series(
nx.eigenvector_centrality_numpy(G, weight="weight"))
return df
def giant(H):
"""Return giant component of a network."""
try:
components = nx.connected_components(H)
except nx.NetworkXNotImplemented: # Directed network
components = nx.weakly_connected_components(H)
return H.subgraph(sorted(components, key=len, reverse=True)[0])
def p_to_stars(p, thres=(0.1, 0.05, 0.01)):
"""Return stars for significance values."""
stars = []
for t in thres:
if p < t:
stars.append("*")
return "".join(stars)
def global_analysis(H, G):
"""Return Series with network descriptives."""
s = pd.Series()
G = G.to_undirected()
s["Nodes"] = nx.number_of_nodes(H)
s["Links"] = nx.number_of_edges(H)
s['Avg. clustering'] = round(nx.average_clustering(H.to_undirected()), 3)
try:
s["Components"] = nx.number_weakly_connected_components(H)
except nx.NetworkXNotImplemented: # Undirected network
s["Components"] = nx.number_connected_components(H)
s["Giant"] = nx.number_of_nodes(G)
s["Density"] = round(nx.density(G), 4)
s["Avg. path length"] = nx.average_shortest_path_length(G)
s["Diameter"] = nx.diameter(G)
return s
def num_sec_neigh(node, G):
"""Return number of unique second-order neighbors."""
neigh_sec_order = nx.single_source_shortest_path_length(G, node, cutoff=2)
return sum(1 for x in neigh_sec_order.values() if x == 2)
def main():
auth = pd.DataFrame(columns=['index', 'centrality'])
com = pd.DataFrame(columns=['index', 'centrality'])
global_auth = | pd.DataFrame() | pandas.DataFrame |
from . import column
from . import load
from . import null
from . import row
from . import save
from . import series
from . import stat
from . import value
import pandas as __pd
from sklearn.preprocessing import MinMaxScaler as __minmax
from sklearn.preprocessing import StandardScaler as __standard
from imblearn.over_sampling import SMOTE as __smote
import anoapycore as __ap
def array_to_df (a_array,b_as_column='') :
"""
This will convert array to pandas dataframe
use [] for b_as_column
"""
if b_as_column == '' :
loc_result = __pd.DataFrame(data=a_array)
else :
loc_result = | __pd.DataFrame(data=a_array,columns=b_as_column) | pandas.DataFrame |
from qfengine.data.price.price_source import MySQLPriceDataSource as SQLTable
from qfengine.asset import assetClasses
import pandas as pd
from typing import Union, List, Dict
import os
import numpy as np
import logging
import functools
from qfengine import settings
import concurrent.futures
logger = logging.getLogger(__name__)
# todo: MOVE THESE TO RESPECTIVE DIR MODULE
class DataVendorMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`name` varchar(64) NOT NULL,
`website_url` varchar(255) NULL,
`api_endpoint_url` varchar(255) NULL,
`api_key_id` varchar(255) NULL,
`api_key` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict=None,
name:str = 'data_vendor',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(DataVendorMySQL.create_schema %name),
**kwargs
)
from qfengine.data import vendor_api
import pandas as pd
#---| initialize all available vendor APIs
_vendorAPIs = {api:getattr(vendor_api,api) for api in vendor_api.__all__}
currentInfo = []
_to_pop = []
for api_name, API in _vendorAPIs.items():
_i = {'name':api_name}
for f in self.all_accepted_columns:
try:
f_dat = getattr(API,f)
except:
pass
else:
if callable(f_dat):
f_dat = f_dat()
_i[f] = f_dat
missing_required = [c for c in self.required_columns if c not in _i]
if len(missing_required) != 0:
_to_pop.append(api_name)
else:
currentInfo.append(_i)
currentInfo = pd.DataFrame.from_dict(currentInfo)
currentInfo = currentInfo.where(pd.notnull(currentInfo),None)
upserted = self.upsertDF(currentInfo,["name"])
for p in _to_pop:
_vendorAPIs.pop(p)
self._APIs = {API.name: API for _,API in _vendorAPIs.items()}
#---| check all init for essential funcs
def get_vendor_API(self,vendor:str):
assert vendor in self.List
return self._APIs[vendor]()
@property
def DF(self):
return self._fullDF().set_index("name").reindex(list(self._APIs.keys()))
@property
def List(self):
return list(self._APIs.keys())
class ExchangeMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`ref_id` varchar(32) NOT NULL,
`name` varchar(255) NOT NULL,
`currency` varchar(64) NULL,
`region` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict = None,
vendors:DataVendorMySQL = None,
name:str = 'exchange',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(ExchangeMySQL.create_schema %name),
**kwargs
)
self.vendors = vendors or DataVendorMySQL(db_credentials,mdb_conn = self._conn)
def UPDATE(self, vendor:str = 'IEX', DF:pd.DataFrame=None):
if DF is None:
DF = self.vendors.get_vendor_API(vendor).exchangesDF()
upserted = self.upsertDF(DF,['ref_id'])
for i,_count in upserted.items():
if _count > 0:
logger.warning("%s %s Exchanges" %(str(i).upper(), str(_count)))
@property
def DF(self):
return self._fullDF().set_index("ref_id")
@property
def List(self):
return [e[0] for e in self.executeSQL("select ref_id from %s" %self._name)]
class SecurityMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`exchange_id` int NOT NULL,
`symbol` varchar(10) NOT NULL,
`type` varchar(10) NULL,
`name` varchar(255) NULL,
`sector` varchar(255) NULL,
`industry` varchar(255) NULL,
`currency` varchar(32) NULL,
`region` varchar(32) NULL,
`figi` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`),
KEY `exchange_id` (`exchange_id` ASC),
KEY `symbol` (`symbol` ASC),
CONSTRAINT `fk_exchange_id`
FOREIGN KEY (`exchange_id`)
REFERENCES `exchange` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict = None,
exchanges:ExchangeMySQL=None,
vendors:DataVendorMySQL=None,
name:str = 'security',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(SecurityMySQL.create_schema %name),
**kwargs
)
self.exchanges = exchanges or ExchangeMySQL(db_credentials, mdb_conn = self._conn)
self.vendors = vendors or self.exchanges.vendors
def UPDATE(self, vendor:str = 'IEX', DF:pd.DataFrame = None,):
if DF is None:
DF = self.vendors.get_vendor_API(vendor).symbolsDF()
if 'exchange_id' not in DF.columns:
potential_exch_cols = [c for c in DF.columns if 'exchange' in c.lower()]
db_exch_id = self.exchanges.DF['id']
for c in potential_exch_cols:
db_exch = db_exch_id.reindex(DF[c].unique())
if len(db_exch.dropna()) == len(db_exch):
db_exch = db_exch.where(pd.notnull(db_exch),None)
DF['exchange_id'] = [db_exch[i] for i in DF[c].values]
break
assert 'exchange_id' in DF.columns, ("unidentified exchange(s) in DF")
DF = DF.drop(columns=potential_exch_cols)
if not isinstance(DF.index, pd.RangeIndex):
DF.reset_index(inplace=True)
assert set(DF.columns).issubset(set(self.all_accepted_columns)), "Unrecognized column(s): %s" %str([c for c in DF.columns if c not in self.all_accepted_columns])
upserted = self.upsertDF(DF,['symbol','exchange_id'])
for i,_count in upserted.items():
if _count > 0:
logger.warning("%s %s Security Symbols" %(str(i).upper(), str(_count)))
@property
def DF(self):
return self._fullDF().set_index("symbol")
@property
def List(self):
return [e[0] for e in self.executeSQL("select symbol from %s" %self._name)]
class DailyPriceMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`data_vendor_id` int NOT NULL,
`symbol_id` int NOT NULL,
`price_date` date NOT NULL,
`open` decimal(19,4) NULL,
`high` decimal(19,4) NULL,
`low` decimal(19,4) NULL,
`close` decimal(19,4) NULL,
`volume` bigint NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`),
KEY `price_date` (`price_date` ASC),
KEY `data_vendor_id` (`data_vendor_id`),
KEY `symbol_id` (`symbol_id`),
CONSTRAINT `fk_symbol_id `
FOREIGN KEY (`symbol_id`)
REFERENCES `security` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_data_vendor_id`
FOREIGN KEY (`data_vendor_id`)
REFERENCES `data_vendor` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
asset_type:assetClasses,
db_credentials:Dict=None,
symbols:SecurityMySQL=None,
vendors:DataVendorMySQL=None,
name:str = 'daily_price',
by_vendor:str = None,
symbols_list:List[str] = None,
**kwargs
):
super().__init__(
db_credentials = (db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
price_table_name = name,
price_table_schema = (DailyPriceMySQL.create_schema %name),
**kwargs
)
self.symbols = symbols or SecurityMySQL(db_credentials, mdb_conn = self._conn)
self.vendors = vendors or self.symbols.vendors
self.asset_type = asset_type
self.symbols_list = symbols_list
self.by_vendor = by_vendor
if self.by_vendor:
assert self.by_vendor in self.vendors.List
self.symbols_list = self._query_available_symbols_in_database_by_vendor(self.by_vendor)
if settings.PRINT_EVENTS:
print("Initialized DailyPriceMySQL DataSource From Vendor '%s' | Available Symbols Count = %s" %(str(self.by_vendor), str(len(self.symbols_list))))
self._cached_copies = []
#!----------| ABSTRACTED METHODS OF A PRICE DATA SOURCE |------------#
#---| Self-Copy
def create_price_source_copy(self,
cache_copy:bool = False,
):
copy = DailyPriceMySQL(
asset_type = self.asset_type,
db_credentials = self._db_credentials.copy(),
name = self._full_credentials['table_name'],
by_vendor = None,
)
copy.by_vendor = self.by_vendor #--| skip SQL vetting
copy.symbols_list = (self.symbols_list.copy() if self.symbols_list else self.symbols_list)
if cache_copy:
self._cached_copies.append(copy)
return copy
#---------------------------|
#------| Assets/Universe
def assetsDF(self,
**kwargs
)->pd.DataFrame:
df = self.symbols.DF.astype(str)
if self.symbols_list:
df = df.reindex(self.symbols_list)
if 'sector' in kwargs:
assert isinstance(kwargs['sector'],str)
df = df[df.sector == kwargs['sector']]
return df
def assetsList(self,
**kwargs
)->list:
return list(self.assetsDF(**kwargs).index.values)
@property
def sectorsList(self)->list:
return self.symbols.DF.sector.dropna().unique()
#---------------------------|
#-----------| Price
def get_assets_bid_ask_dfs(self,
asset:str,
*assets:str,
start_dt=None,
end_dt=None,
)->pd.DataFrame:
return self._price_dfs_to_bid_ask_dfs(
self.get_assets_historical_price_dfs(asset,
*assets,
start_dt = start_dt,
end_dt = end_dt
)
)
def get_assets_historical_price_dfs(self,
asset:str,
*assets:str,
price:str = None,
start_dt = None,
end_dt = None,
adjusted = None,
**kwargs
)->pd.DataFrame:
if price:
assert price in [
"open", "high", "low",
"close","volume"
]
#--| parallelizing queries for performance
symbols = [asset] + [s for s in assets]
result = self._assets_daily_price_DF(*symbols)
if price:
result = result[
[col for col in result.columns if price in col]
]
result.columns = result.columns.get_level_values('symbols')
if start_dt:
result = result[result.index >= self._format_dt(start_dt)]
if end_dt:
result = result[result.index <= self._format_dt(end_dt)]
return result
#---------------------------|
#----| Price Date Ranges
@functools.lru_cache(maxsize = 1024 * 1024)
def get_assets_price_date_ranges_df(self,
asset:str,
*assets:str,
)->pd.DataFrame:
symbols = [asset] + [s for s in assets]
if self.symbols_list:
assert set(symbols).issubset(self.symbols_list)
def _get_result(source, symbol, vendor):
return {
'symbol': symbol,
'start_dt': self._format_dt(source._asset_symbol_min_price_date_by_vendor(symbol, vendor)),
'end_dt': self._format_dt(source._asset_symbol_max_price_date_by_vendor(symbol,vendor)),
}
final_df = pd.DataFrame()
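        # Try vendors in order: after each pass, symbols whose date range was found
        # are dropped from `symbols`, so later vendors only fill in what is missing.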
for vendor in self.vendorsList:
result = pd.DataFrame.from_dict(
list(
concurrent.futures.ThreadPoolExecutor().map(
_get_result,
*zip(*(
(
self.create_price_source_copy(cache_copy = True),
symbol,
vendor,
) for symbol in symbols
))
)
)
).set_index('symbol').dropna()
self._close_cached_copies()
final_df = final_df.append(result)
symbols = [s for s in symbols if s not in final_df.index]
if len(symbols) == 0:
break
return final_df
def get_assets_minimum_start_dt(self,
asset:str,
*assets:str,
)->pd.Timestamp:
return self._format_dt(max(
self.get_assets_price_date_ranges_df(
asset, *assets
).start_dt.values
))
def get_assets_maximum_end_dt(self,
asset:str,
*assets:str,
)->pd.Timestamp:
return self._format_dt(min(
self.get_assets_price_date_ranges_df(
asset, *assets
).end_dt.values
))
#---------------------------|
#!----------------------------------------------------------------#
def update_assets_daily_price(self,
vendor:str,
batch_size:int=100,
symbols_to_update:List[str] = None,
skip_empty_update:bool = True,
DF:pd.DataFrame=None,
):
assert vendor in self.vendors.List
vendor_id = self.vendors.DF.id[vendor]
symbols_id = self.symbols.DF['id']
inserted_count = 0
updated_count = 0
t0 = pd.Timestamp.now()
if DF is not None: #---| UPSERT ONCE WITH GIVEN DATAFRAME (NO API CALLS WILL BE MADE)
self._upsert_daily_price_DF(DF,['price_date', 'symbol_id', 'data_vendor_id'])
else: #---| PERFORM UPSERT IN BATCHES OF SYMBOLS BY MAKING API CALLS
if symbols_to_update is not None:
omitted = [s for s in symbols_to_update if s not in symbols_id.index]
if len(omitted) > 0:
logger.warning("Omitting %s given symbols that are not in database universe" %str(len(omitted)))
logger.warning(str(omitted))
symbols_id = symbols_id.reindex([s for s in symbols_to_update if s not in omitted])
symbols_id = dict(symbols_id)
assert len(symbols_id) != 0, "No symbols in Symbol table, code out schematic or manually prepare it (symbol.csv)"
logger.warning("----------------------------------------------------------------")
logger.warning("---------------| Updating Equities Daily Prices |---------------")
logger.warning("----------------------------------------------------------------")
logger.warning("Checking database for latest available price_date from '%s' of %s symbols..." %(vendor,str(len(symbols_id))))
#!---| Query for all symbols to get their max price_dates from SQL Database
symbols_by_max_dates = {}
for s in symbols_id:
d = self._asset_symbol_max_price_date_by_vendor(s,vendor)
if d not in symbols_by_max_dates:
symbols_by_max_dates[d] = []
if s not in symbols_by_max_dates[d]:
symbols_by_max_dates[d].append(s)
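            # symbols_by_max_dates groups tickers by their latest stored price date, e.g.
            # {Timestamp('2021-01-04'): ['AAPL', 'MSFT'], None: ['NEWLIST']} (tickers are
            # illustrative); every symbol in a group shares the same API start_date below.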
#!---| Download latest daily price for symbols with their respective start dates & process for upsert to Database
logger.warning("Performing Update in batches of %s" %str(batch_size))
for start_date,symbols in symbols_by_max_dates.items():
logger.warning("For %s symbols with '%s' as start_date:" %(str(len(symbols)),str(start_date)))
i = 0
batch_number = 1
while True:
symbols_batch = symbols[i:i+batch_size]
batch_data = self.vendors.get_vendor_API(vendor).get_barset(symbols_batch,"1D",start_date)
try:
batch_data = self._transform_DF_for_daily_price_upsert(batch_data, vendor = vendor, start_date=start_date)
except Exception:
logger.warning("Cannot transform, skipping this batch of symbols: %s" %str(symbols_batch))
pass
else:
upserted = self.upsertDF(batch_data,no_filter=True)
for a,_count in upserted.items():
if _count > 0:
if a == 'inserted':
logger.warning(" Batch #%s : %s New Data Points Inserted" %(str(batch_number),_count))
inserted_count += _count
elif a == 'updated':
logger.warning(" Batch #%s : %s Existing Data Points Updated" %(str(batch_number),_count))
updated_count += _count
if (upserted['inserted'] == upserted['updated'] == 0) and skip_empty_update:
logger.warning(" No New Data Upserted. Skipping remaining symbols (set skip_empty_update=False for otherwise)")
break
#---| Loop Breaker
if symbols_batch[-1] == symbols[-1]:
break
else:
i += batch_size
batch_number += 1
print("Update Completed:")
print("--Total Data Points Inserted: %s" %(str(inserted_count)))
print("--Total Data Points Updated: %s" %(str(updated_count)))
print("--Total Time Elapsed: %s" %(str(pd.Timestamp.now() - t0)))
@property
def vendorsDF(self,)->pd.DataFrame:
return self.vendors.DF.reindex(self.vendorsList)
@property
def vendorsList(self,)->list:
return self.vendors.List if self.by_vendor is None else [self.by_vendor]
#!---| BACKEND
def _transform_DF_for_daily_price_upsert(self,
upsert_df:pd.DataFrame,
**kwargs
):
df = upsert_df.copy()
#---| Case I: Basic DF with no MultiIndex columns
if not isinstance(df.columns,pd.MultiIndex):
assert 'symbol_id' in df.columns
if 'price_date' not in df.columns:
assert not isinstance(df.index, pd.RangeIndex)
df.index = pd.DatetimeIndex(df.index)
df['price_date'] = df.index.values
df.index = range(len(df))
if 'data_vendor_id' not in df.columns:
vendor_id = int(self.vendors.DF.id[kwargs['vendor']]) if 'vendor' in kwargs else (
int(kwargs['vendor_id']) if 'vendor_id' in kwargs else None
)
assert isinstance(vendor_id, int)
df['data_vendor_id'] = vendor_id
return df.where(pd.notnull(df), None)
#---| Case II: MultiIndex Columns of (symbols, columns)
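        #     e.g. columns such as ('AAPL', 'open'), ('AAPL', 'close'), ('MSFT', 'open'), ...
        #     (tickers illustrative), i.e. level 'symbols' first, then the price fields.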
else:
assert not isinstance(df.index, pd.RangeIndex)
df.index = pd.DatetimeIndex(df.index)
symbols_id = dict(kwargs['symbols_id']) if 'symbols_id' in kwargs else dict(self.symbols.DF['id'])
vendor_id = int(self.vendors.DF.id[kwargs['vendor']]) if 'vendor' in kwargs else (
int(kwargs['vendor_id']) if 'vendor_id' in kwargs else None
)
assert isinstance(vendor_id, int)
assert isinstance(symbols_id, dict)
try:
df_symbols = list(df.columns.get_level_values('symbols').unique())
except KeyError:
if settings.PRINT_EVENTS:
print("Daily Price columns does not contain 'symbols' as name. "
"Attempting to grab the first index locations..."
)
df_symbols = list(pd.Series([i[0] for i in df.columns]).unique())
assert set(df_symbols).issubset(set(symbols_id.keys())), (
"Daily Price data contains unidentified symbol(s) without id(s): %s" %(
str([
s for s in df_symbols if s not in symbols_id
])
)
)
start_date = pd.Timestamp(kwargs['start_date']) if ('start_date' in kwargs) else None
transformed_df = | pd.DataFrame() | pandas.DataFrame |
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import numpy as np
import pandas as pd
import xgboost as xgb
class LIMEExplainer(object):
""" Basic framework for building Local, Interpretable, Model-agnostic
Explanations (LIMEs) for XGBoost models. Supports regression, binomial,
and multinomial classification.
    :ivar training_frame: Pandas DataFrame of training data containing the row to be explained,
mandatory.
:ivar X: List of XGBoost model inputs. Inputs must be numeric, mandatory.
:ivar model: Trained XGBoost booster to be explained, mandatory.
:ivar N: Size of LIME local, perturbed sample. Integer, default 10000.
:ivar discretize: Numeric variables to discretize. List, default `X`.
:ivar quantiles: Number of bins to create in numeric variables. Integer,
default 4.
:ivar intercept: Whether local linear models should include an intercept.
Boolean, default True. (EXPERIMENTAL)
:ivar seed: Random seed for enhanced reproducibility. Integer, default
12345.
:ivar print_: Whether to print a table of local contributions (reason
codes) and plot `top_n` local contributions (reason codes).
Boolean, default True.
:ivar top_n: Number of highest and lowest Local contributions (reason codes)
to plot. Integer, default 5.
:ivar reason_code_values: Pandas DataFrame containing local contributions
(reason codes) for `model` and row to be
explained.
:ivar lime_r2: R\ :sup:`2` statistic for trained local linear model, float.
:ivar lime_pred: Prediction of trained local linear model for row of
interest.
:ivar lime: Trained local linear model, H2OGeneralizedLinearEstimator.
:ivar bins_dict: Dictionary of bins used to discretize the LIME sample.
:ivar multinomial: Whether the model is a multinomial model
Reference: https://arxiv.org/abs/1602.04938
"""
def __init__(self, training_frame=None, X=None, model=None,
N=None, discretize=None, quantiles=None, seed=None,
print_=None, top_n=None, intercept=None, multinomial=False):
# mandatory
if training_frame is not None:
self.training_frame = training_frame
else:
raise ValueError('Parameter training_frame must be defined.')
if X is not None:
self.X = X
else:
raise ValueError('Parameter X must be defined.')
if model is not None:
self.model = model
else:
raise ValueError('Parameter model must be defined.')
# defaults
if N is not None:
self.N = N
else:
self.N = 10000
if discretize is not None:
self.discretize = discretize
else:
self.discretize = None
if quantiles is not None:
self.quantiles = quantiles
else:
self.quantiles = 4
if seed is not None:
self.seed = seed
else:
self.seed = 12345
if print_ is not None:
self.print_ = print_
else:
self.print_ = True
if top_n is not None:
self.top_n = top_n
else:
self.top_n = 5
if intercept is not None:
self.intercept = intercept
else:
self.intercept = True
# internal storage
self.reason_code_values = None
self.lime_r2 = None
self.lime = None
self.lime_pred = None
self.bins_dict = {}
self.multinomial = multinomial
h2o.no_progress() # do not show h2o progress bars
def _generate_local_sample(self, row):
""" Generates a perturbed, local sample around a row of interest.
:param row: Row of Pandas DataFrame to be explained.
:return: Pandas DataFrame containing perturbed, local sample.
"""
# initialize Pandas DataFrame
sample_frame = pd.DataFrame(data=np.zeros(shape=(self.N, len(self.X))),
columns=self.X)
# generate column vectors of
# normally distributed numeric values around mean of numeric variables
# with std. dev. of original numeric variables
for key, val in self.training_frame[self.X].dtypes.items():
rs = np.random.RandomState(self.seed)
loc = row[key]
sd = self.training_frame[key].std()
draw = rs.normal(loc, sd, (self.N, 1))
sample_frame[key] = draw
return sample_frame
def _score_local_sample(self, local_sample, row):
""" Scores the perturbed, local sample with the user-supplied XGBoost
`model`.
:param local_sample: perturbed, local sample generated by
`_generate_local_sample`.
:param row: Row of Pandas DataFrame to be explained.
:return: Pandas DataFrame containing scored, perturbed, local sample.
"""
dlocal_sample = xgb.DMatrix(local_sample)
if not self.multinomial:
scored_local_sample = pd.DataFrame(self.model.predict(dlocal_sample))
else:
scored_local_sample = pd.DataFrame(self.model.predict(dlocal_sample)).max(axis=1) #For multinomial -> .max(axis=1))
scored_local_sample = scored_local_sample.to_frame()
scored_local_sample.columns = ['predict']
if not self.multinomial:
print('\nModel Prediction: %.2f' % self.model.predict(xgb.DMatrix(pd.DataFrame(pd.to_numeric(row)).T))[0])
else:
pred_row = row.to_frame().T
print("Row:\n")
print(pred_row)
print("\n")
print("Multinomial Model Prediction:\n")
print(pd.DataFrame(self.model.predict(xgb.DMatrix(pred_row))))
print("\n")
return pd.concat([local_sample, scored_local_sample], axis=1)
def _calculate_distance_weights(self, row_id, scored_local_sample):
""" Adds inverse distance weighting from row of interest to perturbed
local sample.
:param row_id: Row index of row to be explained in `training_frame`.
:param scored_local_sample: Scored, perturbed, local sample generated by
`_score_local_sample`.
:return: Pandas DataFrame containing weighted, scored perturbed local
sample.
"""
# scaling for calculating Euclidian distance
# for the row of interest
scaled_training_frame = (self.training_frame[self.X] -
self.training_frame[self.X].mean()) \
/ self.training_frame[self.X].std()
row = scaled_training_frame.iloc[row_id, :][self.X]
# scaling for calculating Euclidian distance
# for the perturbed local sample
scaled_scored_local_sample = scored_local_sample[self.X].copy(deep=True)
scaled_scored_local_sample = (scaled_scored_local_sample -
scaled_scored_local_sample.mean()) \
/ scaled_scored_local_sample.std()
# convert to h2o and calculate distance
row_h2o = h2o.H2OFrame(pd.DataFrame(row).T)
scaled_scored_local_sample_h2o = \
h2o.H2OFrame(scaled_scored_local_sample)
distance = row_h2o.distance(scaled_scored_local_sample_h2o,
measure='l2').transpose()
distance.columns = ['distance']
# lower distances, higher weight in LIME
distance = distance.max() - distance
return pd.concat([scored_local_sample, distance.as_data_frame()],
axis=1)
def _discretize_numeric(self, weighted_local_sample):
""" Conditionally discretize the inputs in the weighted,
scored, perturbed, local sample generated by
`_calculate_distance_weights` into `quantiles` bins.
:param weighted_local_sample: Weighted, scored, perturbed, local
sample generated by
`_calculate_distance_weights`.
:return: Pandas DataFrame containing discretized, weighted, scored,
perturbed, local sample.
"""
# initialize empty dataframe to be returned
columns = self.X + ['predict', 'distance']
discretized_weighted_local_sample = \
pd.DataFrame(np.zeros((self.N, len(columns))),
columns=columns)
# save bins for later use and apply to current sample
for name in self.discretize:
ser, bins = pd.qcut(weighted_local_sample.loc[:, name],
self.quantiles, retbins=True)
discretized_weighted_local_sample.loc[:, name] = ser
self.bins_dict[name] = bins
# fill in remaining columns
not_in = list(set(columns)-set(self.discretize))
discretized_weighted_local_sample.loc[:, not_in] = \
weighted_local_sample.loc[:, not_in]
return discretized_weighted_local_sample
def _regress(self, weighted_local_sample, row_h2o):
""" Train local linear model using h2o and calculate local
contributions (reason codes).
:param weighted_local_sample: Weighted, scored, perturbed local
sample generated by
`_calculate_distance_weights` OR weighted,
scored, perturbed, and discretized local
sample generated by
`_discretize_numeric`.
:param row_h2o: Row to be explained as H2OFrame.
:return: Trained local linear model as H2OGeneralizedLinearEstimator.
"""
# convert to h2o and split
weighted_local_sample_h2o = h2o.H2OFrame(weighted_local_sample)
# initialize
lime = H2OGeneralizedLinearEstimator(lambda_search=True,
weights_column='distance',
intercept=self.intercept,
seed=self.seed)
# train
lime.train(x=self.X, y='predict',
training_frame=weighted_local_sample_h2o)
# output
self.lime_r2 = lime.r2()
self.lime_pred = lime.predict(row_h2o)[0, 0]
print('Local GLM Prediction: %.2f' % self.lime_pred)
print('Local GLM R-square: %.2f' % self.lime_r2)
print('Local GLM Intercept: %.2f\n' % lime.coef()['Intercept'])
# initialize Pandas DataFrame to store results
self.reason_code_values = pd.DataFrame(columns=['Input',
'Local Contribution'])
# multiply values in row by local glm coefficients to local
# contributions (reason codes)
for key, val in row_h2o[self.X].types.items():
contrib = 0
if val == 'enum':
level = row_h2o[key][0, 0]
name = '.'.join([str(key), str(level)])
if name in lime.coef():
contrib = lime.coef()[name]
else:
name = key
if name in lime.coef():
contrib = row_h2o[name][0, 0] * lime.coef()[name]
# save only non-zero values
if np.abs(contrib) > 0.0:
self.reason_code_values = \
self.reason_code_values.append({'Input': name,
'Local Contribution':
contrib},
ignore_index=True)
# sort
self.reason_code_values.sort_values(by='Local Contribution',
inplace=True)
self.reason_code_values.reset_index(inplace=True, drop=True)
return lime
def _plot_local_contrib(self):
""" Plots local contributions (reason codes) in a bar chart. """
local_contrib_plot_frame = self.reason_code_values
if local_contrib_plot_frame.shape[0] > self.top_n * 2:
# plot top and bottom local contribs
local_contrib_plot_frame = \
self.reason_code_values.iloc[:self.top_n, :].\
append(self.reason_code_values.iloc[-self.top_n:, :])
_ = local_contrib_plot_frame.plot(x='Input',
y='Local Contribution',
kind='bar',
title='Reason Codes',
legend=False,
color='b')
def explain(self, row_id):
""" Executes lime process.
:param row_id: The row index of the row in `training_frame` to be
explained.
"""
row = self.training_frame.iloc[row_id, :]
local_sample = self._generate_local_sample(row)
scored_local_sample = \
self._score_local_sample(local_sample,
row[local_sample.columns])
weighted_scored_local_sample = \
self._calculate_distance_weights(row_id,
scored_local_sample)
if self.discretize is not None:
discretized_weighted_local_sampled = \
self._discretize_numeric(weighted_scored_local_sample)
disc_row = pd.DataFrame(columns=self.X)
for name in self.discretize:
disc_row[name] = pd.cut( | pd.Series(row[name]) | pandas.Series |
import pandas as pd
import src.variables as var
| pd.set_option('display.max_rows', 500) | pandas.set_option |
from os.path import exists, join
import pandas as pd
import torch
import logging
from transformers import AutoModelForSequenceClassification
from train_bert import compute_negative_entropy, LMForSequenceClassification
from dataset import get_dataset_by_name, TokenizerDataModule
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
import glob
import numpy as np
from IPython.display import display
import os
from os.path import join
import re
from collections import namedtuple
import pdb
logging.basicConfig(
format="%(levelname)s:%(asctime)s:%(module)s:%(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
class ScoreHandler:
"""Standardize how scores are saved and loaded for a given model & dataset."""
def __init__(self, dataset: torch.utils.data.Dataset):
self.dataset = dataset
def save_scores(self, scores, root_dir: str, column_name: str, dataset: str):
"""Save the scores for a model on a dataset.
It uses a single csv file per dataset. Each column refers to the scores of a
        single model.
        Return: (DataFrame with scores, path of the file containing the scores)
"""
file_name = f"scores_{dataset}.csv"
file_path = join(root_dir, file_name)
df = pd.read_csv(file_path) if exists(file_path) else self.dataset.data.copy()
if column_name in df.columns:
logging.info(f"Scores for {column_name} are present. Overriding them...")
df[column_name] = scores
df.to_csv(file_path, index=False)
return df, file_path
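# Hedged usage sketch for ScoreHandler (dataset/column names are illustrative):
#   handler = ScoreHandler(val_dataset)   # any Dataset exposing its rows as `.data` (a DataFrame)
#   df, path = handler.save_scores(scores, root_dir="runs",
#                                  column_name="bert_base", dataset="misogyny")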
def load_model_from_folder(model_dir, pattern=None):
if pattern:
ckpt = glob.glob(join(model_dir, f"*{pattern}*"))[0]
else:
ckpt = glob.glob(f"{model_dir}/*.ckpt")[0]
print("Loading", ckpt)
if pattern:
model = LMForSequenceClassification.load_from_checkpoint(ckpt)
else:
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
return model
def join_subwords(tokens):
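    # Example (WordPiece): ["he", "is", "un", "##believ", "##able"]
    # -> new_tokens ["he", "is", "unbelievable"] (as an np.array), pop_idxs [3, 4], spans [(2, 5)]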
span_start_idx = -1
spans = list()
for i, t in enumerate(tokens):
if t.startswith("#") and span_start_idx == -1:
span_start_idx = i - 1
continue
if not t.startswith("#") and span_start_idx != -1:
spans.append((span_start_idx, i))
span_start_idx = -1
# span open at the end
if span_start_idx != -1:
spans.append((span_start_idx, len(tokens)))
merged_tkns = list()
pop_idxs = list()
for span in spans:
merged = "".join([t.strip("#") for t in tokens[span[0] : span[1]]])
merged_tkns.append(merged)
# indexes to remove in the final sequence
for pop_idx in range(span[0] + 1, span[1]):
pop_idxs.append(pop_idx)
new_tokens = tokens.copy()
for i, (span, merged) in enumerate(zip(spans, merged_tkns)):
new_tokens[span[0]] = merged # substitue with whole word
mask = np.ones(len(tokens))
mask[pop_idxs] = 0
new_tokens = np.array(new_tokens)[mask == 1]
assert len(new_tokens) == len(tokens) - len(pop_idxs)
return new_tokens, pop_idxs, spans
def average_2d_over_spans(tensor, spans, reduce_fn="mean"):
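    # Collapse subword columns of `tensor` (layers x tokens): each (start, end) span is
    # replaced by the mean (or sum) of its columns, so the token axis lines up with the
    # whole words produced by join_subwords.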
# print("Spans #", spans)
slices = list()
last_span = None
for span in spans:
# first slice
if last_span is None:
slices.append(tensor[:, : span[0]])
else:
slices.append(tensor[:, last_span[1] : span[0]])
# average over the subwords
if reduce_fn == "mean":
slices.append(tensor[:, span[0] : span[1]].mean(-1).unsqueeze(-1))
else:
slices.append(tensor[:, span[0] : span[1]].sum(-1).unsqueeze(-1))
last_span = span
# last slice
if spans[-1][1] != tensor.shape[1]:
slices.append(tensor[:, last_span[1] :])
res = torch.cat(slices, dim=1)
# print("After average:", res.shape)
return res
def get_scores(y_true, scores_path):
scores = torch.load(scores_path)
y_pred = torch.zeros(scores.shape[0]).masked_fill(scores >= 0.5, 1)
fp_mask = (y_true == 0) & (y_pred == 1)
fp = torch.zeros(scores.shape[0]).masked_fill(fp_mask, 1)
fp_indexes = torch.nonzero(fp).squeeze(-1)
print(f"Found {fp_indexes.shape[0]} FPs")
return {"scores": scores, "y_pred": y_pred, "fp_indexes": fp_indexes}
#### VISUALIZATION: ENTROPY ####
def show_entropy(
models,
tokenizer,
max_sequence_length,
data,
names,
n_samples=2,
idxs=None,
regularization="entropy",
join=False,
layers_mean=False,
prompt=None,
exp=False,
remove_special=False,
labelsize=15,
titlesize=15,
set_figsize=True,
set_tightlayout=True,
):
def process_text(idx, text):
with torch.no_grad():
print(text)
encoding = tokenizer(
text,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=max_sequence_length,
return_tensors="pt",
)
tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0])
if remove_special:
tokens = tokens[1:-1]
# print("Len:", len(tokens), "tokens:", tokens)
if join:
# join subwords for better visualization
new_tokens, pop_idxs, spans = join_subwords(tokens)
# print("Len new tokens", len(new_tokens))
tokens = new_tokens
heatmap_list = list()
final_entropies = list()
y_scores = list()
for i, (model, name) in enumerate(zip(models, names)):
if regularization == "entropy":
output = model(**encoding, output_attentions=True)
reg_target = output["attentions"]
else:
output = model(**encoding, output_norms=True)
norms = output["norms"]
afx_norms = [t[1] for t in norms]
reg_target = afx_norms
logits = output["logits"]
y_score = logits.softmax(-1)[0, 1]
print(y_score)
neg_entropy, entropies = compute_negative_entropy(
reg_target, encoding["attention_mask"], return_values=True
)
# print("Entropies shape:", entropies[0].shape)
# join_subwords(entropies, tokens)
# print(name, "Final entropy: ", -neg_entropy.item())
entropies = -entropies[0] # take positive entropy
entropies = torch.flipud(entropies) # top layers are placed to the top
# average subwords
if join and len(spans) > 0:
entropies = average_2d_over_spans(entropies, spans)
if layers_mean:
entropies = entropies.mean(0).unsqueeze(0)
if exp:
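                    # map H -> log(1/H) so that low-entropy (concentrated) attention shows up as large values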
entropies = (1 / entropies).log()
if remove_special:
entropies = entropies[:, 1:-1]
heatmap_list.append(entropies)
final_entropies.append(-neg_entropy.item())
y_scores.append(y_score)
#### VISUALIZATION ####
if layers_mean:
figsize = (12, 2 * len(models))
else:
figsize = (6 * len(models), 6)
if set_figsize:
fig = plt.figure(constrained_layout=False, figsize=figsize)
else:
fig = plt.figure(constrained_layout=False)
if regularization == "entropy":
fig.suptitle(
f"H: Entropy on Attention (a), ID:{idx}"
) # , {data[idx]}")
else:
fig.suptitle(
f"Entropy on Norm (||a*f(zx)||), ID:{idx}"
) # , {data[idx]}")
if set_tightlayout:
fig.tight_layout()
# compute global min and global max
heatmap_tensor = torch.stack(heatmap_list)
glob_min = heatmap_tensor.min().item()
glob_max = heatmap_tensor.max().item()
# print("Glob max:", glob_max, "Glob min", glob_min)
for i, name in enumerate(names):
if layers_mean:
gspec = fig.add_gridspec(
len(models), 2, width_ratios=[20, 1], wspace=0.1, hspace=0.1
)
splot = fig.add_subplot(gspec[i, 0])
if i == (len(names) - 1):
cbar_ax = fig.add_subplot(gspec[:, 1])
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=True,
cbar_ax=cbar_ax,
square=True,
vmin=glob_min,
vmax=glob_max,
)
splot.set_xticks(np.arange(heatmap_list[i].shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
[t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
cbar_ax.set_title(
"log(1/H)", fontsize=titlesize
) if exp else cbar_ax.set_title("H", fontsize=titlesize)
else:
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=False,
square=True,
vmin=glob_min,
vmax=glob_max,
)
splot.set_xticklabels([])
splot.set_yticklabels([])
splot.set_title(
f"{name}, p(1|x)={y_scores[i]:.3f}, H={final_entropies[i]:.3f}",
fontsize=titlesize,
)
else:
width_ratios = [10] * len(models)
width_ratios += [1]
gspec = fig.add_gridspec(
1, len(models) + 1, width_ratios=width_ratios, wspace=0.2
)
splot = fig.add_subplot(gspec[0, i])
if i == (len(names) - 1):
cbar_ax = fig.add_subplot(gspec[0, -1])
sns.heatmap(
heatmap_list[i],
ax=splot,
cbar=True,
cbar_ax=cbar_ax,
square=True,
vmin=glob_min,
vmax=glob_max,
)
[t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
cbar_ax.set_title(
"log(1/H)", fontsize=titlesize
) if exp else cbar_ax.set_title("H", fontsize=titlesize)
else:
sns.heatmap(heatmap_list[i], ax=splot, cbar=False, square=True)
if i == 0:
splot.set_ylabel("Layer", fontsize=labelsize)
splot.set_yticklabels(np.arange(11, -1, -1), fontsize=labelsize)
else:
splot.set_yticklabels([])
splot.set_xticks(np.arange(heatmap_list[i].shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
splot.set_title(
f"{name}, p(1|x)={y_scores[i]:.3f}, H={final_entropies[i]:.3f}",
fontsize=titlesize,
)
# print(len(tokens), len(axes[i].get_xticklabels()))
# print(entropies.shape)
# axes[i].set_xticks(np.arange(heatmap_list[i].shape[-1]))
# axes[i].set_xticklabels(tokens, rotation=90)
# axes[i].set_title(f"{name}, p(1|x)={y_scores[i]:.3f}, e={final_entropies[i]:.3f}")
# axes[i].set_yticklabels([])
return fig
if prompt:
idx = "custom"
text = prompt
print("ID: ", idx, text)
return process_text(idx, text)
if idxs is None:
# pick random samples to show
idxs = np.random.randint(len(data), size=n_samples)
print(idxs)
for idx in idxs:
print("ID: ", idx, data[idx])
process_text(idx, data[idx]["text"])
def compare_sentences(
model,
tokenizer,
sentences,
max_sequence_length=120,
remove_special=True,
join=True,
show_log=True,
labelsize=15,
titlesize=15,
figsize=(12, 12),
):
processed = list()
with torch.no_grad():
for text in sentences:
encoding = tokenizer(
text,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=max_sequence_length,
return_tensors="pt",
)
tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"][0])
if remove_special:
tokens = tokens[1:-1]
if join:
# join subwords for better visualization
new_tokens, pop_idxs, spans = join_subwords(tokens)
# print("Len new tokens", len(new_tokens))
tokens = new_tokens
output = model(**encoding, output_attentions=True)
logits = output["logits"]
y_score = logits.softmax(-1)[0, 1]
neg_entropy, entropies = compute_negative_entropy(
output["attentions"], encoding["attention_mask"], return_values=True
)
# print("Entropies shape:", entropies[0].shape)
# print(name, "Final entropy: ", -neg_entropy.item())
entropies = -entropies[0] # take positive entropy
# average subwords
if join and len(spans) > 0:
entropies = average_2d_over_spans(entropies, spans)
entropies = entropies.mean(0).unsqueeze(0)
if show_log:
entropies = (1 / entropies).log()
if remove_special:
entropies = entropies[:, 1:-1]
processed.append((tokens, y_score, entropies))
# print(processed)
fig = plt.figure(constrained_layout=False, figsize=figsize)
gspec = fig.add_gridspec(len(sentences) * 2, 1, hspace=2, wspace=5)
vmin = torch.stack([p[2] for p in processed]).min().item()
vmax = torch.stack([p[2] for p in processed]).max().item()
print(vmin, vmax)
for i, (tokens, y_score, entropies) in enumerate(processed):
splot = fig.add_subplot(gspec[i, 0])
# cbar_ax = fig.add_subplot(gspec[:, 1])
sns.heatmap(
entropies,
ax=splot,
cbar=False,
# cbar_ax=cbar_ax,
square=True,
# cmap="Reds",
annot=False,
vmin=vmin,
vmax=vmax,
)
splot.set_xticks(np.arange(entropies.shape[-1]) + 0.5)
splot.set_xticklabels(tokens, rotation=90, fontsize=labelsize)
splot.set_yticklabels([])
splot.set_title(
f"p(1|x)={y_score:.3f}",
fontsize=titlesize,
)
# [t.set_fontsize(labelsize) for t in cbar_ax.get_yticklabels()]
# title to colorbar
# cbar_ax.set_title(
# "log(1/H)", fontsize=titlesize
# ) if exp else cbar_ax.set_title("H", fontsize=titlesize)
# fig.tight_layout()
#### BIAS_ANALYSIS: parsing results and bias analysis
def match_pattern_concat(main_dir, pattern, verbose=True):
"""Find all files that match a patter in main_dir. Then concatenate their content into a pandas df."""
versions = glob.glob(join(main_dir, pattern))
if verbose:
print(f"Found {len(versions)} versions")
res = list()
for version in versions:
df = | pd.read_csv(version) | pandas.read_csv |
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pds
from datetime import datetime as dt
import datetime
import plotly.graph_objs as go
import plotly.express as px
from app import app, template
update_date = dt(2020, 3, 31)
d_trust = pds.read_csv('assets/d_trust.csv')
d_trust['hospitaladmissiondate'] = pds.to_datetime(d_trust['hospitaladmissiondate'], errors='coerce')
icu_forecast = pds.read_csv('assets/icu_risk_forecast.csv')
icu_forecast['hospitaladmissiondate'] = pds.to_datetime(icu_forecast['hospitaladmissiondate'], errors='coerce')
death_forecast = pds.read_csv('assets/death_risk_forecast.csv')
death_forecast['hospitaladmissiondate'] = | pds.to_datetime(death_forecast['hospitaladmissiondate'], errors='coerce') | pandas.to_datetime |
import sys
import unittest
import subprocess
import time
import logging
import numpy as np
import pandas as pd
import swifter
from tqdm.auto import tqdm
from psutil import cpu_count
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-8s.%(msecs)03d %(levelname)-8s %(name)s:%(lineno)-3s %(message)s")
ch.setFormatter(formatter)
LOG.addHandler(ch)
def math_vec_square(x):
return x ** 2
def math_foo(x, compare_to=1):
return x ** 2 if x < compare_to else x ** (1 / 2)
def math_vec_multiply(row):
return row["x"] * row["y"]
def math_agg_foo(row):
return row.sum() - row.min()
def text_foo(row):
if row["letter"] == "A":
return row["value"] * 3
elif row["letter"] == "B":
return row["value"] ** 3
elif row["letter"] == "C":
return row["value"] / 3
elif row["letter"] == "D":
return row["value"] ** (1 / 3)
elif row["letter"] == "E":
return row["value"]
class TestSwifter(unittest.TestCase):
def assertSeriesEqual(self, a, b, msg):
try:
pd.testing.assert_series_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def assertDataFrameEqual(self, a, b, msg):
try:
pd.testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
LOG.info(f"Version {swifter.__version__}")
self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataFrameEqual)
self.ncores = cpu_count()
def test_set_npartitions(self):
LOG.info("test_set_npartitions")
for swifter_df, set_npartitions, expected in zip(
[
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
],
[None, 1000, 1001, 1002],
[cpu_count() * 2, 1000, 1001, 1002],
):
before = swifter_df._npartitions
swifter_df.set_npartitions(set_npartitions)
actual = swifter_df._npartitions
self.assertEqual(actual, expected)
if set_npartitions is not None:
self.assertNotEqual(before, actual)
def test_set_dask_threshold(self):
LOG.info("test_set_dask_threshold")
expected = 1000
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
]:
before = swifter_df._dask_threshold
swifter_df.set_dask_threshold(expected)
actual = swifter_df._dask_threshold
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_set_dask_scheduler(self):
LOG.info("test_set_dask_scheduler")
expected = "my-scheduler"
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
]:
before = swifter_df._scheduler
swifter_df.set_dask_scheduler(expected)
actual = swifter_df._scheduler
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_disable_progress_bar(self):
LOG.info("test_disable_progress_bar")
expected = False
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index= | pd.date_range("2019-01-1", "2020-01-1", periods=10) | pandas.date_range |
import pandas as pd
# ====================== level 0: basic input =======================
def read_bas_ecp(fmol):
from qharv.reel import ascii_out
mm = ascii_out.read(fmol)
# skip comment lines
for iline in range(3):
line = mm.readline()
lines = []
for iline in range(3):
line = mm.readline().decode()
lines.append(line)
symm_line = lines[0]
if not symm_line.startswith('C'):
msg = 'need Cartesian expansion for ECP run\n'
msg += symm_line
raise RuntimeError(msg)
atom_line = lines[1]
toks = list(map(float, atom_line.split()))
if len(toks) > 2:
msg = 'can handle only 1 species for now'
raise NotImplementedError(msg)
pos_line = lines[2]
elem = pos_line.split()[0]
# convert basis string to PySCF format
dbas_str = ascii_out.block_text(mm, 'LARGE EXPLICIT', 'ECP')
ang_moms = ['S', 'P', 'D', 'F', 'G', 'H']
nang_avail = len(ang_moms)
bas_fmt = '%16.8f %20.8f\n' # expo, c
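    # Each primitive is emitted as an "<elem> <ang>" header line plus an exponent/coefficient
    # pair, e.g. " W S\n      0.12345678           1.00000000\n" (numbers illustrative) --
    # the plain-text basis format that PySCF accepts.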
iang = -1
nbas1 = None
bas_str = ''
for line in dbas_str.split('\n'):
toks = line.split()
if len(toks) < 1: continue
if line.startswith('f'):
iang += 1
if iang > nang_avail-1:
msg = 'iang = %d not available from %s' % (iang, ang_moms)
raise RuntimeError(msg)
ang = ang_moms[iang]
if nbas1 is not None:
ifmatch = nbas1 == nbas0
if not ifmatch:
msg = 'expected %d basis, got %d' % (nbas0, nbas1)
raise RuntimeError(msg)
nbas0 = int(toks[1])
nbas1 = 0
else:
expo = float(toks[0])
new1 = '%2s %s\n' % (elem, ang)
new1 += bas_fmt % (expo, 1.0)
bas_str += new1
nbas1 += 1
# convert ECP string to PySCF format
ang_moms += ['ul'] # fake local channel as last ang_mom
decp_str = ascii_out.block_text(mm, 'ECP', 'FINISH', skip_header=False)
lines = decp_str.split('\n')
header = lines[0]
nelec = int(header.split()[1])
ecp_str = '%s nelec %d\n' % (elem, nelec)
iang = -1
nbas1 = None
for line in lines[1:]:
toks = line.split()
if len(toks) < 1: continue
if len(toks) == 1:
if iang > nang_avail-1:
msg = 'iang = %d not available from %s' % (iang, ang_moms)
raise RuntimeError(msg)
ang = ang_moms[iang]
iang += 1
if nbas1 is not None:
ifmatch = nbas1 == nbas0
if not ifmatch:
msg = 'expected %d ecp basis, got %d' % (nbas0, nbas1)
raise RuntimeError(msg)
nbas1 = 0
nbas0 = int(toks[0])
ecp_str += '%s %s\n' % (elem, ang)
else:
nbas1 += 1
ecp_str += line + '\n'
bas = {elem: bas_str}
ecp = {elem: ecp_str}
return bas, ecp
# ====================== level 0: basic output ======================
def read(fout, vp_kwargs=None, mp_kwargs=None):
from qharv.reel import ascii_out
if vp_kwargs is None:
vp_kwargs = dict()
if mp_kwargs is None:
mp_kwargs = dict()
mm = ascii_out.read(fout)
etot = ascii_out.name_sep_val(mm, 'Total energy', ':')
ehomo = ascii_out.name_sep_val(mm, 'E(HOMO)', ':')
elumo = ascii_out.name_sep_val(mm, 'E(LUMO)', ':')
data = {
'etot': etot,
'ehomo': ehomo,
'elumo': elumo,
}
if mm.find(b'* Vector print *') > 0:
data['vectors'] = parse_vector_print(mm, **vp_kwargs)
if mm.find(b'* Mulliken population analysis *') > 0:
data['populations'] = parse_mulliken(mm, **mp_kwargs)
mm.close()
return data
def parse_basis_line(line):
# eg. '1 L W 1 s -1.1034620201 0.0000000000'
assert len(line) == 80
ibas = int(line[:8])
sl = line[8:11].strip() # large or small component
elem = line[11:14].strip()
ie = int(line[14:17])
symm = line[17:24].strip()
istart = 24
nspan = 14
cupr = float(line[istart:istart+nspan])
istart += nspan
cupi = float(line[istart:istart+nspan])
istart += nspan
cdnr = float(line[istart:istart+nspan])
istart += nspan
cdni = float(line[istart:istart+nspan])
cup = cupr + 1j*cupi
cdn = cdnr + 1j*cdni
entry = {'elem': elem, 'ibas': ibas, 'ao_symm': symm,
'cup': cup, 'cdn': cdn}
return entry
def parse_ev_text(text):
lines = text.split('\n')
entryl = []
for line in lines:
if len(line) != 80:
continue
entry = parse_basis_line(line)
entryl.append(entry)
df = pd.DataFrame(entryl)
return df
def parse_eigenvectors(mm, idxl):
"""Parse eigenvectors from DIRAC 'Vector print' .PRIVEC output
Args:
mm (mmap.mmap): memory map of outputfile
idxl (list): a list of starting memory locations for eigenvectors
Return:
pd.DataFrame: eigenvector information
Example:
>>> from qharv.reel import ascii_out
>>> mm = ascii_out.read('inp_mol.out')
>>> idx = mm.find(b'* Vector print *')
>>> mm.seek(idx)
>>> header = 'Electronic eigenvalue no.'
>>> idxl = ascii_out.all_lines_with_tag(mm, header)
>>> df = parse_eigenvectors(mm, idxl[:2]) # first two vectors
"""
from qharv.reel import ascii_out
header = '===================================================='
trailer = 'Electronic eigenvalue no'
dfl = []
for i in idxl:
mm.seek(i)
line = mm.readline().decode()
# eg. 'eigenvalue no. 2: -0.2364785578899'
left, right = line.split(':')
iev = int(left.split()[-1])
ev = float(right)
meta = {'iev': iev, 'ev': ev}
# read body
i0, i1 = ascii_out.locate_block(mm, header, trailer,
force_tail=True, skip_trailer=True)
if i1 < 0:
i0, i1 = ascii_out.locate_block(mm, header, '*********')
# parse
text = mm[i0:i1].decode()
df1 = parse_ev_text(text)
for key, val in meta.items():
df1[key] = val
dfl.append(df1)
df = pd.concat(dfl, axis=0).reset_index(drop=True)
return df
def parse_vector_print(mm,
header='* Vector print *',
mid_tag='Fermion ircop E1u',
end_tag='* Mulliken population analysis *',
):
from qharv.reel import ascii_out
# seek to header
idx = mm.find(header.encode())
mm.seek(idx)
# find all potential vectors to read
idxl = ascii_out.all_lines_with_tag(mm, 'Electronic eigenvalue no.')
# exclude population analysis
iend = mm.find(end_tag.encode())
if iend > 0:
idxl = [i for i in idxl if i < iend]
# partition into even and odd
imid = mm.find(mid_tag.encode())
idxg = [i for i in idxl if i < imid]
idxu = [i for i in idxl if i >= imid]
gdf = parse_eigenvectors(mm, idxg)
gdf['mo_symm'] = 'E1g'
udf = parse_eigenvectors(mm, idxu)
udf['mo_symm'] = 'E1u'
df = pd.concat([gdf, udf], sort=False).reset_index(drop=True)
return df
def parse_populations(mm, idxl):
"""Parse population from DIRAC 'Mulliken' .MULPOP output
modified from parse_eigenvectors
Args:
mm (mmap.mmap): memory map of outputfile
idxl (list): a list of starting memory locations for eigenvectors
Return:
pd.DataFrame: population information
"""
from qharv.reel import ascii_out
header = '--------------------------------------'
trailer = 'Electronic eigenvalue no'
entryl = []
for i in idxl:
mm.seek(i)
line = mm.readline().decode()
# eg. 'eigenvalue no. 2: -0.2364785578899 ('
toks = line.split(':')
left = toks[0]
right = toks[1].split()[0]
iev = int(left.split()[-1])
ev = float(right)
meta = {'iev': iev, 'ev': ev}
# read body
i0, i1 = ascii_out.locate_block(mm, header, trailer,
force_tail=True, skip_trailer=True)
if i1 < 0:
i0, i1 = ascii_out.locate_block(mm, header, '**')
# parse
text = mm[i0:i1].decode()
# e.g.
# alpha 1.0000 | 1.0000
# beta 0.0000 | 0.0000
lines = text.split('\n')
aline = lines[0]
bline = lines[1]
assert 'alpha' in aline
assert 'beta' in bline
atot = float(aline.split()[1])
btot = float(bline.split()[1])
entry = {'a_tot': atot, 'b_tot': btot}
entry.update(meta)
entryl.append(entry)
df = | pd.DataFrame(entryl) | pandas.DataFrame |
from __future__ import division
import os
import argparse
from ruamel import yaml
import tqdm
from os.path import join
import csv
import numpy as np
import pandas as pd
import librosa
import logging
from parakeet.data import DatasetMixin
class LJSpeechMetaData(DatasetMixin):
def __init__(self, root):
self.root = root
self._wav_dir = join(root, "wavs")
csv_path = join(root, "metadata.csv")
self._table = pd.read_csv(
csv_path,
sep="|",
encoding="utf-8",
header=None,
quoting=csv.QUOTE_NONE,
names=["fname", "raw_text", "normalized_text"])
def get_example(self, i):
fname, raw_text, normalized_text = self._table.iloc[i]
abs_fname = join(self._wav_dir, fname + ".wav")
return fname, abs_fname, raw_text, normalized_text
def __len__(self):
return len(self._table)
class Transform(object):
def __init__(self, sample_rate, n_fft, hop_length, win_length, n_mels, reduction_factor):
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length
self.hop_length = hop_length
self.n_mels = n_mels
self.reduction_factor = reduction_factor
def __call__(self, fname):
# wave processing
audio, _ = librosa.load(fname, sr=self.sample_rate)
# Pad the data to the right size to have a whole number of timesteps,
# accounting properly for the model reduction factor.
frames = audio.size // (self.reduction_factor * self.hop_length) + 1
# librosa's stft extract frame of n_fft size, so we should pad n_fft // 2 on both sidess
desired_length = (frames * self.reduction_factor - 1) * self.hop_length + self.n_fft
pad_amount = (desired_length - audio.size) // 2
        # we pad manually to control the number of generated frames
if audio.size % 2 == 0:
audio = np.pad(audio, (pad_amount, pad_amount), mode='reflect')
else:
audio = np.pad(audio, (pad_amount, pad_amount + 1), mode='reflect')
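        # The padding is sized so that librosa.stft(center=False) produces
        # (padded_length - n_fft) // hop_length + 1 == frames * reduction_factor frames,
        # i.e. a whole number of reduced timesteps (checked by the assert below).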
# STFT
D = librosa.stft(audio, self.n_fft, self.hop_length, self.win_length, center=False)
S = np.abs(D)
S_mel = librosa.feature.melspectrogram(sr=self.sample_rate, S=S, n_mels=self.n_mels, fmax=8000.0)
# log magnitude
log_spectrogram = np.log(np.clip(S, a_min=1e-5, a_max=None))
log_mel_spectrogram = np.log(np.clip(S_mel, a_min=1e-5, a_max=None))
num_frames = log_spectrogram.shape[-1]
assert num_frames % self.reduction_factor == 0, "num_frames is wrong"
return (log_spectrogram.T, log_mel_spectrogram.T, num_frames)
def save(output_path, dataset, transform):
if not os.path.exists(output_path):
os.makedirs(output_path)
records = []
for example in tqdm.tqdm(dataset):
fname, abs_fname, _, normalized_text = example
log_spec, log_mel_spec, num_frames = transform(abs_fname)
records.append((num_frames,
fname + "_spec.npy",
fname + "_mel.npy",
normalized_text))
np.save(join(output_path, fname + "_spec"), log_spec)
np.save(join(output_path, fname + "_mel"), log_mel_spec)
meta_data = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
#!python3
import os
import argparse
import pandas as pd
import numpy as np
import statsmodels.api as sm
from plot_module import *
def print_b(text):
print('\033[34m' + '\033[1m' + text + '\033[0m')
def grec_letter(s):
if s == "beta" or s == "chi" or s == "omega":
return '\\' + s
elif s == "gamma":
return "\\Delta \\Delta G"
elif s == "alpha":
return "\\Delta G_{\\mathrm{min}}"
elif s == "pop_size":
return "N_{\\mathrm{e}}"
elif s == "gamma_std":
return "\\sigma ( \\gamma )"
elif s == "gamma_distribution_shape":
return "k"
elif s == "exon_size":
return "n"
elif s == "expression_level":
return "y"
elif s == "sub-ΔG-mean":
return "\\Delta G"
else:
return s
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--output', required=True, type=str, dest="output")
parser.add_argument('-i', '--input', required=True, type=str, nargs='+', dest="input")
parser.add_argument('-f', '--node', required=False, default=False, type=bool, dest="node")
args = parser.parse_args()
array_values = dict()
li = []
for filepath in args.input:
if not os.path.isfile(filepath): continue
x, y = float(filepath.split("_")[-3]), float(filepath.split("_")[-2])
li.append(pd.read_csv(filepath.replace(".tsv", ".parameters.tsv"), sep='\t').assign(x=x, y=y))
for param, vals in pd.read_csv(filepath, sep='\t').items():
if (args.node and "Node" not in param) or (("dnd" not in param) and ("sub-ΔG-mean" not in param)): continue
if param not in array_values: array_values[param] = dict()
array_values[param][(x, y)] = vals
df_p = pd.concat(li, axis=0, ignore_index=True)
uniq = df_p.apply(pd.Series.nunique)
df = df_p.drop(uniq[uniq == 1].index, axis=1)
x_uniq = df[df["x"] == df["x"].values[0]].apply(pd.Series.nunique)
df_x = df.drop(x_uniq[x_uniq != 1].index, axis=1).drop_duplicates()
col_x = [c for c in df_x if c != "x"][0]
x_axis = sorted(df_x["x"].values)
x_range = np.array([df_x[df_x["x"] == x][col_x].values[0] for x in x_axis])
csv_output = []
plt.figure(figsize=(1920 / my_dpi, 1080 / my_dpi), dpi=my_dpi)
for param, x_y_z in array_values.items():
if len(x_axis) < 2: continue
name = param.replace("/", "").replace("|", "")
y_axis = sorted(set([k[1] for k in x_y_z.keys()]))
for (j, y) in enumerate(y_axis):
label_dict = dict()
if "chi" in df_p and "dnd" in param:
label_dict["chi"] = df_p[df_p["y"] == y]["chi"].values[0]
if len(y_axis) > 1:
df_y = df[df["y"] == y]
y_uniq = df_y.apply(pd.Series.nunique)
df_y = df_y.drop(y_uniq[y_uniq != 1].index, axis=1).drop_duplicates()
for col in df_y:
if col == "y" or col == "chi": continue
label_dict[col] = df_y[col].values[0]
mean_z = [np.mean(x_y_z[(x, y)]) for x in x_axis]
label = ("$" + ", \\ ".join(
["{0}={1:.3g}".format(grec_letter(k), v) for k, v in label_dict.items()]) + "$") if len(
label_dict) > 0 else None
base_line, = plt.plot(x_range, mean_z, linewidth=2, label=label)
plt.fill_between(x_range, [np.percentile(x_y_z[(x, y)], 5) for x in x_axis],
[np.percentile(x_y_z[(x, y)], 95) for x in x_axis], alpha=0.3)
if ('SimuStab' not in args.output) and ('SimuFold' not in args.output): continue
results = sm.OLS(mean_z, sm.add_constant(np.log(x_range))).fit()
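            # linear fit mean_z = a*log(x) + b: the slope a estimates the scaling exponent chi-hat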
b, a = results.params[0:2]
idf = np.logspace(np.log(min(x_range)), np.log(max(x_range)), 30, base=np.exp(1))
linear = a * np.log(idf) + b
reg = '$\\hat{\\chi}' + '={0:.4g}\\ (r^2={1:.3g})$'.format(float(a), results.rsquared)
print(reg)
csv_output.append({"name": name, "mean": True, "a": a, "b": b, "r2:": results.rsquared,
"label": label})
plt.plot(idf, linear, '-', linewidth=4, color=base_line.get_color(), linestyle="--", label=reg)
if len(y_axis) > 1:
continue
for i, x in enumerate(x_axis):
plt.scatter([x_range[i]] * len(x_y_z[(x, y)]), x_y_z[(x, y)], color=base_line.get_color(), alpha=0.05)
plt.xscale("log")
plt.xlabel("$" + grec_letter(col_x) + "$", fontsize=label_size)
if "dnd" in param: param = 'omega'
plt.ylabel("$" + grec_letter(param) + "$", fontsize=label_size)
# plt.ylim((0.3, 0.4))
plt.legend(fontsize=legend_size)
if len([c for c in df_x]) > 2:
plt.title("Scaling also $" + ", ".join(
[grec_letter(c) for c in df_x if (c != "x" and c != col_x)]) + "$ on the x-axis.")
plt.tight_layout()
plt.savefig("{0}/{1}.pdf".format(args.output, name), format="pdf", dpi=my_dpi)
plt.savefig("{0}/{1}.png".format(args.output, name), format="png", dpi=my_dpi)
plt.clf()
plt.close('all')
| pd.DataFrame(csv_output) | pandas.DataFrame |
from datetime import datetime
import pandas as pd
from featuretools.primitives import IsNull, Max
from featuretools.primitives.base import PrimitiveBase, make_agg_primitive
from featuretools.variable_types import DatetimeTimeIndex, Numeric
def test_call_agg():
primitive = Max()
# the assert is run twice on purpose
assert 5 == primitive(range(6))
assert 5 == primitive(range(6))
def test_call_trans():
primitive = IsNull()
assert pd.Series([False for i in range(6)]).equals(primitive(range(6)))
assert pd.Series([False for i in range(6)]).equals(primitive(range(6)))
def test_uses_calc_time():
def time_since_last(values, time=None):
time_since = time - values.iloc[0]
return time_since.total_seconds()
TimeSinceLast = make_agg_primitive(time_since_last,
[DatetimeTimeIndex],
Numeric,
name="time_since_last",
uses_calc_time=True)
primitive = TimeSinceLast()
datetimes = pd.Series([datetime(2015, 6, 7), datetime(2015, 6, 6)])
answer = 86400.0
assert answer == primitive(datetimes, time=datetime(2015, 6, 8))
def test_call_multiple_args():
class TestPrimitive(PrimitiveBase):
def get_function(self):
def test(x, y):
return y
return test
primitive = TestPrimitive()
assert | pd.Series([0, 1]) | pandas.Series |
"""Monkey-patch data frame formatter to
1. add dtypes next to column names when printing
2. collapse data frames when they are elements of a parent data frame.
"""
from pandas import DataFrame
from pandas.io.formats.html import (
HTMLFormatter,
NotebookFormatter,
Mapping,
MultiIndex,
get_level_lengths,
)
from pandas.io.formats.format import (
DataFrameFormatter,
GenericArrayFormatter,
partial,
List,
QUOTE_NONE,
get_option,
NA,
NaT,
np,
PandasObject,
extract_array,
lib,
notna,
is_float,
format_array,
)
from pandas.io.formats.string import StringFormatter
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.missing import isna
# patch more formatters?
# pandas 1.2.0 doesn't have this function
def _trim_zeros_single_float(str_float: str) -> str: # pragma: no cover
"""
Trims trailing zeros after a decimal point,
leaving just one if necessary.
"""
str_float = str_float.rstrip("0")
if str_float.endswith("."):
str_float += "0"
return str_float
class PdtypesDataFrameFormatter(DataFrameFormatter):
"""Custom formatter for DataFrame"""
def get_strcols(self) -> List[List[str]]:
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
strcols = self._get_strcols_without_index()
if self.index:
# dtype
str_index = [""] + self._get_formatted_index(self.tr_frame)
strcols.insert(0, str_index)
return strcols
def format_col(self, i: int) -> List[str]:
"""Format column, add dtype ahead"""
frame = self.tr_frame
formatter = self._get_formatter(i)
dtype = frame.iloc[:, i].dtype.name
return [f"<{dtype}>"] + format_array(
frame.iloc[:, i]._values,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
space=self.col_space.get(frame.columns[i]),
decimal=self.decimal,
leading_space=self.index,
)
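# With the patched formatter every printed column carries its dtype directly under the
# header, e.g. (illustrative):
#            x          y
#      <int64>  <float64>
#            1        1.0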
class PdtypesGenericArrayFormatter(GenericArrayFormatter):
"""Generic Array Formatter to show DataFrame element in a cell in a
    collapsed representation
"""
def _format_strings(self) -> List[str]:
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
precision = get_option("display.precision")
# previous pandas
# float_format = lambda x: f"{x: .{precision:d}f}"
# pandas 1.4
float_format = lambda x: _trim_zeros_single_float(
f"{x: .{precision:d}f}"
)
else: # pragma: no cover
float_format = self.float_format
if self.formatter is not None: # pragma: no cover
formatter = self.formatter
else:
quote_strings = (
self.quoting is not None and self.quoting != QUOTE_NONE
)
formatter = partial(
pprint_thing,
escape_chars=("\t", "\r", "\n"),
quote_strings=quote_strings,
)
def _format(x):
if (
self.na_rep is not None
and is_scalar(x) and | isna(x) | pandas.core.dtypes.missing.isna |
import nose
import unittest
from numpy import nan
from pandas.core.daterange import DateRange
from pandas.core.index import Index, MultiIndex
from pandas.core.common import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.core.panel import WidePanel
from collections import defaultdict
import pandas.core.datetools as dt
import numpy as np
import pandas.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def test_python_grouper(self):
groupFunc = self.groupDict.get
groups = groupby(self.stringIndex, groupFunc)
setDict = dict((k, set(v)) for k, v in groups.iteritems())
for idx in self.stringIndex:
key = groupFunc(idx)
groupSet = setDict[key]
assert(idx in groupSet)
class TestGroupBy(unittest.TestCase):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
def test_basic(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
# Cython only returning floating point for now...
assert_series_equal(grouped.agg(np.sum).astype(float),
grouped.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one' : np.mean,
'two' : np.std})
group_constants = {
0 : 10,
1 : 20,
2 : 30
}
agged = grouped.agg(lambda x: group_constants[x.groupName] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
def test_series_agg_corner(self):
# nothing to group, all NA
result = self.ts.groupby(self.ts * np.nan).sum()
assert_series_equal(result, Series([]))
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({'A' : 'var', 'B' : 'std', 'C' : 'mean'})
expected = DataFrame({'A' : grouped['A'].var(),
'B' : grouped['B'].std(),
'C' : grouped['C'].mean()})
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_basic_regression(self):
# regression
T = [1.0*x for x in range(1,10) *10][:1095]
result = Series(T, range(0, len(T)))
groupings = np.random.random((1100,))
groupings = Series(groupings, range(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
transformed = grouped.transform(np.mean)
for name, group in grouped:
mean = group.mean()
for idx in group.index:
self.assertEqual(transformed[idx], mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
filled = df.groupby(lambda x: x.month).fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_with_na(self):
index = Index(np.arange(10))
values = Series(np.ones(10), index)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
self.assertRaises(AttributeError, getattr, grouped, 'foo')
def test_series_describe_multikey(self):
raise nose.SkipTest
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.describe()
def test_frame_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.columns), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated)
# transform
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.columns), 4)
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
assert_almost_equal(transformed.xs(idx), mean)
# iterate
for weekday, group in grouped:
self.assert_(group.index[0].weekday() == weekday)
# groups / group_indices
groups = grouped.primary.groups
indices = grouped.primary.indices
for k, v in groups.iteritems():
samething = self.tsframe.index.take(indices[k])
self.assert_(np.array_equal(v, samething))
def test_frame_groupby_columns(self):
mapping = {
'A' : 0, 'B' : 0, 'C' : 1, 'D' : 1
}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.columns), 2)
# transform
tf = lambda x: x - x.mean()
groupedT = self.tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
self.assertEqual(len(v.columns), 2)
# tgroupby
grouping = {
'A' : 0,
'B' : 1,
'C' : 0,
'D' : 1
}
grouped = self.frame.tgroupby(grouping.get, np.mean)
self.assertEqual(len(grouped), len(self.frame.index))
self.assertEqual(len(grouped.columns), 2)
def test_multi_iter(self):
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
grouped = s.groupby([k1, k2])
iterated = list(grouped)
expected = [('a', '1', s[[0, 2]]),
('a', '2', s[[1]]),
('b', '1', s[[4]]),
('b', '2', s[[3, 5]])]
for i, (one, two, three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assert_(e1 == one)
self.assert_(e2 == two)
assert_series_equal(three, e3)
def test_multi_iter_frame(self):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1' : np.random.randn(6),
'v2' : np.random.randn(6),
'k1' : k1, 'k2' : k2},
index=['one', 'two', 'three', 'four', 'five', 'six'])
grouped = df.groupby(['k1', 'k2'])
# things get sorted!
iterated = list(grouped)
idx = df.index
expected = [('a', '1', df.ix[idx[[4]]]),
('a', '2', df.ix[idx[[3, 5]]]),
('b', '1', df.ix[idx[[0, 2]]]),
('b', '2', df.ix[idx[[1]]])]
for i, (one, two, three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assert_(e1 == one)
self.assert_(e2 == two)
assert_frame_equal(three, e3)
# don't iterate through groups with no data
df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
grouped = df.groupby(['k1', 'k2'])
groups = {}
for a, b, gp in grouped:
groups[a, b] = gp
self.assertEquals(len(groups), 2)
def test_multi_func(self):
col1 = self.df['A']
col2 = self.df['B']
grouped = self.df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = self.df.groupby(['A', 'B']).mean()
assert_frame_equal(agged.ix[:, ['C', 'D']],
expected.ix[:, ['C', 'D']])
# some "groups" with no data
df = DataFrame({'v1' : np.random.randn(6),
'v2' : np.random.randn(6),
'k1' : np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2' : np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_groupby_multiple_columns(self):
data = self.df
grouped = data.groupby(['A', 'B'])
def _check_op(op):
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])
expected = dict((k, DataFrame(v)) for k, v in expected.iteritems())
expected = WidePanel.fromDict(expected).swapaxes(0, 1)
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
_check_op(lambda x: x.sum())
_check_op(lambda x: x.mean())
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_multiple_key(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum(1))
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum(1))
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(self):
# test that having an all-NA column doesn't mess you up
df = self.df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = self.df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(self):
grouped = self.df.groupby('A')
result = grouped.mean()
expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
        assert_frame_equal(result, expected)
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_columns(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"])
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_rows(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, row_filter="salary > 150000")
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_dict_limit(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"], limit=10)
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == 10
def test_read_to_dict_sample(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, sample=10)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
def test_read_to_dict_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_dict("/tmp")
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_replace("data.parquet", sample_dataframe)
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data.parquet")
assert_frame_equal(dataframe_after, sample_dataframe)
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df_multifile(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), rows_per_file=1000)
backend.write_replace("data", sample_dataframe)
assert sum(1 for _ in (tempdir / "data").glob("*")) == 5, "There should be 5 files"
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
    assert_frame_equal(dataframe_after, sample_dataframe)
import sys
import pandas as pd
import numpy as np
import os
import pickle
from sqlalchemy import create_engine
import re
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report
import warnings
def load_data(database_filepath):
"""
- Loads data from SQL Database
Args:
database_filepath: SQL database file
Returns:
X pandas_dataframe: Features dataframe
Y pandas_dataframe: Target dataframe
category_names list: Target labels
"""
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table('clean_dataset', engine)
X = df["message"]
Y = df[df.columns[4:]]
category_names = df.columns[4:]
return X, Y, category_names
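# Example usage (the database path below is hypothetical):
# X, Y, category_names = load_data("data/DisasterResponse.db")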
def tokenize(text):
"""
    - Normalize, tokenize and lemmatize the message text, replacing URLs with a placeholder
    Args:
        text: message string
    Returns:
        clean_tokens: list of lower-cased, lemmatized tokens
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
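# Illustrative behaviour (output is approximate):
# tokenize("Water needed at http://example.org now!")
# -> ['water', 'needed', 'at', 'urlplaceholder', 'now', '!']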
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
    - Starting Verb Extractor
    This class extracts the starting verb of a sentence,
creating a new feature for the ML classifier.
It has been integrated from Machine Learning Pipeline - Solution: GridSearch
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)
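# A minimal sketch of how tokenize() and StartingVerbExtractor could be combined
# into a multi-output classification pipeline. The structure and estimator choice
# are illustrative assumptions, not necessarily the configuration of the original script.
def example_build_model():
    pipeline = Pipeline([
        ('features', FeatureUnion([
            ('text_pipeline', Pipeline([
                ('vect', CountVectorizer(tokenizer=tokenize)),
                ('tfidf', TfidfTransformer()),
            ])),
            ('starting_verb', StartingVerbExtractor()),
        ])),
        # MultiOutputClassifier fits one classifier per target category
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ])
    return pipeline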
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
    @staticmethod
    def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
        ##### Creates a pivot table and adds % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
        # Sometimes the description of an account has small differences across
        # periods (e.g. punctuation). The purpose of df_index is to keep only one
        # description for each account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
        # This groupby sums duplicated rows, collapsing them into one
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
        # Due to the line 'df = df.sort_values([("date"), ("value")], axis=1,
        # ascending=False)' above, the columns are ordered by date descending and
        # value descending. The purpose here is to set the order as: date
        # descending and value ascending.
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
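    # Illustrative output shape (accounts and dates are hypothetical): rows are
    # indexed by (cd_conta, ds_conta) and columns by a (date, value) MultiIndex,
    # e.g. ('2020-12-31', 'ytd'), ('2020-12-31', 'ytd % change'), ...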
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
except:
shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_net_income
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Líquido das Operações Continuadas' OR ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Líquido das Operações Continuadas"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Lucro/Prejuízo do Período"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_net_income"] = df["ytd_net_income"] - \
df["ytd_net_income"].shift(1)
df["quarter_net_income"][df["fiscal_quarter"] == 1] = df["ytd_net_income"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_net_income"] = (
df["quarter_net_income"].rolling(window=4, min_periods=4).sum()
)
if quarter == False:
df = df.drop(["quarter_net_income"], axis=1)
if ytd == False:
df = df.drop(["ytd_net_income"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Net Income (R$,000) ')
return df
def ebit(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebit information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebit", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_ebit
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR ds_conta = 'Resultado Operacional')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["quarter_ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_ebit"] = df["quarter_ebit"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebit"], axis=1)
if ytd == False:
df = df.drop(["ytd_ebit"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBIT (R$,000) ')
return df
def depre_amort(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the depreciationa and amortization information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="depre_amort", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_d_a
FROM dva
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND ds_conta = 'Depreciação, Amortização e Exaustão'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["quarter_d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
if ttm == True:
df["ttm_d_a"] = df["quarter_d_a"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_d_a"], axis=1)
if ytd == False:
df = df.drop(["ytd_d_a"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo, bars=' D&A (R$,000)')
return df
def ebitda(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebitda information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebitda", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dre.dt_fim_exerc AS date,
dre.fiscal_quarter,
dre.ds_conta,
dre.vl_conta AS ytd_ebit,
dva.vl_conta AS ytd_d_a
FROM dre
LEFT JOIN dva ON (dre.dt_fim_exerc=dva.dt_fim_exerc AND dre.grupo_dfp=dva.grupo_dfp AND dre.cnpj=dva.cnpj)
WHERE dre.cnpj = '{self.cnpj}'
AND dre.grupo_dfp = '{self.grupo}'
AND dre.dt_fim_exerc >= '{begin_period.date()}'
AND (dre.ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR dre.ds_conta = 'Resultado Operacional')
AND dva.ds_conta = 'Depreciação, Amortização e Exaustão'
ORDER BY dre.dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df["ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
df["d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
df["quarter_ebitda"] = df["ebit"] - df["d_a"]
if ttm == True:
df["ttm_ebitda"] = df["quarter_ebitda"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebitda"], axis=1)
if ytd == True:
df["ytd_ebitda"] = df["ytd_ebit"] - df["ytd_d_a"]
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(
columns=["fiscal_quarter", "ds_conta",
"ytd_ebit", "ytd_d_a", "d_a", "ebit"]
)
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBITDA (R$,000) ')
return df
def revenue(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the revenue information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="revenue", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_revenue
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND cd_conta = '3.01'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
df["quarter_revenue"][df["fiscal_quarter"] == 1] = df["ytd_revenue"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_revenue"] = df["quarter_revenue"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_revenue"], axis=1)
if ytd == False:
df = df.drop(["ytd_revenue"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Revenue (R$,000) ')
return df
def cash_equi(self, start_period="all", plot=False):
"""
Creates a dataframe with the cash and cash equivalents information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="cash_equi", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, SUM(vl_conta) AS cash_equi
FROM bpa
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}')
AND (ds_conta = 'Caixa e Equivalentes de Caixa' OR ds_conta = 'Aplicações Financeiras' )
AND (cd_conta != '1.02.01.03.01')
AND dt_fim_exerc >= '{begin_period}'
GROUP BY dt_fim_exerc
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Cash & Equivalents (R$,000) ')
return df
def total_debt(self, start_period="all", plot=False):
"""
Creates a dataframe with the total debt information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="total_debt", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, SUM(vl_conta) AS total_debt
FROM bpp
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' AND ds_conta = 'Empréstimos e Financiamentos')
AND (cd_conta = '2.01.04' OR cd_conta = '2.02.01')
AND dt_fim_exerc >= '{begin_period}'
GROUP BY dt_fim_exerc
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Total Debt (R$,000) ')
return df
def market_value(self, start_period="all", plot=False):
"""
Creates a dataframe with the market value information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="market_value", start_period=start_period
)
try:
self.pn_ticker
except:
query = f"""SELECT date, (preult * number_shares) AS market_value
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
else:
query = f"""SELECT date, SUM(preult * number_shares) AS market_value
FROM prices
WHERE (ticker = '{self.on_ticker}' OR ticker ='{self.pn_ticker}')
AND date >= '{begin_period}'
GROUP BY date
ORDER BY date"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.line_plot(df, self.ticker, self.grupo,
line=' Market Value (R$,000) ')
return df
def net_debt(self, start_period="all", plot=False):
"""
Creates a dataframe with the net debt information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
total_debt = Ticker.total_debt(self, start_period=start_period)
cash = Ticker.cash_equi(self, start_period=start_period)
net_debt = total_debt["total_debt"] - cash["cash_equi"]
net_debt.rename("net_debt", axis=1, inplace=True)
if plot:
            _plots.bar_plot(pd.DataFrame(net_debt), self.ticker, self.grupo,
                            bars=' Net Debt (R$,000) ')
        return net_debt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .plan_losses import PPC, PlanCost,get_leading_hint
from .cost_model import *
from query_representation.utils import deterministic_hash,make_dir
from query_representation.viz import *
from matplotlib.backends.backend_pdf import PdfPages
import multiprocessing as mp
import random
from collections import defaultdict
import networkx as nx
import os
import wandb
import pickle
import pdb
TIMEOUT_CARD = 15000100000
def get_eval_fn(loss_name):
if loss_name == "qerr":
return QError()
elif loss_name == "qerr_joinkey":
return QErrorJoinKey()
elif loss_name == "abs":
return AbsError()
elif loss_name == "rel":
return RelativeError()
elif loss_name == "ppc":
return PostgresPlanCost(cost_model="C")
elif loss_name == "ppc2":
return PostgresPlanCost(cost_model="C2")
elif loss_name == "plancost":
return SimplePlanCost()
elif loss_name == "flowloss":
return FlowLoss()
elif loss_name == "constraints":
return LogicalConstraints()
else:
assert False
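# Illustrative usage (the keyword arguments shown are assumptions based on the
# eval()/save_logs() signatures below):
# errfn = get_eval_fn("qerr")
# errors = errfn.eval(qreps, preds, result_dir=None, samples_type="test")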
class EvalFunc():
def __init__(self, **kwargs):
pass
def save_logs(self, qreps, errors, **kwargs):
result_dir = kwargs["result_dir"]
if result_dir is None:
return
if "samples_type" in kwargs:
samples_type = kwargs["samples_type"]
else:
samples_type = ""
resfn = os.path.join(result_dir, self.__str__() + ".csv")
res = pd.DataFrame(data=errors, columns=["errors"])
res["samples_type"] = samples_type
# TODO: add other data?
if os.path.exists(resfn):
res.to_csv(resfn, mode="a",header=False)
else:
res.to_csv(resfn, header=True)
def eval(self, qreps, preds, **kwargs):
'''
@qreps: [qrep_1, ...qrep_N]
@preds: [{},...,{}]
@ret: [qerror_1, ..., qerror_{num_subplans}]
Each query has multiple subplans; the returned list flattens it into a
single array. The subplans of a query are sorted alphabetically (see
_get_all_cardinalities)
'''
pass
def __str__(self):
return self.__class__.__name__
# TODO: stuff for saving logs
def fix_query(query):
# these conditions were needed due to some edge cases while generating the
    # queries on the movie_info_idx table, but they crash psycopg2 somewhere.
    # Removing them shouldn't affect the queries.
bad_str1 = "mii2.info ~ '^(?:[1-9]\d*|0)?(?:\.\d+)?$' AND"
bad_str2 = "mii1.info ~ '^(?:[1-9]\d*|0)?(?:\.\d+)?$' AND"
if bad_str1 in query:
query = query.replace(bad_str1, "")
if bad_str2 in query:
query = query.replace(bad_str2, "")
return query
def _get_all_cardinalities(qreps, preds):
ytrue = []
yhat = []
for i, pred_subsets in enumerate(preds):
qrep = qreps[i]["subset_graph"].nodes()
keys = list(pred_subsets.keys())
keys.sort()
for alias in keys:
pred = pred_subsets[alias]
actual = qrep[alias]["cardinality"]["actual"]
if actual == 0:
actual += 1
ytrue.append(float(actual))
yhat.append(float(pred))
return np.array(ytrue), np.array(yhat)
def _get_all_joinkeys(qreps, preds):
ytrue = []
yhat = []
for i, joinkeys in enumerate(preds):
einfos = qreps[i]["subset_graph"].edges()
keys = list(joinkeys.keys())
keys.sort()
for curkey in keys:
pred = joinkeys[curkey]
# actual = einfos[curkey]["actual"]
jcards = einfos[curkey]["join_key_cardinality"]
actual = list(jcards.values())[0]["actual"]
if actual == 0:
actual += 1
if pred == 0:
pred += 1
ytrue.append(float(actual))
yhat.append(float(pred))
return np.array(ytrue), np.array(yhat)
class LogicalConstraints(EvalFunc):
def __init__(self, **kwargs):
pass
def save_logs(self, qreps, errors, **kwargs):
pass
# result_dir = kwargs["result_dir"]
# if result_dir is None:
# return
# if "samples_type" in kwargs:
# samples_type = kwargs["samples_type"]
# else:
# samples_type = ""
# resfn = os.path.join(result_dir, self.__str__() + ".csv")
# res = pd.DataFrame(data=errors, columns=["errors"])
# res["samples_type"] = samples_type
# # TODO: add other data?
# if os.path.exists(resfn):
# res.to_csv(resfn, mode="a",header=False)
# else:
# res.to_csv(resfn, header=True)
def eval(self, qreps, preds, **kwargs):
'''
@qreps: [qrep_1, ...qrep_N]
@preds: [{},...,{}]
@ret: [qerror_1, ..., qerror_{num_subplans}]
Each query has multiple subplans; the returned list flattens it into a
single array. The subplans of a query are sorted alphabetically (see
_get_all_cardinalities)
'''
errors = []
id_errs = []
fkey_errs = []
featurizer = kwargs["featurizer"]
for qi, qrep in enumerate(qreps):
cur_errs = []
cur_preds = preds[qi]
sg = qrep["subset_graph"]
jg = qrep["join_graph"]
for node in sg.nodes():
if node == SOURCE_NODE:
continue
edges = sg.out_edges(node)
nodepred = cur_preds[node]
# calculating error per node instead of per edge
error = 0
for edge in edges:
prev_node = edge[1]
newt = list(set(edge[0]) - set(edge[1]))[0]
tab_pred = cur_preds[(newt,)]
for alias in edge[1]:
if (alias,newt) in jg.edges():
jdata = jg.edges[(alias,newt)]
elif (newt,alias) in jg.edges():
jdata = jg.edges[(newt,alias)]
else:
continue
if newt not in jdata or alias not in jdata:
continue
newjkey = jdata[newt]
otherjkey = jdata[alias]
if not featurizer.feat_separate_alias:
newjkey = ''.join([ck for ck in newjkey if not ck.isdigit()])
otherjkey = ''.join([ck for ck in otherjkey if not ck.isdigit()])
stats1 = featurizer.join_key_stats[newjkey]
stats2 = featurizer.join_key_stats[otherjkey]
newjcol = newjkey[newjkey.find(".")+1:]
if newjcol == "id":
card1 = cur_preds[(newt,)]
maxfkey = stats2["max_key"]
maxcard1 = maxfkey*card1
## FIXME: not fully accurate
if cur_preds[node] > maxcard1:
fkey_errs.append(1.0)
else:
fkey_errs.append(0.0)
# could not have got bigger
if cur_preds[prev_node] < cur_preds[node]:
error = 1
id_errs.append(1)
else:
id_errs.append(0)
# else:
# # new table was a foreign key
# maxfkey = stats1["max_key"]
# card_prev = cur_preds[prev_node]
# maxcurcard = card_prev * maxfkey
# if maxcurcard < cur_preds[node]:
# print("BAD")
# pdb.set_trace()
cur_errs.append(error)
errors.append(np.mean(cur_errs))
print("pkey x fkey errors: ", np.mean(fkey_errs), np.sum(fkey_errs))
print("primary key id errors: ", np.mean(id_errs))
return errors
def __str__(self):
return self.__class__.__name__
# TODO: stuff for saving logs
class QErrorJoinKey(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_joinkeys(qreps, preds)
assert len(ytrue) == len(yhat)
# assert 0.00 not in ytrue
# assert 0.00 not in yhat
errors = np.maximum((ytrue / yhat), (yhat / ytrue))
num_table_errs = defaultdict(list)
didx = 0
for i, qrep in enumerate(qreps):
edges = list(qrep["subset_graph"].edges())
# if SOURCE_NODE in nodes:
# nodes.remove(SOURCE_NODE)
edges.sort(key = lambda x: str(x))
for qi, edge in enumerate(edges):
assert len(edge[1]) < len(edge[0])
numt = len(edge[1])
curerr = errors[didx]
num_table_errs[numt].append(curerr)
didx += 1
nts = list(num_table_errs.keys())
nts.sort()
for nt in nts:
print("{} Tables, JoinKey-QError mean: {}, 99p: {}".format(
nt, np.mean(num_table_errs[nt]),
np.percentile(num_table_errs[nt], 99)))
# self.save_logs(qreps, errors, **kwargs)
return errors
class QError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
assert len(ytrue) == len(yhat)
assert 0.00 not in ytrue
assert 0.00 not in yhat
errors = np.maximum((ytrue / yhat), (yhat / ytrue))
num_table_errs = defaultdict(list)
didx = 0
for i, qrep in enumerate(qreps):
nodes = list(qrep["subset_graph"].nodes())
if SOURCE_NODE in nodes:
nodes.remove(SOURCE_NODE)
nodes.sort()
# qidx = 0
for qi, node in enumerate(nodes):
numt = len(node)
curerr = errors[didx]
## debug code!
# if numt <= 2 and curerr > 100:
# if ytrue[didx] >= TIMEOUT_CARD:
# continue
# print(node, ytrue[didx], yhat[didx], curerr)
# pdb.set_trace()
num_table_errs[numt].append(curerr)
didx += 1
nts = list(num_table_errs.keys())
nts.sort()
for nt in nts:
if nt <= 3:
print("{} Tables, QError mean: {}, 99p: {}".format(
nt, np.mean(num_table_errs[nt]),
np.percentile(num_table_errs[nt], 99)))
self.save_logs(qreps, errors, **kwargs)
return errors
class AbsError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
errors = np.abs(yhat - ytrue)
return errors
class RelativeError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
# TODO: may want to choose a minimum estimate
# epsilons = np.array([1]*len(yhat))
# ytrue = np.maximum(ytrue, epsilons)
errors = np.abs(ytrue - yhat) / ytrue
return errors
class PostgresPlanCost(EvalFunc):
def __init__(self, cost_model="C"):
self.cost_model = cost_model
def __str__(self):
return self.__class__.__name__ + "-" + self.cost_model
def save_logs(self, qreps, errors, **kwargs):
if "result_dir" not in kwargs:
return
use_wandb = kwargs["use_wandb"]
result_dir = kwargs["result_dir"]
if result_dir is None and not use_wandb:
return
save_pdf_plans = kwargs["save_pdf_plans"]
sqls = kwargs["sqls"]
plans = kwargs["plans"]
opt_costs = kwargs["opt_costs"]
pg_costs = kwargs["pg_costs"]
true_cardinalities = kwargs["true_cardinalities"]
est_cardinalities = kwargs["est_cardinalities"]
costs = errors
if "samples_type" in kwargs:
samples_type = kwargs["samples_type"]
else:
samples_type = ""
if "alg_name" in kwargs:
alg_name = kwargs["alg_name"]
else:
alg_name = "Est"
if result_dir is not None:
costs_fn = os.path.join(result_dir, self.__str__() + ".csv")
if os.path.exists(costs_fn):
costs_df = pd.read_csv(costs_fn)
else:
columns = ["qname", "join_order", "exec_sql", "cost"]
costs_df = pd.DataFrame(columns=columns)
cur_costs = defaultdict(list)
for i, qrep in enumerate(qreps):
# sql_key = str(deterministic_hash(qrep["sql"]))
# cur_costs["sql_key"].append(sql_key)
qname = os.path.basename(qrep["name"])
cur_costs["qname"].append(qname)
joinorder = get_leading_hint(qrep["join_graph"], plans[i])
cur_costs["join_order"].append(joinorder)
cur_costs["exec_sql"].append(sqls[i])
cur_costs["cost"].append(costs[i])
cur_df = pd.DataFrame(cur_costs)
            combined_df = pd.concat([costs_df, cur_df], ignore_index=True)
            combined_df.to_csv(costs_fn, index=False)
import pandas as pd
import requests
base_url = "https://www.datos.gov.co/resource/gt2j-8ykr.json"
def get_json_response(base_url, params):
r = requests.get(url=base_url, params=params)
data = r.json()
return data
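# Illustrative call (the SoQL parameters shown are an assumption): the Socrata API
# returns the JSON payload as a list of records, e.g.
# get_json_response(base_url, {"$select": "COUNT(*) AS n"}) -> [{"n": "123"}]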
def get_total_casos():
df = get_total_acumulado()
penultimate_day, last_day = df.tail(2).acumulado.values
return last_day, penultimate_day
def get_total_recuperados():
df = get_acumulado_recuperados()
penultimate_day, last_day = df.tail(2).acumulado.values
return last_day, penultimate_day
def get_total_fallecidos():
df = get_acumulado_fallecidos()
penultimate_day, last_day = df.tail(2).acumulado.values
return last_day, penultimate_day
def get_total_casos_activos():
params = {
"$select": "COUNT(*) as total_casos_confirmados",
"$where": "Recuperado = 'Activo'",
}
casos_activos = get_json_response(base_url, params)[
0]["total_casos_confirmados"]
casos_activos = int(casos_activos)
return casos_activos
def get_distribucion_por_genero():
params = {
"$select": "UPPER(sexo) as Sexo, COUNT(id_de_caso) as cantidad",
"$group": "UPPER(sexo)",
}
casos_activos = get_json_response(base_url, params)
df = pd.DataFrame.from_records(casos_activos)
return df
def get_edades():
params = {
"$select": "edad, COUNT(id_de_caso) as cantidad",
"$group": "edad",
}
casos = get_json_response(base_url, params)
df = pd.DataFrame.from_records(casos)
return df
def get_distribucion_por_departamento():
params = {
"$select": "departamento as codigo, departamento_nom as nombre, COUNT(id_de_caso) as cantidad",
"$group": "codigo, nombre",
"$order": "cantidad DESC",
}
casos = get_json_response(base_url, params)
    df = pd.DataFrame.from_records(casos)
    return df
"""
This script is for running inference on the VOiCES dataset using JasperInference
class. It takes in the following command line arguments
-r : The absolute path to the root of the dataset
-i : The absolute path to the VOiCES index file (.csv) that indexes all the
files for inference
-e : The path to the weights for the Jasper/Quartznet encoder
-d : The path to the weights for the Jasper/Quartznet decoder
-c : The path to the Jasper/Quartznet config file (.yml)
-o : The filepath that the inference results should be put out, includes .csv
extension
-b : The inference batch size, larger values will take advantage of GPU
acceleration better
--use_cpu : boolean. If enabled, NeMo computations will be done on CPU
The output is a csv file with a row for each file and the following columns
query_name: The VOiCES filename with the path info removed (string), can be
used for joining the inference results table with other tables
ground_truth: The ground truth transcript for the recording
noisy transcript: The predicted transcript when running Jasper on the VOiCES
recording
clean transcript: The predicted transcript when running Jasper on the original
LibriSpeech recording
noisy wer: The word error rate of the noisy transcript with respect to the
ground truth
clean wer: The word error rate of the clean transcript with respect to the
ground truth
"""
import numpy as np
import os
import argparse
import pandas as pd
from JasperModels import JasperInference
from ruamel.yaml import YAML
import pesq
import librosa
from nemo_asr.helpers import post_process_predictions, word_error_rate
import tqdm
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
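# e.g. list(batch([1, 2, 3, 4, 5], n=2)) -> [[1, 2], [3, 4], [5]]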
def process_batch(item_batch,dataset_root,jasper_model,sample_rate=16000):
"""
Perform inference on and post-process a batch of VOiCES recordings
Arguments:
item_batch: A list of dictionaries, corresponding to entries in a
VOiCES index.
dataset_root: The absolute path to the root of the dataset
jasper_model: An instance of the JasperInference class
sample_rate: The sample rate of the recordings
Returns:
result_batch: A list of dictionaries, with one for each item in
item_batch.
"""
result_batch = []
noisy_waveform_list = []
clean_waveform_list = []
for item in item_batch:
result_dict = {'query_name':item['query_name']}
result_dict['ground_truth']=item['transcript']
noisy_filepath = os.path.join(dataset_root,item['filename'])
clean_filepath = os.path.join(dataset_root,item['source'])
noisy_waveform,_ = librosa.load(noisy_filepath,sr=sample_rate)
noisy_waveform_list.append(noisy_waveform)
clean_waveform,_ = librosa.load(clean_filepath,sr=sample_rate)
clean_waveform_list.append(clean_waveform)
#pesq_nb = pesq.pesq(16000,clean_waveform,noisy_waveform,'nb')
#pesq_wb = pesq.pesq(16000,clean_waveform,noisy_waveform,'wb')
#result_dict['pesq nb'] = pesq_nb
#result_dict['pesq wb'] = pesq_wb
result_batch.append(result_dict)
noisy_result = jasper_model.infer(waveforms=noisy_waveform_list)
clean_result = jasper_model.infer(waveforms=clean_waveform_list)
for i in range(len(item_batch)):
result_batch[i]['noisy transcript'] = noisy_result['greedy transcript'][i]
result_batch[i]['clean transcript'] = clean_result['greedy transcript'][i]
result_batch[i]['noisy wer'] = word_error_rate([result_batch[i]['noisy transcript']],[result_batch[i]['ground_truth']])
result_batch[i]['clean wer'] = word_error_rate([result_batch[i]['clean transcript']],[result_batch[i]['ground_truth']])
return result_batch
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r',dest='DATASET_ROOT',help='VOiCES dataset root',
default='none',type=str)
parser.add_argument('-i',dest='INDEX_PATH',help='Target directory for index files',
default='none',type=str)
parser.add_argument('-e',dest='ENCODER_PATH',help='path to encoder weights',
default='none',type=str)
parser.add_argument('-d',dest='DECODER_PATH',help='path to decoder weights',
default='none',type=str)
parser.add_argument('-c',dest='CONFIG',help='path to config yaml',
default='none',type=str)
parser.add_argument('-o',dest='OUTPUT',help='out filepath',
default='none',type=str)
parser.add_argument('-b',dest='BATCH_SIZE',help='batch size',
default=8,type=int)
parser.add_argument('--use_cpu',dest='USE_CPU',action='store_true',
help='use the cpu')
args = parser.parse_args()
#load up the dataset
df = | pd.read_csv(args.INDEX_PATH) | pandas.read_csv |
import os
import pandas as pd
# read recent recombinations
fastGear_recent = pd.read_csv(snakemake.input[0],sep="\s+",header=1)
fastGear_recent_out = fastGear_recent[["StrainName", "Start", "End"]] # format in bed file
# every ancestral recombination involves multiple strains from two lineages (donor and recipient)
# we will list all the strains in these two lineages with the ancestral recombination region detected
# this is a rowwise function
def mask_strains_in_lineage(row, lineage):
lineage1 = row["Lineage1"] # lineage 1
lineage2 = row["Lineage2"] # lineage 2
start = row["Start"] # start position
end = row["End"] # end position
    lineages = lineage[lineage["Lineage"].isin([lineage1, lineage2])]["Name"] # names of all strains belonging to either lineage 1 or lineage 2
current_ances_row_bed=pd.DataFrame({"StrainName" : lineages.to_list(), "Start" : [int(start)] * len(lineages), "End" : [int(end)] * len(lineages)}) # format involved strains with their recombined regions
return current_ances_row_bed
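# Illustrative usage (kept as a comment because fastGear_ances and lineage_file are
# only loaded further down): applying the helper row-wise and concatenating the
# per-row frames yields one BED-style row per (strain, recombined region) pair.
#   ances_bed = pd.concat(
#       fastGear_ances.apply(mask_strains_in_lineage, axis=1, lineage=lineage_file).tolist(),
#       ignore_index=True)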
# read in ancestral recombination files
fastGear_ances = pd.read_csv(snakemake.input[1],sep="\s+",header=1)
lineage_file= | pd.read_csv(snakemake.input[2],sep="\s+",header=0) | pandas.read_csv |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys
import numpy as np
import pytest
from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
compat,
)
import pandas._testing as tm
from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self) -> None:
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser.engine = "c"
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("squeeze", [True, False])
def test_squeeze(all_parsers, squeeze):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv_check_warnings(
FutureWarning,
"The squeeze argument has been deprecated "
"and will be removed in a future version. "
'Append .squeeze\\("columns"\\) to the call to squeeze.\n\n',
StringIO(data),
index_col=0,
header=None,
squeeze=squeeze,
)
if not squeeze:
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
@xfail_pyarrow
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@xfail_pyarrow
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
@xfail_pyarrow
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
def test_read_filepath_or_buffer(all_parsers):
# see gh-43366
parser = all_parsers
with pytest.raises(TypeError, match="Expected file path name or file-like"):
parser.read_csv(filepath_or_buffer=b"input")
@xfail_pyarrow
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
# Skip for now, actually only one test fails though, but its tricky to xfail
@skip_pyarrow
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_table_same_signature_as_read_csv(all_parsers):
# GH-34976
parser = all_parsers
table_sign = signature(parser.read_table)
csv_sign = signature(parser.read_csv)
assert table_sign.parameters.keys() == csv_sign.parameters.keys()
assert table_sign.return_annotation == csv_sign.return_annotation
for key, csv_param in csv_sign.parameters.items():
table_param = table_sign.parameters[key]
if key == "sep":
assert csv_param.default == ","
assert table_param.default == "\t"
assert table_param.annotation == csv_param.annotation
assert table_param.kind == csv_param.kind
continue
else:
assert table_param == csv_param
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(
PY310,
reason="GH41935 This test is leaking only on Python 3.10,"
"causing other tests to fail with a cryptic error.",
)
@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
# GH#41069
parser = all_parsers
data = "a b\n0 1"
sys.setprofile(lambda *a, **k: None)
result = getattr(parser, read_func)(StringIO(data))
sys.setprofile(None)
expected = DataFrame({"a b": ["0 1"]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom_unquoted(all_parsers):
# see gh-36343
parser = all_parsers
data = """\ufeffHead1\tHead2\tHead3"""
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
@xfail_pyarrow
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
with tm.assert_produces_warning(ParserWarning):
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
def test_read_csv_names_not_accepting_sets(all_parsers):
# GH 34946
data = """\
1,2,3
4,5,6\n"""
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
@xfail_pyarrow
def test_read_table_delim_whitespace_default_sep(all_parsers):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
result = parser.read_table(f, delim_whitespace=True)
expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
def test_read_csv_delimiter_and_sep_no_default(all_parsers):
# GH#39823
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified a sep and a delimiter; you can only specify one."
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, sep=" ", delimiter=".")
@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
def test_read_csv_line_break_as_separator(kwargs, all_parsers):
# GH#43528
parser = all_parsers
data = """a,b,c
1,2,3
"""
msg = (
r"Specified \\n as separator or delimiter. This forces the python engine "
r"which does not accept a line terminator. Hence it is not allowed to use "
r"the line terminator as separator."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_read_csv_posargs_deprecation(all_parsers):
# GH 41485
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = (
"In a future version of pandas all arguments of read_csv "
"except for the argument 'filepath_or_buffer' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
parser.read_csv(f, " ")
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
@pytest.mark.parametrize("func", ["read_csv", "read_table"])
def test_names_and_prefix_not_None_raises(all_parsers, func):
# GH#39123
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified named and prefix; you can only specify one."
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning):
getattr(parser, func)(f, names=["a", "b"], prefix="x")
@pytest.mark.parametrize("func", ["read_csv", "read_table"])
@pytest.mark.parametrize("prefix, names", [(None, ["x0", "x1"]), ("x", None)])
def test_names_and_prefix_explicit_None(all_parsers, names, prefix, func):
# GH42387
f = StringIO("a,b\n1,2")
expected = DataFrame({"x0": ["a", "1"], "x1": ["b", "2"]})
parser = all_parsers
if prefix is not None:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = getattr(parser, func)(
f, names=names, sep=",", prefix=prefix, header=None
)
else:
result = getattr(parser, func)(
f, names=names, sep=",", prefix=prefix, header=None
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_dict_keys_as_names(all_parsers):
# GH: 36928
data = "1,2"
keys = {"a": int, "b": int}.keys()
parser = all_parsers
result = parser.read_csv(StringIO(data), names=keys)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_encoding_surrogatepass(all_parsers):
# GH39017
parser = all_parsers
content = b"\xed\xbd\xbf"
decoded = content.decode("utf-8", errors="surrogatepass")
expected = DataFrame({decoded: [decoded]}, index=[decoded * 2])
expected.index.name = decoded * 2
with tm.ensure_clean() as path:
Path(path).write_bytes(
content * 2 + b"," + content + b"\n" + content * 2 + b"," + content
)
df = parser.read_csv(path, encoding_errors="surrogatepass", index_col=0)
| tm.assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
import configparser
import importlib
import os
import os.path as osp
import sqlite3 as sq
import warnings
import yaml as ya
from io import StringIO
from pandas import concat, DataFrame, Series, read_sql_query, read_csv
from sqlite3 import Error
import numpy as np
from numba import jit
###############################################################################
#Non-Standard Imports
###############################################################################
try:
from .model_checker import check_model_terms
from .model_coder import model_to_code
except:
from model_checker import check_model_terms
from model_coder import model_to_code
###############################################################################
#Globals
###############################################################################
MBase = None
UBase = None
_dir = osp.dirname(osp.abspath(__file__))
userid = 'usr'
table_sql = '''
CREATE TABLE IF NOT EXISTS "models" (
"id" TEXT,
"system_type" TEXT,
"states" TEXT,
"parameters" TEXT,
"inputs" TEXT,
"equations" TEXT,
"ia" TEXT,
"descriptions" TEXT,
"active" INTEGER DEFAULT 1,
UNIQUE(system_type)
);
'''
all_model_funcs = {'np' : np, 'log' : np.log10, 'ln' : np.log,
'exp' : np.exp, 'jit' : jit
}
###############################################################################
#Database and Table Construction
###############################################################################
def create_connection(db_file):
'''
Creates database specified by db_file.
'''
db = sq.connect(db_file)
return db
def create_table(db, *args):
'''
Creates table.
Use args to add additional sql commands.
'''
try:
c = db.cursor()
for arg in args:
c.execute(arg)
except Error as e:
raise e
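# Minimal usage sketch (illustrative; the real MBase/UBase connections are created
# elsewhere in this module): open an on-disk SQLite file and build the models table.
def _example_create_models_db(db_file='example_models.db'):
    db = create_connection(db_file)
    create_table(db, table_sql)
    return db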
###############################################################################
#Constructor
###############################################################################
def make_core_model(system_type, states, parameters, inputs, equations, descriptions=None, ia='', **kwargs):
    '''Returns a dictionary with the keys id, system_type, states, parameters, inputs, equations, ia and descriptions.
Otherwise referred to as a core_model in the BMSS2 documentation. Redundant
states, parameters and inputs are disallowed.
:param system_type: A string of keywords serving as a unique identifier for
the core_model separated by commas, will be formatted so there is one space
after each comma, keywords should be in CamelCase
:type system_type: str
:param states: A list of strings corresponding to state names used in the core_model
:type states: list
:param parameters: A list of strings corresponding to parameter names used in the core_model
:type parameters: list
:param inputs: A list of strings corresponding to input names used in the core_model
:type inputs: list
:param equations: A list of strings corresponding to lines of equations used in the core_model
where the lines form coherent Python code when joined by '\n'.join
:type equations: list
:param descriptions: A description of the model.
:type descriptions: dict, optional
:param ia: For IA results as a string that can be read into csv format. Avoid using this argument.
:type ia: string, optional
:kwargs: Will be ignored
'''
if type(system_type) == str:
system_type1 = ', '.join([s.strip() for s in system_type.split(',')])
else:
system_type1 = ', '.join(system_type)
        return make_core_model(system_type1, states, parameters, inputs, equations, descriptions, ia=ia)
states1 = list(states)
parameters1 = list(parameters)
inputs1 = list(inputs)
equations1 = list(equations)
descriptions1 = descriptions if descriptions else {}
core_model = {'id' : '',
'system_type' : system_type1,
'states' : states1,
'parameters' : parameters1,
'inputs' : inputs1,
'equations' : equations1,
'ia' : ia,
'descriptions' : descriptions1,
}
for key in ['states', 'parameters', 'inputs', 'equations']:
if not all([type(x)==str for x in core_model[key]]):
raise Exception('Invalid name in ' + key + '. Only strings are allowed.')
if type(core_model['ia']) != str:
raise Exception('Invalid name in ia. Only strings are allowed.')
is_valid, text = check_model_terms(core_model)
if not is_valid:
warnings.warn('Error in ' + str(system_type1) + ' when checking terms: ' + text)
model_to_code(core_model, local=False)
    get_model_function(system_type1, local=False)
return core_model
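# Illustrative call (the system_type keywords, names and the "d<state> = ..." equation
# style below are assumptions made for demonstration; note that make_core_model also
# generates and imports the corresponding model function, so this requires a writable
# model_functions directory):
def _example_core_model():
    return make_core_model(system_type='Example, ConstitutiveExpression',
                           states=['m', 'p'],
                           parameters=['synm', 'synp', 'degm', 'degp'],
                           inputs=[],
                           equations=['dm = synm - degm*m',
                                      'dp = synp*m - degp*p'],
                           descriptions={'m': 'mRNA', 'p': 'peptide'})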
def copy_core_model(core_model):
keys = ['id', 'system_type', 'states' , 'parameters', 'inputs', 'equations', 'ia', 'descriptions']
new_core_model = {}
for key in keys:
try:
value = core_model[key].copy()
except:
value = core_model[key]
new_core_model[key] = value
return new_core_model
###############################################################################
#Model Storage
###############################################################################
def add_to_database(core_model, dialog=True):
'''Accepts a core_model and adds it to UBase.
'''
if 'BMSS' in core_model['system_type']:
raise Exception('system_type cannot contain "BMSS" as keyword.')
return backend_add_to_database(core_model, database=UBase, dialog=dialog)
###############################################################################
#Supporting Functions
###############################################################################
def backend_add_to_database(core_model, database, dialog=False):
'''Supporting function for add_to_database. Do not run.
:meta private:
'''
global MBase
global UBase
global userid
system_type = core_model['system_type']
make_new_id = True
existing_model = quick_search(system_type, error_if_no_result=False, active_only=False)
is_active = existing_model['system_type'] in list_models(UBase) if existing_model else False
    d = 'MBase' if database == MBase else 'UBase'
if existing_model:
existing_db = MBase if system_type in list_models(MBase) else UBase
if database != existing_db:
a = 'MBase' if database == MBase else 'UBase'
b = 'UBase' if database == UBase else 'MBase'
raise Exception(f'The model already exists in {a}. You cannot add it to {b}.')
if is_active and dialog:
#Break and continue if overwrite else return immediately
while True:
x = input('Overwrite existing model? (y/n): ')
if x.lower() == 'y':
break
return existing_model['id']
#Check existing model and edit if it matches
if system_type == existing_model['system_type']:
core_model['id'] = existing_model['id']
make_new_id = False
row = string_dict_values(core_model)
row_id = add_row('models', row, database)
#Update id based on row number if the model is new
if make_new_id:
model_id = 'bmss' + str(row_id) if database == MBase else userid + str(row_id)
update_value_by_rowid(row_id, 'id', model_id, database)
else:
model_id = core_model['id']
o = 'Added model ' if make_new_id else 'Modified model ' if is_active else 'Added model '
n = model_id if make_new_id else core_model['id']
print(o + n + ' to '+ d)
model_to_code(core_model, local=False)
return model_id
def string_dict_values(core_model):
'''Converts core_model to string
:meta private:
'''
model_dict = {key: str(core_model[key]) for key in core_model}
return model_dict
def update_value_by_rowid(row_id, column_id, value, database):
'''Supporting function for backend_add_to_database. Do not run.
:meta private:
'''
with database as db:
comm = "UPDATE models SET " + column_id + " = '" + str(value) + "' WHERE rowid = " + str(row_id)
cur = db.cursor()
cur.execute(comm)
def add_row(table, row, database):
'''Supporting function for backend_add_to_database. Do not run.
:meta private:
'''
row_ = '(' + ', '.join([k for k in row.keys()]) + ', active)'
values = tuple(row.values()) + ('1',)
with database as db:
comm = 'REPLACE INTO ' + table + str(row_) + ' VALUES(' + ','.join(['?']*len(values))+ ')'
cur = db.cursor()
cur.execute(comm, values)
return cur.lastrowid
###############################################################################
#Search
###############################################################################
def search_database(keyword, search_type='system_type', database=None, active_only=True):
'''Searches database for core_model data structure based on a keyword and a
key(field) in the core_model. Returns a list of core_model dictionaries.
:param keyword: A string that will be matched against entries in the database
:type keyword: str
:param search_type: A string corresponding to any key in the core_model, will
be used for matching
:type search_type: str
:param database: Can be either MBase or UBase, if None, this function will search
both databases, defaults to None
:type database: SQL Connection, optional
:param active_only: A boolean for backend use, if True, this limits the search
to rows where the value of active is True, defaults to True
:type active_only: bool, optional
'''
global MBase
global UBase
keyword1 = keyword if type(keyword) == str else ', '.join(keyword)
comm = 'SELECT id, system_type, states, parameters, inputs, equations, ia, descriptions FROM models WHERE ' + search_type + ' LIKE "%' + keyword1
result = []
if active_only:
comm += '%" AND active = 1;'
else:
comm += '%";'
databases = [database] if database else [MBase, UBase]
for database in databases:
with database as db:
cursor = db.execute(comm)
models = cursor.fetchall()
columns = database.execute('PRAGMA table_info(models);')
columns = columns.fetchall()
columns = [column[1] for column in columns if column[1]!='active']
models = [dict(zip(columns, model)) for model in models]
models = [process_model(model) for model in models]
result += models
return result
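# Typical lookup (illustrative; assumes the MBase/UBase connections have been
# initialised and that the keyword matches models that are actually stored):
def _example_matching_system_types(keyword='Inducible'):
    return [m['system_type'] for m in search_database(keyword, search_type='system_type')]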
def process_model(model):
'''
:meta private:
'''
result = {}
for key, value in model.items():
if key in ['id', 'system_type', 'ia']:
result[key] = value
else:
try:
result[key] = eval(value)
except Exception as e:
system_type = model['system_type']
msg = 'An error occurred when calling eval on {} for {}'.format(key, system_type)
raise Exception(msg, *e.args)
return result
def quick_search(system_type, error_if_no_result=True, **kwargs):
    '''Searches by system_type and returns the model whose system_type matches exactly.
Raises an error when no matches are found if error_if_no_result is set to True.
'''
core_models = search_database(system_type, search_type='system_type', **kwargs)
for core_model in core_models:
if core_model['system_type'] == system_type:
return core_model
if error_if_no_result:
raise Exception('Could not retrieve model with system_type ' + str(system_type))
else:
return
def list_models(database=None):
'''Returns a list of system_types
'''
global MBase
global UBase
if database:
with database as db:
comm = 'SELECT system_type FROM models WHERE active = 1;'
cursor = db.execute(comm)
models = [m[0] for m in cursor.fetchall()]
return models
else:
return list_models(MBase) + list_models(UBase)
def get_model_function(system_type, local=False):
global all_model_funcs
model_name = system_type.replace(', ', '_')
func_name = 'model_'+ model_name
if func_name in all_model_funcs:
return all_model_funcs[func_name]
if local:
filename = f'{model_name}.py'
else:
filename = osp.join(osp.dirname(__file__), 'model_functions', f'{model_name}.py')
if not osp.isfile(filename):
raise Exception(f'Could not find file for model function: {filename}')
with open(filename, 'r') as file:
code = file.read()
code = 'def' + code.split('def')[1]
exec(code, all_model_funcs)
return all_model_funcs[func_name]
###############################################################################
#Interfacing with Pandas
###############################################################################
def to_df(database=None):
'''Returns a copy of the databases as a pandas DataFrame.
:param database: The database to be read, if None, both databases will be read,
defaults to None
:type database: SQL Connection
'''
if database:
with database as db:
df = read_sql_query("SELECT * from models", db)
return df
else:
global MBase
global UBase
df = to_df(MBase), to_df(UBase)
df = concat(df, ignore_index=True)
return df
def backend_from_df(df, database):
'''For backend maintenance only.
:meta private:
'''
with database as db:
return df.to_sql('models', db, if_exists='replace', index=False)
###############################################################################
#Interfacing with Configparser
###############################################################################
def from_config(filename):
'''Reads a file and returns a core_model data structure.
:param filename: The name of the file to read.
:type filename: str
'''
config = configparser.ConfigParser()
config.optionxform = lambda option: option
model = {'system_type' : [],
'states' : [],
'parameters' : [],
'inputs' : [],
'equations' : [],
'ia' : '',
'descriptions' : {}
}
with open(filename, 'r') as file:
config.read_file(file)
for key in config.sections():
if key not in model:
continue
if key == 'ia':
line = config[key][key].strip()
elif key == 'equations':
# line = config[key][key].replace('\n', ',').split(',')
line = split_at_top_level(config[key][key].replace('\n', ','))
line = [s.strip() if s else '' for s in line]
line = line if line[0] else line[1:]
elif key == 'descriptions':
temp = config[key]
line = {k: temp[k] for k in temp}
else:
line = config[key][key].replace('\n', ',').split(',')
line = [s.strip() for s in line if s]
model[key] = line
return make_core_model(**model)
def split_at_top_level(string, delimiter=','):
'''
Use this for nested lists.
This is also a helper function for string_to_dict.
'''
nested = []
buffer = ''
result = []
matching_bracket = {'(':')', '[':']', '{':'}'}
for char in string:
if char in ['[', '(', '{']:
nested.append(char)
buffer += char
elif char in [']', ')', '}']:
if char == matching_bracket.get(nested[-1]):
nested = nested[:-1]
buffer += char
else:
raise Exception('Mismatched brackets.' )
elif char == delimiter and not nested:
if buffer:
result.append(buffer)
buffer = ''
else:
buffer += char
if buffer:
result.append(buffer)
return result
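# Worked example: commas nested inside brackets are not treated as delimiters.
def _split_at_top_level_example():
    return split_at_top_level('a, [b, c], d')  # -> ['a', ' [b, c]', ' d']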
def to_config(core_model, filename):
'''Exports a core_model data structure to a config file.
:param core_model: The core_model to be exported
:type core_model: dict
:param filename: The name of the file to write to
:type filename: str
'''
config = configparser.ConfigParser()
for key in core_model:
if not core_model[key]:
continue
if type(core_model[key]) == str:
line = core_model[key]
elif type(core_model[key]) == dict:
config[key] = core_model[key]
continue
elif key == 'equations':
line = '\n' + '\n'.join(core_model[key])
else:
line = ', '.join(core_model[key])
config[key] = {key:line}
with open(filename, 'w') as configfile:
config.write(configfile)
return config
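# Round-trip sketch (illustrative file name): a model written with to_config can be
# read back with from_config, which rebuilds the core_model via make_core_model.
#   to_config(core_model, 'example_model.ini')
#   same_model = from_config('example_model.ini')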
###############################################################################
#Direct Config to Database
###############################################################################
def config_to_database(filename, dialog=True):
'''Reads a config file containing information for a core_model data structure
and adds the core_model to the database.
:param filename: Name of the file to be read
:type filename: str
:param dialog: For backend use, defaults to True
:type dialog: bool, optional
'''
global UBase
return backend_config_to_database(filename, database=UBase, dialog=dialog)
def backend_config_to_database(filename, database, dialog=False):
'''For backend maintenance. Do not run.
:meta private:
'''
core_model = from_config(filename)
backend_add_to_database(core_model, database, dialog=dialog)
return core_model['system_type']
###############################################################################
#Updating IA
###############################################################################
def update_ia(core_model, new_row, save=True):
'''
Appends new_row to the core_model['ia'] where new_row can be a dict or Series.
If save is True and the core_model is in the database, the changes will be
applied to database.
'''
global MBase
global UBase
s = new_row if type(new_row) in [Series, DataFrame] else Series(new_row)
if core_model['ia']:
df = read_ia(core_model)
try:
s = s[df.columns]
except:
raise Exception('Mismatch in column names. \nExpected: ' +
str(df.columns) + '\nReceived: ' + str(s.index))
#Check if inputs are duplicated
try:
row_num = df['input'][s['input'] == df['input']].index[0]
df.iloc[row_num] = s
print('Updated row ' + str(row_num) + ' in ' + str(core_model['system_type']))
except:
df = df.append(s, ignore_index=True)
print('Added row to ' + str(core_model['system_type']))
else:
df = | DataFrame(s) | pandas.DataFrame |
from perceptron import Perceptron
from pandas import DataFrame
def run_perceptron():
print('\n\n********************** AND **********************\n\n')
p = Perceptron(2)
df = | DataFrame([[0, 0], [0, 1], [1, 0], [1, 1]]) | pandas.DataFrame |
import warnings
from onecodex.lib.enums import AlphaDiversityMetric, Rank, BaseEnum
from onecodex.exceptions import OneCodexException, PlottingException, PlottingWarning
from onecodex.viz._primitives import prepare_props, sort_helper, get_base_classification_url
class PlotType(BaseEnum):
Auto = "auto"
BoxPlot = "boxplot"
Scatter = "scatter"
class VizMetadataMixin(object):
def plot_metadata(
self,
rank=Rank.Auto,
haxis="Label",
vaxis=AlphaDiversityMetric.Shannon,
title=None,
xlabel=None,
ylabel=None,
return_chart=False,
plot_type=PlotType.Auto,
label=None,
sort_x=None,
width=200,
height=400,
facet_by=None,
):
"""Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'observed_taxa', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
sort_x : `list` or `callable`, optional
Either a list of sorted labels or a function that will be called with a list of x-axis labels
as the only argument, and must return the same list in a user-specified order.
facet_by : `string`, optional
The metadata field used to facet samples by (i.e. to create a separate subplot for each
group of samples).
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
individuals are allergic to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
"""
# Deferred imports
import altair as alt
import pandas as pd
if rank is None:
raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
if not PlotType.has_value(plot_type):
raise OneCodexException("Plot type must be one of: auto, boxplot, scatter")
if len(self._results) < 1:
raise PlottingException(
"There are too few samples for metadata plots after filtering. Please select 1 or "
"more samples to plot."
)
# alpha diversity is only allowed on vertical axis--horizontal can be magically mapped
metadata_fields = [haxis, "Label"]
if facet_by:
metadata_fields.append(facet_by)
df, magic_fields = self._metadata_fetch(metadata_fields, label=label)
if AlphaDiversityMetric.has_value(vaxis):
df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
magic_fields[vaxis] = vaxis
df.dropna(subset=[magic_fields[vaxis]], inplace=True)
else:
# if it's not alpha diversity, vertical axis can also be magically mapped
vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
# we require the vertical axis to be numerical otherwise plots get weird
if (
pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]])
or pd.api.types.is_categorical_dtype(vert_df[vert_magic_fields[vaxis]])
or | pd.api.types.is_object_dtype(vert_df[vert_magic_fields[vaxis]]) | pandas.api.types.is_object_dtype |
import numpy as np
import pandas as pd
from tqdm import tqdm
tqdm.pandas()
from numpy.linalg import norm
from sklearn.neighbors import NearestNeighbors
def rgb(value):
minimum, maximum = float(0), float(255)
ratio = 2 * (value-minimum) / (maximum - minimum)
b = int(max(0, 255*(1 - ratio)))
r = int(max(0, 255*(ratio - 1)))
g = 255 - b - r
return (r, g, b)
def dist_latlon(lat1, lon1, lat2, lon2):
return norm([lat1 - lat2, lon1 - lon2])
def add_poswork_target(x):
lat1, lon1, lat2, lon2 = x[['pos_lat', 'pos_lon', 'work_lat', 'work_lon']]
d = dist_latlon(lat1, lon1, lat2, lon2)
return int(d < 0.02)
def add_poshome_target(x):
lat1, lon1, lat2, lon2 = x[['pos_lat', 'pos_lon', 'home_lat', 'home_lon']]
d = dist_latlon(lat1, lon1, lat2, lon2)
return int(d < 0.02)
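# Illustrative usage (kept as comments; df is built elsewhere in the pipeline and must
# already contain the pos/work/home latitude-longitude columns used above):
#   df['target_work'] = df.progress_apply(add_poswork_target, axis=1)
#   df['target_home'] = df.progress_apply(add_poshome_target, axis=1)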
def add_dist_to_neighbours(df):
df_point_dup = df.groupby(['pos_lat', 'pos_lon']).agg('size').reset_index()
df_point_dup.columns = ['pos_lat', 'pos_lon', 'pos_customer_freq']
df = | pd.merge(df, df_point_dup, on=['pos_lat', 'pos_lon'], how='left') | pandas.merge |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/03_ACS_Download.ipynb (unless otherwise specified).
__all__ = ['retrieve_acs_data']
# Cell
import pandas as pd
from urllib.parse import urlencode
import csv # quoting=csv.QUOTE_ALL
# Cell
# @ title Run: Create retrieve_acs_data()
#File: retrieveAcsData.py
#Author: <NAME>
#Date: 1/9/19
#Section: Bnia
#Email: <EMAIL>
#Description:
#This file returns ACS data given an ID and Year
# The county total is given a tract of '010000'
#def retrieve_acs_data():
#purpose: Retrieves ACS data from the web
#input:
# state (required)
# county (required)
# tract (required)
# tableId (required)
# year (required)
# includeCountyAgg (True)(todo)
# replaceColumnNames (False)(todo)
# save (required)
#output:
# Acs Data.
# Prints to ../../data/2_cleaned/acs/
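# Example call (illustrative values: FIPS state 24 = Maryland, county 510 = Baltimore
# City, tract '*' = every tract, ACS table B19001, ACS 5-year release for 2018):
# df = retrieve_acs_data(state='24', county='510', tract='*', tableId='B19001', year='18')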
def retrieve_acs_data(state, county, tract, tableId, year):
dictionary = ''
keys = []
vals = []
header = []
keys1=keys2=keys3=keys4=keys5=keys6=keys7=keys8=''
keyCount = 0
# Called in addKeys(), Will create the final URL for readIn()
# These are parameters used in the API URL Query
# This query will retrieve the census tracts
def getParams(keys): return {
'get': 'NAME'+keys,
'for': 'tract:'+tract,
'in': 'state:'+state+' county:'+county,
'key': '<KEY>'
}
    # Aggregate City data is best retrieved separately rather than as an aggregate of its constituent tracts
def getCityParams(keys): return {
'get': 'NAME'+keys,
'for': 'county:'+county,
'in': 'state:'+state,
'key': '<KEY>'
}
# Called in AddKeys(). Requests data by url and preformats it.
def readIn( url ):
tbl = pd.read_json(url, orient='records')
tbl.columns = tbl.iloc[0]
return tbl
# Called by retrieveAcsData.
# Creates a url and retrieve the data
# Then appends the city values as tract '010000'
# Finaly it merges and returns the tract and city totals.
def addKeys( table, params):
# Get Tract and City Records For Specific Columns
table2 = readIn( base+urlencode(getParams(params)) )
table3 = readIn( base+urlencode(getCityParams(params)) )
table3['tract'] = '010000'
# Concatenate the Records
        table2 = pd.concat([table2, table3], ignore_index=True)
# Merge to Master Table
table = pd.merge(table, table2, how='left',
left_on=["NAME","state","county","tract"],
right_on = ["NAME","state","county","tract"])
return table
#~~~~~~~~~~~~~~~
# Step 1)
# Retrieve a Meta Data Table Describing the Content of the Table
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+year+'/acs/acs5/groups/'+tableId+'.json'
metaDataTable = | pd.read_json(url, orient='records') | pandas.read_json |
import scipy
import time
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL.master import basins
from hydroDL.data import gageII, usgs, gridMET
from hydroDL import kPath, utils
import os
import pandas as pd
import numpy as np
from hydroDL import kPath
fileSiteNo = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
# all gages
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
codeLst = sorted(usgs.newC)
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE', 'CLASS'], siteNoLst=siteNoLstAll)
dfCrd = gageII.updateCode(dfCrd)
sd = np.datetime64('1979-01-01')
# load all data
dictC = dict()
dictCF = dict()
for k, siteNo in enumerate(siteNoLstAll):
print(k, siteNo)
dfC, dfCF = usgs.readSample(siteNo, codeLst=codeLst, startDate=sd, flag=2)
dictC[siteNo] = dfC
dictCF[siteNo] = dfCF
dictQ = dict()
for k, siteNo in enumerate(siteNoLstAll):
print(k, siteNo)
dfQ = usgs.readStreamflow(siteNo, startDate=sd)
dfQ = dfQ.rename(columns={'00060_00003': '00060'})
dictQ[siteNo] = dfQ
# app\waterQual\stableSites\countSiteYear.py
# calculate interval
intMatC = np.full([len(siteNoLstAll), len(codeLst), 4], np.nan)
for k, siteNo in enumerate(siteNoLstAll):
dfC = dictC[siteNo]
print('\t {}/{}'.format(k, len(siteNoLstAll)), end='\r')
for j, code in enumerate(codeLst):
tt = dfC[code].dropna().index.values
if len(tt) > 1:
dt = tt[1:]-tt[:-1]
dd = dt.astype('timedelta64[D]').astype(int)
intMatC[k, j, 0] = len(tt)
intMatC[k, j, 1] = np.percentile(dd, 25)
intMatC[k, j, 2] = np.percentile(dd, 50)
intMatC[k, j, 3] = np.percentile(dd, 75)
fig, ax = plt.subplots(1, 1)
for code in codeLst:
ic = codeLst.index(code)
v = intMatC[:, ic, 2]
vv = np.sort(v[~np.isnan(v)])
x = np.arange(len(vv))
ax.plot(x, vv, label=code)
ax.set_ylim([0, 100])
ax.set_xlim([0, 1000])
ax.legend()
fig.show()
[indS, indC] = np.where((intMatC[:, :, 0] > 150) & (intMatC[:, :, 2] < 40))
len(np.unique(indS))
# use convolve to count # samples within one year
siteNo = siteNoLstAll[0]
code = '00915'
df = dictC[siteNo][code].dropna()
sd = np.datetime64('1979-01-01')
ed = np.datetime64('2019-12-31')
td = | pd.date_range(sd, ed) | pandas.date_range |
import pandas as pd
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import numpy as np
import seaborn as sns; sns.set()
import csv
from scipy.stats import ranksums
"""
Load song data
"""
# load in song data
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed_forTOD.csv"
log_song_data = | pd.DataFrame.from_csv(data_path, header=0, index_col=None) | pandas.DataFrame.from_csv |
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
from utils import load_pickle, log, dump_pickle, remove_all_files_from_dir
from config.config import DEFAULT_FILES_NAMES, DEFAULT_END_DATE, DEFAULT_START_DATE
class DataHandler:
"""
DataHandler aims to:
        - Construct features that will be used later
        - Build labels using return thresholds and volatility
        - Build long / hold / short strategies
        - Transform stock price time series into images
        - Pickle dataframes on disk with keys/values
"""
def __init__(self, encoding_method='GADF',
window_len=42,
image_size=42,
retrain_freq=5,
threshold_ret = (-0.014,0.014),
start_date: int = DEFAULT_START_DATE,
end_date: int = DEFAULT_END_DATE,
frac_of_stocks=1.,
minimum_volume=1e6,
stock_data_dir_path: str = 'data/2019_2010_stock_data',
dir_for_samples='data/cnn_samples/regular',
nb_of_stocks_by_file=50,
nb_files_to_read: int = 34,
):
"""
        :param encoding_method: image encoding method, one of GADF, GASF or MTF
        :param window_len: length of the moving window (number of observations per image)
        :param image_size: size of the encoded image
        :param retrain_freq: retraining frequency, i.e. the lag (in days) between two consecutive samples
        :param threshold_ret: (lower, upper) return thresholds used for labelling
        :param start_date: start date to consider for the stock data
        :param end_date: last date to consider for the stock data
        :param frac_of_stocks: fraction of the stock data files to use (data restricted to start_date-end_date)
        :param minimum_volume: minimum average daily volume used to filter out small stocks
        :param stock_data_dir_path: path to fetch stock data from
        :param dir_for_samples: path where the transformed images are stored
        :param nb_of_stocks_by_file: number of stocks written to each dumped file
        :param nb_files_to_read: number of stock data files to read
"""
self._window_len = window_len
self._image_size = image_size
self._retrain_freq = retrain_freq
self._threshold_ret = threshold_ret
self._encoding_method = encoding_method
self._features = ['date', 'RET', 'ASKHI', 'BIDLO', 'VOL', 'sprtrn']
self._min_volume = minimum_volume
self._start_date = start_date
self._end_date = end_date
self._frac_of_stocks_to_get = frac_of_stocks
self._nb_of_stocks_by_file = nb_of_stocks_by_file
self._directory_for_samples = dir_for_samples
self._stock_data_dir_path = stock_data_dir_path
self._N_FILES_CRSP = nb_files_to_read
self._LOGGER_ENV = 'image_encoding'
self.df_data = None
self._df_raw_data = None
self._stocks_list = None
def get_df_data(self):
"""
- Identifies how many files it must read (randomly) according to self._frac_of_stocks_to_get.
Reads all the files if self._frac_of_stocks_to_get is 1. Use a value <1 for testing purposes
- Filters data on Volume/dates
- Constructs features that will be used later in our model
        :instantiates:
* self.df_data: dataframe of all data with dates as index
* self._stocks_list: all the unique permnos (stock identifiers) present in the data
* self._df_raw_data: dataframe of the data as extracted from the database
:return: Nothing
"""
nb_files_to_get = max(round(self._frac_of_stocks_to_get * self._N_FILES_CRSP), 1)
choices = np.random.choice(np.arange(1, self._N_FILES_CRSP + 1), nb_files_to_get, replace=False)
file_names = ['stockdata_{}'.format(i) for i in choices]
df_data = self._load_stock_data(file_names, data_dir_path=self._stock_data_dir_path,
logger_env=self._LOGGER_ENV)
self._df_raw_data = df_data
df_data = self._filter_data(df_data)
df_data = self.__rectify_prices(df_data)
df_data = self._extract_features(df_data)
self._stocks_list = np.unique(df_data.index)
self.log('Data finalized in handler.df_data, number of stocks {}'.format(len(self._stocks_list)))
self.df_data = self._get_data_between(df_data, self._start_date, self._end_date, self._LOGGER_ENV)
def build_and_dump_images_and_targets(self):
"""
* Builds images with the time series
* Builds labels using returns thresholds and volatility
* Pickles dataframes on disk with keys/values :
- date as index
- PERMNO: stock identifier
- RET: returns that will be used for the backtest
- samples: used for training and backtesting
- close: one-hot encoded elements, Example [1,0,0] stands for the class 'long'
"""
nb_stocks = len(self._stocks_list)
n_files_to_dump = nb_stocks // self._nb_of_stocks_by_file + ((nb_stocks % self._nb_of_stocks_by_file) != 0)
df_data_multi_index = self.df_data.reset_index(drop=False).set_index(['PERMNO', 'date'])
self.log('***** Dumping data in {} different files'.format(n_files_to_dump))
# Removing existing files in the folder
remove_all_files_from_dir(self._directory_for_samples, logger_env=self._LOGGER_ENV)
for batch in range(n_files_to_dump):
btch_name = 'image_data_{}'.format(batch + 1)
btch_stocks = self._stocks_list[batch * self._nb_of_stocks_by_file:(batch + 1) * self._nb_of_stocks_by_file]
df_batch_data = self._extract_data_for_stocks(df_data_multi_index, btch_stocks)
# Build Images and targets
df_res = self._build_images_one_batch(df_batch_data, btch_name)
# Sort by dates
df_res = df_res.set_index('date').sort_index()
# Dumping the pickle dataframe
dump_pickle(df_res, os.path.join(self._directory_for_samples, btch_name), logger_env=self._LOGGER_ENV)
@staticmethod
def _build_close_returns(df, window_len=64, retrain_freq=5, up_return=0.0125, down_return=-0.0125,
buy_on_last_date=True):
"""
:param df: daily data for one stock; must contain a 'RET' returns column
:param window_len: lookback window used for the rolling volatility estimate
:param retrain_freq: holding period, i.e. number of days between two rebalances
:param up_return: base threshold for the long class (scaled by 4x the rolling annualised volatility)
:param down_return: base threshold for the short class (scaled by 4x the rolling annualised volatility)
:param buy_on_last_date: whether the position is entered on the last date of the window rather than the next day
:return: one-hot target array, backtesting dataframe, price return list, date list
"""
n_sample = len(df)
targets, prc_list, dates_list = [], [], []
# Strategy of long / hold / short
# Hold stands for a state where the model cannot decide between long and short
_long, _hold, _short = [1, 0, 0], [0, 1, 0], [0, 0, 1]
rebalance_indexes = []
df_rolling_ret = np.exp(np.log(df.RET).rolling(window=retrain_freq).sum()) # product of returns
# print(df_rolling_ret)
df_rolling_std = df.RET.rolling(window=window_len).std() * np.sqrt(252.)
for i in range(window_len, n_sample, retrain_freq):
j = i - 1 if buy_on_last_date else i
price_return = df_rolling_ret.iloc[np.min([n_sample - 1, i - 1 + retrain_freq])]
dates_list.append(df.index[j])
prc_list.append(price_return)
vol = df_rolling_std.iloc[j]
if price_return - 1. > up_return * vol * 4:
targets.append(_long)
elif price_return - 1. < down_return * vol * 4:
targets.append(_short)
else:
targets.append(_hold)
# we keep the indexes of the dates when there will be a rebalance in the portfolio
rebalance_indexes.append(j)
df_for_backtest = df_rolling_ret.iloc[rebalance_indexes]
return np.asarray(targets), df_for_backtest, prc_list, dates_list
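# Illustrative labelling with hypothetical numbers: if the holding-period return
# net of 1 is +0.06 while up_return * vol * 4 == 0.05, the sample is tagged
# _long ([1, 0, 0]); at -0.06 against down_return * vol * 4 == -0.05 it is tagged
# _short ([0, 0, 1]); anything in between is tagged _hold ([0, 1, 0]).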
@staticmethod
def _build_images_one_stock(df_one_permno, window_len, retrain_freq, encoding_method, image_size):
"""
Encodes the time series of one stock as images
:param df_one_permno: dataframe of the timeseries of all data for one particular stock
:param window_len: number of observations to consider (42 for 2 months)
:param retrain_freq: lag to consider between making two samples
:param encoding_method: method to encode the images
:param image_size: final size of the image (using window_len*window_len will avoid any averaging)
:return: np.ndarray of the samples of shape (N,window_len,window_len,M) where:
- M is the number of features
- N is the number of final samples ~ len(df_one_permno)/retrain_freq
"""
n_days = df_one_permno.T.shape[-1]
samples_list, dates_list, prc_list = [], [], []
for i in range(window_len, n_days, retrain_freq):
window_data = df_one_permno.T.iloc[:, i - window_len:i]
# Use GADF algorithm to transform data
if encoding_method == 'GADF':
try:
from pyts.image import GADF
gadf = GADF(image_size)
except ImportError:  # newer pyts versions renamed the class
from pyts.image import GramianAngularField
gadf = GramianAngularField(image_size, method='difference')
samples_list.append(gadf.fit_transform(window_data).T)
# Use GASF algorithm to transform data
elif encoding_method == 'GASF':
try:
from pyts.image import GASF
gasf = GASF(image_size)
except ImportError:  # newer pyts versions renamed the class
from pyts.image import GramianAngularField
gasf = GramianAngularField(image_size, method='summation')
samples_list.append(gasf.fit_transform(window_data).T)
# Use MTF algorithm to transform data
elif encoding_method == 'MTF':
try:
from pyts.image import MTF
mtf = MTF(image_size)
except ImportError:  # newer pyts versions renamed the class
from pyts.image import MarkovTransitionField
mtf = MarkovTransitionField(image_size)
samples_list.append(mtf.fit_transform(window_data).T)
else:
raise ValueError("encoding_method must be either 'GADF', 'GASF' or 'MTF', not '{}'".format(encoding_method))
samples_list = np.asarray(samples_list)
return samples_list
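# Shape sketch under assumed inputs: with 6 features, ~1000 trading days,
# window_len=42, image_size=42 and retrain_freq=5, each window_data is (6, 42),
# each encoded sample becomes (42, 42, 6) after the transpose, and samples_list
# ends up with shape (192, 42, 42, 6).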
def _build_images_one_batch(self, df_batch_data, batch_name):
"""
:param df_batch_data: dataframe of the timeseries of all data for a batch of stocks
:param batch_name: name of the batch
:return: pd.DataFrame with columns ['sample', 'date', 'RET', 'close']
"""
self.log('Building Targets and Images for batch {}'.format(batch_name), )
df_batch_data = df_batch_data.reset_index(drop=False).set_index(['PERMNO', 'date'])
all_permnos = df_batch_data.index.levels[0]
# The empty dataframe initialized
columns_df_res = ['sample', 'date', 'RET', 'close']
df_res = | pd.DataFrame(columns=columns_df_res) | pandas.DataFrame |
"""
This script includes several modules for validation purposes, including treecover_masking, validation polygon masking,
roc_analysis...
----------------------------
The available modules are listed below:
burnpixel_masking(data): for masking burned pixels
treecover_masking(year,data,prctg=60): for masking forest and non-forest pixels
validation_dataset_config(State,Validation_period,BurnPixel): for validation dataset configuration
CreateValidatedBurnMask(BurnPixel,State, Validation_period): for validation mask generation
validate(Test_Array = None, Validated_Array = None, plot=False): for roc analysis
_forward_fill(Input_DataArray = None): for missing value filling
_identify_burned_area(Input_DataArray,lag0_threshold = -0.50, lag1_threshold = -0.3): for NBR differencing method
transform_from_latlon(lat, lon): for rasterising the shapefile
rasterize(shapes, coords, fill=np.nan, **kwargs): rasterize a list of (geometry, fill_value) tuples
"""
import xarray as xr
import numpy as np
import pandas as pd
import geopandas
from rasterio import features
from affine import Affine
import matplotlib.pyplot as plt
from pylab import rcParams
pd.set_option('display.max_colwidth', 200)
| pd.set_option('display.max_rows', None) | pandas.set_option |
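# A minimal sketch of the rasterisation helpers named in the module docstring,
# following the common xarray/rasterio recipe; the 'latitude'/'longitude'
# coordinate names are assumptions, not values taken from the original code.
def transform_from_latlon(lat, lon):
    """Build an affine transform from the 1-D lat/lon arrays of a regular grid."""
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale
def rasterize(shapes, coords, fill=np.nan, **kwargs):
    """Rasterize a list of (geometry, fill_value) tuples onto the grid given by `coords`."""
    transform = transform_from_latlon(coords['latitude'], coords['longitude'])
    out_shape = (len(coords['latitude']), len(coords['longitude']))
    raster = features.rasterize(shapes, out_shape=out_shape, fill=fill,
                                transform=transform, dtype=float, **kwargs)
    return xr.DataArray(raster, coords=coords, dims=('latitude', 'longitude'))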
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex,
to_datetime, date_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assert_index_equal(shifted.index, ps.index)
self.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].valid().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assertIsInstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assertIsInstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = | DataFrame({'foo': mat}, index=self.frame.index) | pandas.DataFrame |
"""created by <NAME> (https://github.com/M-earnest)
feedback questionnaire for ansl pilot
"""
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging
import os # handy system and path functions
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
def update(list_1,firsttrial,check):
# function to display Dialogue
# assign gui to variable and define title
if check == 0:
#gUI_1
#added and list_1[] == '' in red condition --> resolved
print(str(firsttrial)+'trial')
myDlg1 = gui.Dlg(title="Nachbefragung")
# quest 1
myDlg1.addField('Which tones were audible?:',choices= ['low','high','both','none'],initial=list_1[0])
# quest 2
myDlg1.addField('Which tones were not audible?:', choices=['low','high','both','none'],initial=list_1[1])
# quest 3 and 4
myDlg1.addField('Were you distracted during the audiometry / could you not concentrate?'
,choices= ['J','N'],initial=list_1[2])
if list_1[2] == '' and firsttrial==0:
myDlg1.addField('If yes: why?:',initial=list_1[3])
elif list_1[2] == 'J' and list_1[3] == '' and firsttrial==1:
myDlg1.addField('If yes: why?:',initial=list_1[3], color='red')
elif list_1[2] == 'N' and firsttrial==1:
myDlg1.addField('If yes: why?:',initial=list_1[3])
elif list_1[2] == 'J' and not list_1[3] == '' and firsttrial==1:
myDlg1.addField('If yes: why?:',initial=list_1[3])
# quest 5 and 6
myDlg1.addField('Could you focus your concentration well on listening to the tones?',choices= ['J','N'],initial=list_1[4])
if list_1[4] == '' and firsttrial==0:
myDlg1.addField('If no: why?:',initial=list_1[5])
elif list_1[4] == 'N' and list_1[5] == '' and firsttrial==1:
myDlg1.addField('If no: why?:',initial=list_1[5], color='red')
elif list_1[4] == 'J' and firsttrial==1:
myDlg1.addField('If no: why?:',initial=list_1[5])
elif list_1[4] == 'N' and not list_1[5] == '' and firsttrial==1:
myDlg1.addField('If no: why?:',initial=list_1[5])
# quest 6 and 7
myDlg1.addField('Did you use particular strategies to perceive the tones better?',choices= ['J','N'],initial=list_1[6])
if list_1[6] == '' or firsttrial==0:
myDlg1.addField('If yes: which?',initial=list_1[7])
elif list_1[6] == 'J' and list_1[7] == '' and firsttrial==1:
myDlg1.addField('If yes: which?',initial=list_1[7], color='red')
elif list_1[6] == 'N' and firsttrial==1:
myDlg1.addField('If yes: which?',initial=list_1[7])
elif list_1[6] == 'J' and not list_1[7] == '' and firsttrial==1:
myDlg1.addField('If yes: which?',initial=list_1[7])
# quest 8
if list_1[8] == '' or firsttrial==1:
myDlg1.addField('Any additional comments on the experiment?',initial=list_1[8])
elif not list_1[8] == '' and firsttrial == 1:
myDlg1.addField('Any additional comments on the experiment?',initial=list_1[8])
myDlg1.show()
myDlg1 = myDlg1.data
return list_1, myDlg1
def savecsv(list_1,filename):
# function to add data to experiment handler and save as csv
# add column + data
print('list')
data = {'gut_hoerbar': list_1[0],
'schlecht_hoerbar': list_1[1],
'aufmerksamkeit': list_1[2],
'problematik_aufmerksamkeit': list_1[3],
'konzentartion_toene': list_1[4],
'problematik_konzentration': list_1[5],
'strategienutzung': list_1[6],
'genutzte Strategie': list_1[7],
'zusaetzliche_anmerkungen': list_1[8]}
df = | pd.Series(data) | pandas.Series |
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.formatter as formatter
plot_index = pd.date_range(start="2000-1-1", freq="B", periods=10000)
class TestTimestampLocator(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_inferred_freq(self):
"""
Inferred frequencies are based on min_ticks.
"""
plot_index = pd.date_range(start="2000-1-1", freq="B", periods=10000)
tl = formatter.TimestampLocator(plot_index)
# showing only the first 10 should give us days
xticks = tl._process(1, 10)
assert tl.gen_freq == 'D'
# showing only the first 70 should give us weeks
xticks = tl._process(1, 6 * 7 + 1)
assert tl.gen_freq == 'W'
# months should trigger at around 6 * 31
xticks = tl._process(1, 6 * 31 )
assert tl.gen_freq == 'MS'
# year should trigger at around 6 *366
xticks = tl._process(1, 6 * 366 + 1)
assert tl.gen_freq == 'AS'
def test_fixed_freq(self):
"""
Test passing in a fixed freq. This will allow len(xticks)
less than min_ticks
"""
plot_index = pd.date_range(start="2000-1-1", freq="D", periods=10000)
tl = formatter.TimestampLocator(plot_index, 'MS')
xticks = tl._process(0, 30*3)
assert len(xticks) == 3
tl = formatter.TimestampLocator(plot_index, 'MS')
xticks = tl._process(0, 30*6)
assert len(xticks) == 6
tl = formatter.TimestampLocator(plot_index, 'W')
xticks = tl._process(0, 10*7)
assert len(xticks) == 10
tl = formatter.TimestampLocator(plot_index, 'AS')
xticks = tl._process(0, 10 * 365)
assert len(xticks) == 10
def test_bool_xticks(self):
"""
ability to set ticks with a bool series where True == tick
"""
plot_index = pd.date_range(start="2000-1-1", freq="D", periods=10000)
freq = 'M'
ds = | pd.Series(1, index=plot_index) | pandas.Series |
#!/bin/python
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
from Bio import Seq, SeqIO
import argparse
from straintables import PrimerEngine, InputFile, OutputFile, Definitions
from straintables.Database import annotationManager, genomeManager
def writeFastaFile(outputPath,
locusName,
locusSequences):
fastaSequences = []
for genome in locusSequences.keys():
sequence = SeqIO.SeqRecord(Seq.Seq(locusSequences[genome]),
id=genome,
name=genome,
description="")
fastaSequences.append(sequence)
with open(outputPath, "w") as output_handle:
SeqIO.write(fastaSequences, output_handle, "fasta")
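# Illustrative use (path, locus name and sequences are hypothetical):
# writeFastaFile("out/LOCUS_ROP18.fasta", "ROP18",
#                {"genome_A": "ATGGCA", "genome_B": "ATGGCT"})
# writes one FASTA record per genome key.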
def Execute(options):
# CHECK DECLARATION OF PRIMER FILE;
if not options.PrimerFile:
print("FATAL: No primer file specified.")
exit(1)
print("\nAnnotation feature type for automatic primer search is |%s|." %
options.wantedFeatureType)
print("\t (allowed options: CDS, gene, mRNA)")
# -- CHECK ANNOTATED FEATURES; -- possibly a useless step
featureFolderPath =\
os.path.join(options.SourceDataDirectory, "annotations")
if os.path.isdir(featureFolderPath):
genomeFeatureFiles = [
os.path.join(featureFolderPath, File)
for File in os.listdir(featureFolderPath)
if not File.startswith(".")
]
else:
genomeFeatureFiles = []
# CHECK GENOME FEATURES FILE EXISTENCE;
if not genomeFeatureFiles:
print("Fatal: No genbank features file found.")
exit(1)
# -- LOAD USER DEFINED PRIMERS;
lociPrimerList = InputFile.loadPrimerList(options.PrimerFile)
# LOAD GENOMES;
genomeFilePaths = genomeManager.readGenomeFolder(
os.path.join(options.SourceDataDirectory, "genomes")
)
genomes = [PrimerEngine.GeneticEntities.Genome(genomeFilePath)
for genomeFilePath in genomeFilePaths]
print("Loaded %i genomes." % len(genomes))
maxGenomes = 100
if len(genomes) > maxGenomes:
print("Discarding genomes, max is %i!" % maxGenomes)
genomes = genomes[:maxGenomes]
if not genomes:
print("Fatal: No genomes found!")
exit(1)
if len(genomes) < 4:
print("Fatal: need at least 4 genomes to proper execute the analysis,")
print("\tgot only %i." % len(genomes))
exit(1)
# Avoid repeating genome names;
genomeNames = []
for genome in genomes:
while genome.name in genomeNames:
genome.name += "+"
genomeNames.append(genome.name)
# Initialize brute force primer searcher on top of chosen annotation file;
annotationFilePath, genomeFeatures =\
annotationManager.loadAnnotation(featureFolderPath)
if annotationFilePath:
bruteForceSearcher =\
PrimerEngine.PrimerDesign.BruteForcePrimerSearcher(
genomeFeatures,
genomeFilePaths,
wantedFeatureType=options.wantedFeatureType,
FindPCRViablePrimers=options.RealPrimers,
PrimerLength=options.PrimerLength,
AmpliconMinimumLength=options.MinAmpliconLength,
AmpliconMaximumLength=options.MaxAmpliconLength,
PrimerAllowedUncertainty=options.PrimerUncertainty
)
if not annotationFilePath:
# or not bruteForceSearcher.matchedGenome:(matchedGenome is deprecated)
bruteForceSearcher = None
# -- SETUP OUTPUT DATA STRUCTURES;
AllLociPrimerSet = OrderedDict()
matchedPrimerSequences = []
print("\n")
GenomeFailureReport =\
OutputFile.DockFailureReport(options.WorkingDirectory)
# ITERATE LOCI: Main Loop;
for i in range(lociPrimerList.shape[0]):
locus_info = lociPrimerList.iloc[i]
locus_name = locus_info["LocusName"]
# ASSIGN OUTPUT FASTA FILE NAME AND CHECK IF EXISTS;
outputFastaName = "%s%s.fasta" % (
Definitions.FastaRegionPrefix, locus_name)
outputFastaPath = os.path.join(
options.WorkingDirectory, outputFastaName)
print("Fasta file: %s" % outputFastaPath)
if os.path.isfile(outputFastaPath):
print("Skipping locus %s. Already exists..." % locus_name)
continue
# MAYBE WE WANT TO SKIP GIVEN LOCUS?
if options.WantedLoci:
WantedLoci = options.WantedLoci.split(',')
WantedLoci = [l.strip() for l in WantedLoci]
if locus_name not in WantedLoci:
continue
overallProgress = (i + 1, lociPrimerList.shape[0])
RegionMatchResult =\
PrimerEngine.PrimerDock.matchLocusOnGenomes(
locus_name,
locus_info,
genomes,
overallProgress,
rebootTolerance=options.rebootTolerance,
allowN=options.AllowUnknownBaseInAmplicon,
bruteForceSearcher=bruteForceSearcher
)
FailureType = PrimerEngine.PrimerDock.RegionMatchFailure
if type(RegionMatchResult) == FailureType:
GenomeFailureReport.content[locus_name] =\
RegionMatchResult.FailedGenomes
continue
# -- Additional region statistics;
if RegionMatchResult.LocusAmpliconSet is not None:
# AlignmentHealth.
score = PrimerEngine.ampliconSanity.evaluateSetOfAmplicons(
RegionMatchResult.LocusAmpliconSet)
print("\tAlignment Health = %.2f%%" % score)
print()
# record amplicon and primer data;
writeFastaFile(outputFastaPath, locus_name,
RegionMatchResult.LocusAmpliconSet)
primerPair = {
P.label: P.sequence
for P in RegionMatchResult.MatchedPrimers
}
primerPair["LocusName"] = locus_name
primerPair["AlignmentHealth"] = score
RegionLengths = [
len(r)
for r in RegionMatchResult.LocusAmpliconSet
]
primerPair["MeanLength"] = np.mean(RegionLengths)
primerPair["StdLength"] = np.std(RegionLengths)
primerPair["Chromosome"] = RegionMatchResult.chr_identifier
primerPair["StartPosition"] =\
RegionMatchResult.MatchedPrimers[0].position.start()
# Append region data;
matchedPrimerSequences.append(primerPair)
AllLociPrimerSet[locus_name] = RegionMatchResult.MatchedPrimers
# print("Bad Amplicon set for %s! Ignoring...." % locus_name)
else:
print("WARNING: PrimerDock failure.")
if matchedPrimerSequences:
# SHOW AMPLICON DATABASE;
# BUILD MATCHED PRIMER DATABASE;
MatchedRegions = OutputFile.MatchedRegions(options.WorkingDirectory)
MatchedRegions.add(matchedPrimerSequences)
MatchedRegions.write()
# Primer Maps on Guide Genome:
PrimerData = []
allPrimers = []
for Locus in AllLociPrimerSet.keys():
for Primer in AllLociPrimerSet[Locus]:
row = Primer.to_dict(Locus)
del row["Chromosome"]
PrimerData.append(row)
allPrimers.append(Primer)
# -- SAVE PRIMER DATA FILE;
fPrimerData = OutputFile.PrimerData(options.WorkingDirectory)
fPrimerData.content = | pd.DataFrame(PrimerData) | pandas.DataFrame |
# @author <NAME> (<EMAIL>)
# @time 2021/5/31 9:40
# @desc [script description]
""" tmc2gmns
This script transforms a TMC file into GMNS format. The link file generated from the TMC file
is then matched to the underlying network (here, the GMNS form of the OSM map) by the MapMatching4GMNS map-matching program.
Finally, the link performance file of the underlying network is generated from the TMC file and the corresponding reading file.
"""
#!/usr/bin/python
# coding:utf-8
import os
import datetime
import numpy as np
import pandas as pd
import os.path
import MapMatching4GMNS
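# The three steps described above are implemented by Convert_TMC(), mapmatch()
# and osm_link_performance() below; a typical run (directory names here are
# assumptions, not taken from the original project) chains them as:
#   Convert_TMC('data/tmc')
#   mapmatch('data/tmc', 'data/osm')
#   osm_link_performance('data/tmc', 'data/osm')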
'''step 1 Convert TMC Data into GMNS Format
'''
def create_folder(path):
if not os.path.exists(path):
os.makedirs(path)
def Convert_TMC(tmc_path):
'''build node_tmc.csv'''
print('reading tmc data...')
files= os.listdir(tmc_path)
for file in files:
if file[:18] == 'TMC_Identification':
tmc = pd.read_csv(tmc_path + os.sep + file)
break
'''build node.csv'''
print('converting tmc data into gmns format...')
node_tmc = pd.DataFrame()
node_tmc['name'] = None
node_tmc['x_coord'] = None
node_tmc['y_coord'] = None
node_tmc['z_coord'] = None
node_tmc['node_type'] = None
node_tmc['ctrl_type'] = None
node_tmc['zone_id'] = None
node_tmc['parent_node_id'] = None
node_tmc['geometry'] = None
for i in range(0,len(tmc)-1):
if tmc.loc[i+1,'road_order'] > tmc.loc[i,'road_order']:
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc'],\
'x_coord': tmc.loc[i,'start_longitude'], \
'y_coord': tmc.loc[i,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) +")"}, ignore_index=True)
else:
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc'],\
'x_coord': tmc.loc[i,'start_longitude'], \
'y_coord': tmc.loc[i,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i,'tmc']+'END',\
'x_coord': tmc.loc[i,'end_longitude'], \
'y_coord': tmc.loc[i,'end_latitude'],\
'z_coord': None,\
'node_type': 'tmc_end',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i,'end_longitude'].astype(str) + " " + tmc.loc[i,'end_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i+1,'tmc'],\
'x_coord': tmc.loc[i+1,'start_longitude'], \
'y_coord': tmc.loc[i+1,'start_latitude'],\
'z_coord': None,\
'node_type': 'tmc_start',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i+1,'start_longitude'].astype(str) + " " + tmc.loc[i+1,'start_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc = node_tmc.append({'name': tmc.loc[i+1,'tmc']+'END',\
'x_coord': tmc.loc[i+1,'end_longitude'], \
'y_coord': tmc.loc[i+1,'end_latitude'],\
'z_coord': None,\
'node_type': 'tmc_end',\
'ctrl_type': None,\
'zone_id': None,\
'parent_node_id': None,\
'geometry': "POINT (" + tmc.loc[i+1,'end_longitude'].astype(str) + " " + tmc.loc[i+1,'end_latitude'].astype(str) +")"}, ignore_index=True)
node_tmc.index.name = 'node_id'
node_tmc.index += 100000001 #index from 0
node_tmc.to_csv(tmc_path + os.sep + '/node_tmc.csv')
print('node_tmc.csv generated!')
'''build link_tmc.csv'''
link_tmc = pd.DataFrame()
link_tmc['name'] = None
link_tmc['corridor_id'] = None
link_tmc['corridor_link_order'] = None
link_tmc['from_node_id'] = None
link_tmc['to_node_id'] = None
link_tmc['directed'] = None
link_tmc['geometry_id'] = None
link_tmc['geometry'] = None
link_tmc['dir_flag'] = None
link_tmc['parent_link_id'] = None
link_tmc['length'] = None
link_tmc['grade'] = None
link_tmc['facility_type'] = None
link_tmc['capacity'] = None
link_tmc['free_speed'] = None
link_tmc['lanes'] = None
for i in range(0,len(tmc)):
link_tmc = link_tmc.append({'name': tmc.loc[i,'tmc'],\
'corridor_id': tmc.loc[i,'road']+'_'+tmc.loc[i,'direction'],\
'corridor_link_order' : tmc.loc[i,'road_order'],\
'from_node_id': node_tmc[(node_tmc['x_coord']==tmc.loc[i,'start_longitude']) & (node_tmc['y_coord']==tmc.loc[i,'start_latitude'])].index.values[0], \
'to_node_id': node_tmc[(node_tmc['x_coord']==tmc.loc[i,'end_longitude']) & (node_tmc['y_coord']==tmc.loc[i,'end_latitude'])].index.values[0],\
'directed': 1,\
'geometry_id': None,\
'geometry': "LINESTRING (" + tmc.loc[i,'start_longitude'].astype(str) + " " + tmc.loc[i,'start_latitude'].astype(str) + "," +\
tmc.loc[i,'end_longitude'].astype(str) +" "+ tmc.loc[i,'end_latitude'].astype(str) + ")",\
'dir_flag': 1,\
'parent_link_id': None,\
'length': tmc.loc[i,'miles'],\
'grade': None,\
'facility_type': 'interstate' if tmc.loc[i,'road'][0] == 'I'else None ,\
'capacity':None,\
'free_speed':None,\
'lanes': None}, ignore_index=True)
link_tmc.index.name = 'link_id'
link_tmc.index += 100000001
link_tmc.to_csv(tmc_path + os.sep + '/link_tmc.csv')
print('link_tmc.csv generated!')
'''build link_performance_tmc.csv'''
reading = pd.read_csv(tmc_path + os.sep + 'Reading_VA.csv')
# reading = reading[pd.to_datetime(reading['measurement_tstamp'], format='%Y-%m-%d %H:%M:%S')<datetime.datetime.strptime('2015-04-01 02:00:00', '%Y-%m-%d %H:%M:%S')]
reading = reading.loc[0:2000]
link_performance_tmc = pd.DataFrame()
link_performance_tmc['name'] = None
link_performance_tmc['corridor_id'] = None
link_performance_tmc['corridor_link_order'] = None
link_performance_tmc['from_node_id'] = None
link_performance_tmc['to_node_id'] = None
link_performance_tmc['timestamp'] = None
link_performance_tmc['volume'] = None
link_performance_tmc['travel_time'] = None
link_performance_tmc['speed'] = None
link_performance_tmc['reference_speed'] = None
link_performance_tmc['density'] = None
link_performance_tmc['queue'] = None
link_performance_tmc['notes'] = None
gp = reading.groupby('measurement_tstamp')
for key, form in gp:
# print(key)
for i in link_tmc.index:
form_selected = form[form['_vatmc_code']==link_tmc['name'][i]]
if len(form_selected)>0:
# break
link_performance_tmc = link_performance_tmc.append({'name': link_tmc['name'][i],\
'corridor_id': link_tmc['corridor_id'][i],\
'corridor_link_order' : link_tmc['corridor_link_order'][i],\
'from_node_id': link_tmc.loc[i,'from_node_id'], \
'to_node_id': link_tmc.loc[i,'to_node_id'], \
'timestamp': form_selected['measurement_tstamp'].values[0][0:10]+'T'+form_selected['measurement_tstamp'].values[0][11:13]+':'+form_selected['measurement_tstamp'].values[0][14:16],\
'volume': None,\
'travel_time': link_tmc['length'][i]/form_selected['speed'].values[0],\
'speed': form_selected['speed'].values[0],\
'reference_speed': form_selected['reference_speed'].values[0],\
'density': None,\
'queue': None,\
'notes': None }, ignore_index=True)
else:
link_performance_tmc = link_performance_tmc.append({'name': link_tmc['name'][i],\
'corridor_id': link_tmc['corridor_id'][i],\
'corridor_link_order' : link_tmc['corridor_link_order'][i],\
'from_node_id': link_tmc.loc[i,'from_node_id'], \
'to_node_id': link_tmc.loc[i,'to_node_id'], \
'timestamp': None,\
'volume': None,\
'travel_time': None,\
'speed': None,\
'reference_speed': None,\
'density': None,\
'queue': None,\
'notes': None }, ignore_index=True)
link_performance_tmc.to_csv(tmc_path + os.sep +'/link_performance_tmc.csv',index = False)
print('link_performance_tmc.csv generated!')
'''build trace.csv'''
'''trace_id is numeric'''
trace = pd.DataFrame()
trace['corridor_id'] = None
trace['agent_id'] = None
trace['date'] = None
trace['tmc'] = None
trace['trace_id'] = None
trace['hh'] = None
trace['mm'] = None
trace['ss'] = None
trace['y_coord'] = None
trace['x_coord'] = None
agent_id = 1
trace_id = 0
for i in range(0,len(tmc)-1):
if tmc.loc[i+1,'road_order'] > tmc.loc[i,'road_order']:
trace = trace.append({'corridor_id': tmc.loc[i,'road'] + '_' + tmc.loc[i,'direction'],\
'agent_id': agent_id,\
'date': None, \
'tmc': tmc.loc[i,'tmc'],\
'trace_id': trace_id,\
'hh': None,\
'mm': 'None',\
'ss': None,\
'y_coord': tmc.loc[i,'start_latitude'],\
'x_coord': tmc.loc[i,'start_longitude']}, ignore_index=True)
trace_id +=1
else:
trace = trace.append({'corridor_id': tmc.loc[i,'road'] + '_' + tmc.loc[i,'direction'],\
'agent_id': agent_id,\
'date': None, \
'tmc': tmc.loc[i,'tmc'],\
'trace_id': trace_id,\
'hh': None,\
'mm': 'None',\
'ss': None,\
'y_coord': tmc.loc[i,'start_latitude'],\
'x_coord': tmc.loc[i,'start_longitude']}, ignore_index=True)
trace_id += 1
trace = trace.append({'corridor_id': tmc.loc[i,'road'] + '_' + tmc.loc[i,'direction'],\
'agent_id': agent_id,\
'date': None, \
'tmc': tmc.loc[i,'tmc'],\
'trace_id': trace_id,\
'hh': None,\
'mm': 'None',\
'ss': None,\
'y_coord': tmc.loc[i,'end_latitude'],\
'x_coord': tmc.loc[i,'end_longitude']}, ignore_index=True)
agent_id += 1
trace_id = 0
trace = trace.append({'corridor_id': tmc.loc[i+1,'road'] + '_' + tmc.loc[i+1,'direction'],\
'agent_id': agent_id,\
'date': None, \
'tmc': tmc.loc[i+1,'tmc'],\
'trace_id': trace_id,\
'hh': None,\
'mm': 'None',\
'ss': None,\
'y_coord': tmc.loc[i+1,'start_latitude'],\
'x_coord': tmc.loc[i+1,'start_longitude']}, ignore_index=True)
trace_id +=1
trace = trace.append({'corridor_id': tmc.loc[i+1,'road'] + '_' + tmc.loc[i+1,'direction'],\
'agent_id': agent_id,\
'date': None, \
'tmc': tmc.loc[i+1,'tmc'],\
'trace_id': trace_id,\
'hh': None,\
'mm': 'None',\
'ss': None,\
'y_coord': tmc.loc[i+1,'end_latitude'],\
'x_coord': tmc.loc[i+1,'end_longitude']}, ignore_index=True)
trace.to_csv(tmc_path + os.sep +'/trace.csv')
print('trace.csv generated!')
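# Typical call (the directory name is an assumption): Convert_TMC('data/tmc').
# The folder is expected to contain a TMC_Identification*.csv file and the
# Reading_VA.csv speed readings used above.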
'''step 2 map matching
'''
def mapmatch(tmc_path,osm_path):
"Get the OSM Network"
import osm2gmns as og
files= os.listdir(osm_path)
for file in files:
if file[-3:] == 'osm':
net = og.getNetFromOSMFile(osm_path + os.sep + file,network_type=('auto'), default_lanes=True, default_speed=True)
og.consolidateComplexIntersections(net)
og.outputNetToCSV(net, output_folder=osm_path)
break
create_folder(os.path.join(os.path.dirname(os.path.realpath('__file__')), 'data/testdata'))#to local
import shutil
source_link_osm = os.path.join(os.path.join(os.path.dirname(os.path.realpath('__file__')),osm_path),'link.csv')
source_node_osm = os.path.join(os.path.join(os.path.dirname(os.path.realpath('__file__')),osm_path),'node.csv')
source_trace = os.path.join(os.path.join(os.path.dirname(os.path.realpath('__file__')),tmc_path),'trace.csv')
source_list = [source_link_osm,source_node_osm,source_trace]
destination_1 = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'data/testdata')
destination_2 = os.path.dirname(os.path.realpath('__file__'))
destination_list = [destination_1,destination_2]
for i in range(len(source_list)):
for j in range(len(destination_list)):
shutil.copy(source_list[i], destination_list[j])
MapMatching4GMNS.map_match()
print("Map matching is completed!")
'''step 3 link performance of osm
'''
def osm_link_performance(tmc_path,osm_path):
link_road = pd.read_csv(osm_path + os.sep +'/link.csv', encoding='gbk',low_memory=False,index_col=None)
trace = | pd.read_csv(tmc_path + os.sep +'/trace.csv', encoding='gbk',low_memory=False,index_col=None) | pandas.read_csv |
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = | Series([]) | pandas.Series |
"""
Local data query and preprocessing, intended for writing via zipline ingest.
Reads local data with the following conventions:
1. Time columns involved in the metadata carry tz=UTC.
2. The dataframe datetime index has tz=None.
Note: only A-share stocks are selected. The stock universe must stay consistent between `ingest` and `fundamental`.
"""
import re
import warnings
from concurrent.futures.thread import ThreadPoolExecutor
from functools import lru_cache, partial
from trading_calendars import get_calendar
import numpy as np
import pandas as pd
from cnswd.mongodb import get_db
from cnswd.setting.constants import MAX_WORKER
from cnswd.utils import sanitize_dates
import akshare as ak
warnings.filterwarnings('ignore')
WY_DAILY_COL_MAPS = {
'日期': 'date',
'股票代码': 'symbol',
'收盘价': 'close',
'最高价': 'high',
'最低价': 'low',
'开盘价': 'open',
'前收盘': 'prev_close',
'涨跌幅': 'change_pct',
'换手率': 'turnover',
'成交量': 'volume',
'成交金额': 'amount',
'总市值': 'total_cap',
'流通市值': 'market_cap',
}
WY_ADJUSTMENT_COLS = {
'股票代码': 'symbol',
'分红年度': 'date',
'送股(每10股)': 's_ratio',
'转增(每10股)': 'z_ratio',
'派息(每10股)': 'amount',
'公告日期': 'declared_date',
'股权登记日': 'record_date',
'除权除息日': 'ex_date',
'红股上市日': 'pay_date'
}
def encode_index_code(x, offset=1000000):
i = int(x) + offset
return str(i).zfill(7)
def decode_index_code(x, offset=1000000):
i = int(x) - offset
return str(i).zfill(6)
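# Round-trip example: encode_index_code('000300') -> '1000300' and
# decode_index_code('1000300') -> '000300'.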
def get_exchange(code):
"""股票所在交易所编码"""
# https://www.iso20022.org/10383/iso-10383-market-identifier-codes
if len(code) == 7:
return '指数'
if code.startswith('688'):
return "上交所科创板"
elif code.startswith('002'):
return "深交所中小板"
elif code.startswith('6'):
return "上交所"
elif code.startswith('3'):
return "深交所创业板"
elif code.startswith('0'):
return "深交所主板"
elif code.startswith('2'):
return "深证B股"
elif code.startswith('9'):
return "上海B股"
else:
raise ValueError(f'Invalid stock code: {code}')
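# Examples: get_exchange('600519') -> '上交所', get_exchange('300750') -> '深交所创业板',
# get_exchange('002594') -> '深交所中小板'.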
def _select_only_a(df, code_col):
"""选择A股数据
Arguments:
df {DataFrame} -- 数据框
code_col {str} -- 代表股票代码的列名称
Returns:
DataFrame -- 筛选出来的a股数据
"""
cond1 = df[code_col].str.startswith('2')
cond2 = df[code_col].str.startswith('9')
df = df.loc[~(cond1 | cond2), :]
return df
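# e.g. a frame with codes ['000001', '200011', '600000', '900901'] keeps only
# '000001' and '600000'; the B-share prefixes '2' and '9' are dropped.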
def _gen_index_metadata(db, code):
collection = db[code]
name = collection.find_one(projection={
'_id': 0,
'名称': 1,
},
sort=[('日期', -1)])
if name is None:
return pd.DataFrame()
first = collection.find_one(projection={
'_id': 0,
'日期': 1,
},
sort=[('日期', 1)])
last = collection.find_one(projection={
'_id': 0,
'日期': 1,
},
sort=[('日期', -1)])
start_date = pd.Timestamp(first['日期'], tz='UTC')
end_date = pd.Timestamp(last['日期'], tz='UTC')
return pd.DataFrame(
{
'symbol': encode_index_code(code),
'exchange': '指数',
'asset_name': name['名称'], # short name
'start_date': start_date,
'end_date': end_date,
'first_traded': start_date,
# to accommodate minute-level data
'last_traded': end_date,
'auto_close_date': end_date + pd.Timedelta(days=1),
},
index=[0])
def gen_index_metadata():
db = get_db('wy_index_daily')
codes = db.list_collection_names()
dfs = [_gen_index_metadata(db, code) for code in codes]
return pd.concat(dfs)
def _stock_first_and_last(code, db=None):
"""
First and last trading dates in the daily bar data
Examples
--------
>>> _stock_first_and_last('000333')
symbol asset_name first_traded last_traded
0 000333 美的集团 2020-04-02 00:00:00+00:00 2020-04-04 00:00:00+00:00
"""
if db is None:
db = get_db('wy_stock_daily')
if code not in db.list_collection_names():
return pd.DataFrame()
collection = db[code]
# empty collection
if collection.count_documents({}) == 0:
return pd.DataFrame()
first = collection.find_one(projection={
'_id': 0,
'日期': 1,
'名称': 1,
},
sort=[('日期', 1)])
last = collection.find_one(projection={
'_id': 0,
'日期': 1,
'名称': 1,
},
sort=[('日期', -1)])
return pd.DataFrame(
{
'symbol':
code,
'asset_name':
last['名称'], # latest short name
'first_traded':
pd.Timestamp(first['日期'], tz='UTC'),
# to accommodate minute-level data
'last_traded':
pd.Timestamp(last['日期'], tz='UTC') + pd.Timedelta(days=1),
},
index=[0])
def get_delist_stock_dates():
"""退市日期字典"""
sz_delist_df = ak.stock_info_sz_delist(indicator="终止上市公司")
sh_delist_df = ak.stock_info_sh_delist(indicator="终止上市公司")
res = {}
for c, d in zip(sz_delist_df['证券代码'].values, sz_delist_df['终止上市日期'].values):
if not pd.isnull(d):
res[c] = pd.to_datetime(d).floor('D').tz_localize('UTC')
for c, d in zip(sh_delist_df['COMPANY_CODE'].values, sh_delist_df['QIANYI_DATE'].values):
if not pd.isnull(d):
res[c] = pd.to_datetime(d).floor('D').tz_localize('UTC')
return res
def gen_asset_metadata(only_in=True, only_A=True, include_index=True):
"""
Generate symbol metadata
Parameters
----------
only_in : bool
    Whether to include only currently listed stocks; default True.
only_A : bool
    Whether to keep only A-share stocks (i.e. exclude B shares); default True.
include_index : bool
    Whether to include indexes; default True.
Examples
--------
>>> df = gen_asset_metadata()
>>> df.head()
symbol start_date end_date exchange asset_name first_traded last_traded auto_close_date
0 000001 1991-04-03 2018-12-21 深交所主板 平安银行 1991-04-03 2018-12-21 2018-12-22
1 000002 1991-01-29 2018-12-21 深交所主板 万 科A 1991-01-29 2018-12-21 2018-12-22
2 000004 1991-01-14 2018-12-21 深交所主板 国农科技 1991-01-02 2018-12-21 2018-12-22
3 000005 1990-12-10 2018-12-21 深交所主板 世纪星源 1991-01-02 2018-12-21 2018-12-22
4 000006 1992-04-27 2018-12-21 深交所主板 深振业A 1992-04-27 2018-12-21 2018-12-22
"""
db = get_db('wy_stock_daily')
codes = db.list_collection_names()
delisted = get_delist_stock_dates()
if only_in:
codes = [code for code in codes if code not in delisted.keys()]
# number of stocks > 3900
# with max_workers=8 it takes about 67s; with 4565 stocks about 110s
# with max_workers=4 it takes about 54s
func = partial(_stock_first_and_last, db=db)
with ThreadPoolExecutor(MAX_WORKER) as pool:
r = pool.map(func, codes)
df = pd.concat(r)
df.sort_values('symbol', inplace=True)
df['exchange'] = df['symbol'].map(get_exchange)
df['start_date'] = df['first_traded']
df['end_date'] = df['last_traded']
df['auto_close_date'] = df['last_traded'].map(
lambda x: x + | pd.Timedelta(days=1) | pandas.Timedelta |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, | Timestamp('2011-01-02') | pandas.Timestamp |
import pytest
from numpy import ma, array
import numpy
import pandas as pd
from pymc3_ext import Model, Normal, sample_prior_predictive, sample, ImputationWarning
def test_missing():
data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
with Model() as model:
x = Normal('x', 1, 1)
with pytest.warns(ImputationWarning):
Normal('y', x, 1, observed=data)
y_missing, = model.missing_values
assert y_missing.tag.test_value.shape == (2,)
model.logp(model.test_point)
with model:
prior_trace = sample_prior_predictive()
assert set(['x', 'y']) <= set(prior_trace.keys())
def test_missing_pandas():
data = | pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mock
import pytest
import collections
import json
import pandas as pd
from pandas.util.testing import assert_frame_equal
from nbformat.v4 import new_notebook, new_code_cell, new_markdown_cell, new_output
from . import get_notebook_path, get_notebook_dir
from .. import read_notebook, utils
from ..models import Notebook
from ..exceptions import ScrapbookException
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@pytest.fixture(scope='session', autouse=True)
def kernel_mock():
"""Mocks the kernel to capture warnings during testing"""
with mock.patch.object(utils, 'is_kernel') as _fixture:
yield _fixture
class AnyDict(object):
def __eq__(self, other):
return isinstance(other, dict)
@pytest.fixture
def notebook_result():
path = get_notebook_path("collection/result1.ipynb")
return read_notebook(path)
@pytest.fixture
def notebook_backwards_result():
path = get_notebook_path("record.ipynb")
return read_notebook(path)
def test_bad_path():
with pytest.raises(FileNotFoundError):
Notebook("not/a/valid/path.ipynb")
def test_bad_ext():
with pytest.raises(Warning):
Notebook("not/a/valid/extension.py")
@mock.patch("papermill.iorw.papermill_io.read")
def test_good_ext_for_url(mock_read):
sample_output = {
"cells": [{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": []
}]
}
mock_read.return_value = json.dumps(sample_output)
params = "?sig=some-unique-secret-token"
url = "abs://mystorage.blob.core.windows.net/my-actual-notebook.ipynb" + params
Notebook(url)
mock_read.assert_called_once()
def test_bad_ext_for_url():
with pytest.raises(Warning):
params = "?sig=some-unique-secret-token"
url = "abs://mystorage.blob.core.windows.net/my-actual-notebook.txt" + params
Notebook(url)
def test_filename(notebook_result):
assert notebook_result.filename == "result1.ipynb"
def test_directory(notebook_result):
assert notebook_result.directory == get_notebook_dir("collection/result1.ipynb")
def test_parameters(notebook_result):
assert notebook_result.parameters == dict(foo=1, bar="hello")
def test_data_scraps(notebook_result):
assert notebook_result.scraps.data_dict == {
"dict": {u"a": 1, u"b": 2},
"list": [1, 2, 3],
"number": 1,
"one": 1,
}
def test_display_scraps(notebook_result):
assert notebook_result.scraps.display_dict == {
"output": {
"data": {"text/plain": "'Hello World!'"},
"metadata": {
"scrapbook": {
"name": "output",
"data": False,
"display": True,
}
},
"output_type": "display_data",
},
"one_only": {
"data": {"text/plain": "'Just here!'"},
"metadata": {
"scrapbook": {"name": "one_only", "data": False, "display": True}
},
"output_type": "display_data",
},
}
def test_scraps_collection_dataframe(notebook_result):
expected_df = pd.DataFrame(
[
("one", 1, "json", None),
("number", 1, "json", None),
("list", [1, 2, 3], "json", None),
("dict", {u"a": 1, u"b": 2}, "json", None),
("output", None, "display", AnyDict()),
("one_only", None, "display", AnyDict()),
],
columns=["name", "data", "encoder", "display"],
)
assert_frame_equal(notebook_result.scraps.dataframe, expected_df, check_exact=True)
def test_record_scraps_collection_dataframe(notebook_backwards_result):
expected_df = pd.DataFrame(
[
("hello", "world", "json", None),
("number", 123, "json", None),
("some_list", [1, 3, 5], "json", None),
("some_dict", {u"a": 1, u"b": 2}, "json", None),
("some_display", None, "display", AnyDict()),
],
columns=["name", "data", "encoder", "display"],
)
print(notebook_backwards_result.scraps.dataframe)
assert_frame_equal(notebook_backwards_result.scraps.dataframe, expected_df, check_exact=True)
@mock.patch("scrapbook.models.ip_display")
def test_reglue_display(mock_display, notebook_result):
notebook_result.reglue("output")
mock_display.assert_called_once_with(
{"text/plain": "'Hello World!'"},
metadata={"scrapbook": {"name": "output", "data": False, "display": True}},
raw=True,
)
@mock.patch("scrapbook.models.ip_display")
def test_reglue_scrap(mock_display, notebook_result):
notebook_result.reglue("one")
mock_display.assert_called_once_with(
{
"application/scrapbook.scrap.json+json": {
"name": "one",
"data": 1,
"encoder": "json",
"version": 1,
}
},
metadata={"scrapbook": {"name": "one", "data": True, "display": False}},
raw=True,
)
@mock.patch("scrapbook.models.ip_display")
def test_reglue_display_unattached(mock_display, notebook_result):
notebook_result.reglue("output", unattached=True)
mock_display.assert_called_once_with(
{"text/plain": "'Hello World!'"}, metadata={}, raw=True
)
@mock.patch("scrapbook.models.ip_display")
def test_reglue_scrap_unattached(mock_display, notebook_result):
notebook_result.reglue("one", unattached=True)
mock_display.assert_called_once_with(
{
"application/scrapbook.scrap.json+json": {
"name": "one",
"data": 1,
"encoder": "json",
"version": 1,
}
},
metadata={},
raw=True,
)
def test_missing_reglue(notebook_result):
with pytest.raises(ScrapbookException):
notebook_result.reglue("foo")
@mock.patch("scrapbook.models.ip_display")
def test_missing_reglue_no_error(mock_display, notebook_result):
notebook_result.reglue("foo", raise_on_missing=False)
mock_display.assert_called_once_with(
"No scrap found with name 'foo' in this notebook"
)
@mock.patch("scrapbook.models.ip_display")
def test_reglue_rename(mock_display, notebook_result):
notebook_result.reglue("output", "new_output")
mock_display.assert_called_once_with(
{"text/plain": "'Hello World!'"},
metadata={"scrapbook": {"name": "new_output", "data": False, "display": True}},
raw=True,
)
@pytest.fixture
def no_exec_result():
path = get_notebook_path("result_no_exec.ipynb")
return read_notebook(path)
def test_cell_timing(notebook_result):
assert notebook_result.cell_timing == [0.0, 0.123]
def test_malformed_cell_timing(no_exec_result):
assert no_exec_result.cell_timing == [None]
def test_execution_counts(notebook_result):
assert notebook_result.execution_counts == [1, 2]
def test_malformed_execution_counts(no_exec_result):
assert no_exec_result.execution_counts == [None]
def test_papermill_metrics(notebook_result):
expected_df = pd.DataFrame(
[
("result1.ipynb", "Out [1]", 0.000, "time (s)"),
("result1.ipynb", "Out [2]", 0.123, "time (s)"),
],
columns=["filename", "cell", "value", "type"],
)
assert_frame_equal(notebook_result.papermill_metrics, expected_df)
def test_malformed_execution_metrics(no_exec_result):
expected_df = | pd.DataFrame([], columns=["filename", "cell", "value", "type"]) | pandas.DataFrame |
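# Sketch of the comparison idiom used in these tests: an empty DataFrame built with an
# explicit column list still carries its schema, so frame equality can be asserted.
# pandas.testing.assert_frame_equal is the modern location of the same helper imported above.
import pandas as pd
from pandas.testing import assert_frame_equal as _assert_frame_equal_sketch

_empty_sketch = pd.DataFrame([], columns=["filename", "cell", "value", "type"])
assert list(_empty_sketch.columns) == ["filename", "cell", "value", "type"]
_assert_frame_equal_sketch(_empty_sketch, _empty_sketch.copy())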
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class, which is in charge
of reading the set files, reshaping them for use in
the build class, creating and reading the parameter files, and
checking for errors in the definition of the sets and parameters.
"""
import itertools as it
from openpyxl import load_workbook
import pandas as pd
from hypatia.error_log.Checks import (
check_nan,
check_index,
check_index_data,
check_table_name,
check_mapping_values,
check_mapping_ctgry,
check_sheet_name,
check_tech_category,
check_carrier_type,
check_years_mode_consistency,
)
from hypatia.error_log.Exceptions import WrongInputMode
import numpy as np
from hypatia.utility.constants import (
global_set_ids,
regional_set_ids,
technology_categories,
carrier_types,
)
from hypatia.utility.constants import take_trade_ids, take_ids, take_global_ids
MODES = ["Planning", "Operation"]
class ReadSets:
""" Class that reads the sets of the model, creates the parameter files with
default values and reads the filled parameter files
Attributes
------------
mode:
The mode of optimization including the operation and planning mode
path:
The path of the set files given by the user
glob_mapping : dict
A dictionary of the global set tables given by the user in the global.xlsx file
mapping : dict
A dictionary of the regional set tables given by the user in the regional
set files
connection_sheet_ids: dict
A nested dictionary that defines the sheet names of the parameter file of
the inter-regional links with their default values, indices and columns
global_sheet_ids : dict
A nested dictionary that defines the sheet names of the global parameter file
with their default values, indices and columns
regional_sheets_ids : dict
A nested dictionary that defines the sheet names of the regional parameter files
with their default values, indices and columns
trade_data : dict
A nested dictionary for storing the inter-regional link data
global_data : dict
A nested dictionary for storing the global data
data : dict
A nested dictionary for storing the regional data
"""
def __init__(self, path, mode="Planning"):
self.mode = mode
self.path = path
self._init_by_xlsx()
def _init_by_xlsx(self,):
"""
Reads and organizes the global and regional sets
"""
glob_mapping = {}
wb_glob = load_workbook(r"{}/global.xlsx".format(self.path))
sets_glob = wb_glob["Sets"]
set_glob_category = {key: value for key, value in sets_glob.tables.items()}
for entry, data_boundary in sets_glob.tables.items():
data_glob = sets_glob[data_boundary]
content = [[cell.value for cell in ent] for ent in data_glob]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
glob_mapping[entry] = df
self.glob_mapping = glob_mapping
check_years_mode_consistency(
mode=self.mode, main_years=list(self.glob_mapping["Years"]["Year"])
)
for key, value in self.glob_mapping.items():
check_table_name(
file_name="global",
allowed_names=list(global_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, "global", pd.Index(global_set_ids[key]))
check_nan(key, value, "global")
if key == "Technologies":
check_tech_category(value, technology_categories, "global")
if key == "Carriers":
check_carrier_type(value, carrier_types, "global")
self.regions = list(self.glob_mapping["Regions"]["Region"])
self.main_years = list(self.glob_mapping["Years"]["Year"])
if "Timesteps" in self.glob_mapping.keys():
self.time_steps = list(self.glob_mapping["Timesteps"]["Timeslice"])
self.timeslice_fraction = self.glob_mapping["Timesteps"][
"Timeslice_fraction"
].values
else:
self.time_steps = ["Annual"]
self.timeslice_fraction = np.ones((1, 1))
# possible connections among the regions
if len(self.regions) > 1:
lines_obj = it.permutations(self.regions, r=2)
self.lines_list = []
for item in lines_obj:
if item[0] < item[1]:
self.lines_list.append("{}-{}".format(item[0], item[1]))
mapping = {}
for reg in self.regions:
wb = load_workbook(r"{}/{}.xlsx".format(self.path, reg))
sets = wb["Sets"]
self._setbase_reg = [
"Technologies",
"Carriers",
"Carrier_input",
"Carrier_output",
]
set_category = {key: value for key, value in sets.tables.items()}
reg_mapping = {}
for entry, data_boundary in sets.tables.items():
data = sets[data_boundary]
content = [[cell.value for cell in ent] for ent in data]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
reg_mapping[entry] = df
mapping[reg] = reg_mapping
for key, value in mapping[reg].items():
check_table_name(
file_name=reg,
allowed_names=list(regional_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, reg, pd.Index(regional_set_ids[key]))
check_nan(key, value, reg)
if key == "Technologies":
check_tech_category(value, technology_categories, reg)
if key == "Carriers":
check_carrier_type(value, carrier_types, reg)
if key == "Carrier_input" or key == "Carrier_output":
check_mapping_values(
value,
key,
mapping[reg]["Technologies"],
"Technologies",
"Technology",
"Technology",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_in",
"Carrier",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_out",
"Carrier",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Technologies"],
"Supply",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Technologies"],
"Demand",
reg,
)
self.mapping = mapping
Technologies = {}
for reg in self.regions:
regional_tech = {}
for key in list(self.mapping[reg]["Technologies"]["Tech_category"]):
regional_tech[key] = list(
self.mapping[reg]["Technologies"].loc[
self.mapping[reg]["Technologies"]["Tech_category"] == key
]["Technology"]
)
Technologies[reg] = regional_tech
self.Technologies = Technologies
self._create_input_data()
def _create_input_data(self):
"""
Defines the sheets, indices and columns of the parameter files
"""
if len(self.regions) > 1:
# Create the columns of inter-regional links as a multi-index of the
# pairs of regions and the transmitted carriers
indexer = pd.MultiIndex.from_product(
[self.lines_list, self.glob_mapping["Carriers_glob"]["Carrier"]],
names=["Line", "Transmitted Carrier"],
)
self.connection_sheet_ids = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Capacity_factor_line": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer,
},
}
self.global_sheet_ids = {
"Max_production_global": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Min_production_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Glob_emission_cap_annual": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Global Emission Cap"],
},
}
if self.mode == "Planning":
self.connection_sheet_ids.update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_totalcap": {
"value": 0,
"index": | pd.Index(self.main_years, name="Years") | pandas.Index |
# tagifai/data.py
# Data processing operations.
import itertools
import json
import re
from argparse import Namespace
from collections import Counter
from pathlib import Path
from typing import List, Sequence, Tuple
import numpy as np
import pandas as pd
import torch
from nltk.stem import PorterStemmer
from skmultilearn.model_selection import IterativeStratification
from config import config
from tagifai import utils
def filter_items(items: List, include: List = [], exclude: List = []) -> List:
"""Filter a list using inclusion and exclusion lists of items.
Args:
items (List): List of items to apply filters.
include (List, optional): List of items to include. Defaults to [].
exclude (List, optional): List of items to filter out. Defaults to [].
Returns:
Filtered list of items.
Usage:
```python
# Filter tags for each project
df.tags = df.tags.apply(
filter_items,
include=list(tags_dict.keys()),
exclude=config.EXCLUDE,
)
```
"""
# Filter
filtered = [item for item in items if item in include and item not in exclude]
return filtered
def prepare(
df: pd.DataFrame, include: List = [], exclude: List = [], min_tag_freq: int = 30
) -> Tuple:
"""Prepare the raw data.
Args:
df (pd.DataFrame): Pandas DataFrame with data.
include (List): list of tags to include.
exclude (List): list of tags to exclude.
min_tag_freq (int, optional): Minimum frequency of tags required. Defaults to 30.
Returns:
A cleaned dataframe and dictionary of tags and counts above the frequency threshold.
"""
# Filter tags for each project
df.tags = df.tags.apply(filter_items, include=include, exclude=exclude)
tags = Counter(itertools.chain.from_iterable(df.tags.values))
# Filter tags that have fewer than `min_tag_freq` occurrences
tags_above_freq = Counter(tag for tag in tags.elements() if tags[tag] >= min_tag_freq)
tags_below_freq = Counter(tag for tag in tags.elements() if tags[tag] < min_tag_freq)
df.tags = df.tags.apply(filter_items, include=list(tags_above_freq.keys()))
# Remove projects with no more remaining relevant tags
df = df[df.tags.map(len) > 0]
return df, tags_above_freq, tags_below_freq
class Stemmer(PorterStemmer):
def stem(self, word):
if self.mode == self.NLTK_EXTENSIONS and word in self.pool: # pragma: no cover, nltk
return self.pool[word]
if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2: # pragma: no cover, nltk
# With this line, strings of length 1 or 2 don't go through
# the stemming process, although no mention is made of this
# in the published algorithm.
return word
stem = self._step1a(word)
stem = self._step1b(stem)
stem = self._step1c(stem)
stem = self._step2(stem)
stem = self._step3(stem)
stem = self._step4(stem)
stem = self._step5a(stem)
stem = self._step5b(stem)
return stem
def preprocess(
text: str,
lower: bool = True,
stem: bool = False,
stopwords: List = config.STOPWORDS,
) -> str:
"""Conditional preprocessing on text.
Usage:
```python
preprocess(text="Transfer learning with BERT!", lower=True, stem=True)
```
<pre>
'transfer learn bert'
</pre>
Args:
text (str): String to preprocess.
lower (bool, optional): Lower the text. Defaults to True.
stem (bool, optional): Stem the text. Defaults to False.
stopwords (List, optional): List of words to filter out. Defaults to STOPWORDS.
Returns:
Preprocessed string.
"""
# Lower
if lower:
text = text.lower()
# Remove stopwords
if len(stopwords):
pattern = re.compile(r"\b(" + r"|".join(stopwords) + r")\b\s*")
text = pattern.sub("", text)
# Spacing and filters
text = re.sub(
r"([!\"'#$%&()*\+,-./:;<=>?@\\\[\]^_`{|}~])", r" \1 ", text
) # add spacing between objects to be filtered
text = re.sub("[^A-Za-z0-9]+", " ", text) # remove non alphanumeric chars
text = re.sub(" +", " ", text) # remove multiple spaces
text = text.strip()
# Remove links
text = re.sub(r"http\S+", "", text)
# Stemming
if stem:
stemmer = Stemmer()
text = " ".join([stemmer.stem(word) for word in text.split(" ")])
return text
class LabelEncoder:
"""Encode labels into unique indices.
Usage:
```python
# Encode labels
label_encoder = LabelEncoder()
label_encoder.fit(labels)
y = label_encoder.encode(labels)
```
"""
def __init__(self, class_to_index: dict = {}):
self.class_to_index = class_to_index or {} # mutable defaults ;)
self.index_to_class = {v: k for k, v in self.class_to_index.items()}
self.classes = list(self.class_to_index.keys())
def __len__(self):
return len(self.class_to_index)
def save(self, fp: str):
with open(fp, "w") as fp:
contents = {"class_to_index": self.class_to_index}
json.dump(contents, fp, indent=4, sort_keys=False)
@classmethod
def load(cls, fp: str):
with open(fp) as fp:
kwargs = json.load(fp=fp)
return cls(**kwargs)
class MultiClassLabelEncoder(LabelEncoder):
"""Encode labels into unique indices
for multi-class classification.
"""
def __str__(self):
return f"<MultiClassLabelEncoder(num_classes={len(self)})>"
def fit(self, y: Sequence):
"""Learn label mappings from a series of class labels.
Args:
y (Sequence): Collection of labels as a pandas Series object.
"""
classes = np.unique(y)
for i, class_ in enumerate(classes):
self.class_to_index[class_] = i
self.index_to_class = {v: k for k, v in self.class_to_index.items()}
self.classes = list(self.class_to_index.keys())
return self
def encode(self, y: pd.Series) -> np.ndarray:
"""Encode a collection of classes.
Args:
y (pd.Series): Collection of labels as a pandas Series object.
Returns:
Encoded class indices (one integer per label).
"""
encoded = np.zeros((len(y)), dtype=int)
for i, item in enumerate(y):
encoded[i] = self.class_to_index[item]
return encoded
def decode(self, y: np.ndarray) -> List[str]:
"""Decode a collection of class indices.
Args:
y (np.ndarray): Encoded class indices.
Returns:
List of original labels for each output.
"""
classes = []
for i, item in enumerate(y):
classes.append(self.index_to_class[item])
return classes
class MultiLabelLabelEncoder(LabelEncoder):
"""Encode labels into unique indices
for multi-label classification.
"""
def __str__(self):
return f"<MultiLabelLabelEncoder(num_classes={len(self)})>"
def fit(self, y: Sequence):
"""Learn label mappings from a series of class labels.
Args:
y (Sequence): Collection of labels as a pandas Series object.
"""
classes = np.unique(list(itertools.chain.from_iterable(y)))
for i, class_ in enumerate(classes):
self.class_to_index[class_] = i
self.index_to_class = {v: k for k, v in self.class_to_index.items()}
self.classes = list(self.class_to_index.keys())
return self
def encode(self, y: pd.Series) -> np.ndarray:
"""Encode a collection of labels using (multilabel) one-hot encoding.
Args:
y (pd.Series): Collection of labels as a pandas Series object.
Returns:
Labels as (multilabel) one-hot encodings
"""
y_one_hot = np.zeros((len(y), len(self.class_to_index)), dtype=int)
for i, item in enumerate(y):
for class_ in item:
y_one_hot[i][self.class_to_index[class_]] = 1
return y_one_hot
def decode(self, y: np.ndarray) -> List[List[str]]:
"""Decode a (multilabel) one-hot encoding into corresponding labels.
Args:
y (np.ndarray): Labels as (multilabel) one-hot encodings
Returns:
List of original labels for each output.
"""
classes = []
for i, item in enumerate(y):
indices = np.where(np.asarray(item) == 1)[0]
classes.append([self.index_to_class[index] for index in indices])
return classes
def iterative_train_test_split(X: pd.Series, y: np.ndarray, train_size: float = 0.7) -> Tuple:
"""Custom iterative train test split which
'maintains balanced representation with respect
to order-th label combinations.'
Args:
X (pd.Series): Input features as a pandas Series object.
y (np.ndarray): One-hot encoded labels.
train_size (float, optional): Proportion of data for first split. Defaults to 0.7.
Returns:
Two stratified splits based on specified proportions.
"""
stratifier = IterativeStratification(
n_splits=2,
order=1,
sample_distribution_per_fold=[
1.0 - train_size,
train_size,
],
)
train_indices, test_indices = next(stratifier.split(X, y))
X_train, y_train = X[train_indices], y[train_indices]
X_test, y_test = X[test_indices], y[test_indices]
return X_train, X_test, y_train, y_test
class Tokenizer:
"""Tokenize a feature using a built vocabulary.
Usage:
```python
tokenizer = Tokenizer(char_level=char_level)
tokenizer.fit_on_texts(texts=X)
X = np.array(tokenizer.texts_to_sequences(X), dtype=object)
```
"""
def __init__(
self,
char_level: bool,
num_tokens: int = None,
pad_token: str = "<PAD>",
oov_token: str = "<UNK>",
token_to_index: dict = None,
):
self.char_level = char_level
self.separator = "" if self.char_level else " "
if num_tokens:
num_tokens -= 2 # pad + unk tokens
self.num_tokens = num_tokens
self.pad_token = pad_token
self.oov_token = oov_token
if not token_to_index:
token_to_index = {pad_token: 0, oov_token: 1}
self.token_to_index = token_to_index
self.index_to_token = {v: k for k, v in self.token_to_index.items()}
def __len__(self):
return len(self.token_to_index)
def __str__(self):
return f"<Tokenizer(num_tokens={len(self)})>"
def fit_on_texts(self, texts: List):
"""Learn token mappings from a list of texts.
Args:
texts (List): List of texts made of tokens.
"""
if not self.char_level:
texts = [text.split(" ") for text in texts]
all_tokens = [token for text in texts for token in text]
counts = Counter(all_tokens).most_common(self.num_tokens)
self.min_token_freq = counts[-1][1]
for token, count in counts:
index = len(self)
self.token_to_index[token] = index
self.index_to_token[index] = token
return self
def texts_to_sequences(self, texts: List) -> List[List]:
"""Convert a list of texts to a lists of arrays of indices.
Args:
texts (List): List of texts to tokenize and map to indices.
Returns:
A list of mapped sequences (list of indices).
"""
sequences = []
for text in texts:
if not self.char_level:
text = text.split(" ")
sequence = []
for token in text:
sequence.append(self.token_to_index.get(token, self.token_to_index[self.oov_token]))
sequences.append(sequence)
return sequences
def sequences_to_texts(self, sequences: List) -> List:
"""Convert a lists of arrays of indices to a list of texts.
Args:
sequences (List): list of mapped tokens to convert back to text.
Returns:
Mapped text from index tokens.
"""
texts = []
for sequence in sequences:
text = []
for index in sequence:
text.append(self.index_to_token.get(index, self.oov_token))
texts.append(self.separator.join([token for token in text]))
return texts
def save(self, fp: str):
with open(fp, "w") as fp:
contents = {
"char_level": self.char_level,
"oov_token": self.oov_token,
"token_to_index": self.token_to_index,
}
json.dump(contents, fp, indent=4, sort_keys=False)
@classmethod
def load(cls, fp: str):
with open(fp) as fp:
kwargs = json.load(fp=fp)
return cls(**kwargs)
def pad_sequences(sequences: np.ndarray, max_seq_len: int = 0) -> np.ndarray:
"""Zero pad sequences to a specified `max_seq_len`
or to the length of the largest sequence in `sequences`.
Usage:
```python
# Pad inputs
seq = np.array([[1, 2, 3], [1, 2]], dtype=object)
padded_seq = pad_sequences(sequences=seq, max_seq_len=5)
print (padded_seq)
```
<pre>
[[1. 2. 3. 0. 0.]
[1. 2. 0. 0. 0.]]
</pre>
Note:
Input `sequences` must be 2D.
Check out this [implementation](https://madewithml.com/courses/foundations/convolutional-neural-networks/#padding){:target="_blank"} for a more generalized approach.
Args:
sequences (np.ndarray): 2D array of data to be padded.
max_seq_len (int, optional): Length to pad sequences to. Defaults to 0.
Raises:
ValueError: Input sequences are not two-dimensional.
Returns:
An array with the zero padded sequences.
"""
# Get max sequence length
max_seq_len = max(max_seq_len, max(len(sequence) for sequence in sequences))
# Pad
padded_sequences = np.zeros((len(sequences), max_seq_len))
for i, sequence in enumerate(sequences):
padded_sequences[i][: len(sequence)] = sequence
return padded_sequences
class CNNTextDataset(torch.utils.data.Dataset):
"""Create `torch.utils.data.Dataset` objects to use for
efficiently feeding data into our models.
Usage:
```python
# Create dataset
X, y = data
dataset = CNNTextDataset(X=X, y=y, max_filter_size=max_filter_size)
# Create dataloaders
dataloader = dataset.create_dataloader(batch_size=batch_size)
```
"""
def __init__(self, X, y, max_filter_size):
self.X = X
self.y = y
self.max_filter_size = max_filter_size
def __len__(self):
return len(self.y)
def __str__(self):
return f"<Dataset(N={len(self)})>"
def __getitem__(self, index: int) -> List:
X = self.X[index]
y = self.y[index]
return [X, y]
def collate_fn(self, batch: List) -> Tuple:
"""Processing on a batch. It's used to override the default `collate_fn` in `torch.utils.data.DataLoader`.
Args:
batch (List): List of inputs and outputs.
Returns:
Processed inputs and outputs.
"""
# Get inputs
batch = np.array(batch, dtype=object)
X = batch[:, 0]
y = np.stack(batch[:, 1], axis=0)
# Pad inputs
X = pad_sequences(sequences=X, max_seq_len=self.max_filter_size)
# Cast
X = torch.LongTensor(X.astype(np.int32))
y = torch.FloatTensor(y.astype(np.int32))
return X, y
def create_dataloader(
self, batch_size: int, shuffle: bool = False, drop_last: bool = False
) -> torch.utils.data.DataLoader:
"""Create dataloaders to load batches with.
Usage:
```python
# Create dataset
X, y = data
dataset = CNNTextDataset(X=X, y=y, max_filter_size=max_filter_size)
# Create dataloaders
dataloader = dataset.create_dataloader(batch_size=batch_size)
```
Args:
batch_size (int): Number of samples per batch.
shuffle (bool, optional): Shuffle each batch. Defaults to False.
drop_last (bool, optional): Drop the last batch if it's less than `batch_size`. Defaults to False.
Returns:
Torch dataloader to load batches with.
"""
return torch.utils.data.DataLoader(
dataset=self,
batch_size=batch_size,
collate_fn=self.collate_fn,
shuffle=shuffle,
drop_last=drop_last,
pin_memory=True,
)
def compute_features(params: Namespace) -> None:
"""Compute features to use for training.
Args:
params (Namespace): Input parameters for operations.
"""
# Set up
utils.set_seed(seed=params.seed)
# Load data
projects_url = (
"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/projects.json"
)
projects = utils.load_json_from_url(url=projects_url)
df = | pd.DataFrame(projects) | pandas.DataFrame |
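# Sketch with made-up records: the list of project dicts loaded above becomes a DataFrame
# whose "tags" column can then be filtered per row, mirroring what prepare() does with
# filter_items().
import pandas as pd

_records_sketch = [
    {"title": "p1", "tags": ["nlp", "bert"]},
    {"title": "p2", "tags": ["cv"]},
]
_df_sketch = pd.DataFrame(_records_sketch)
_df_sketch.tags = _df_sketch.tags.apply(lambda tags: [t for t in tags if t in ("nlp", "cv")])
assert _df_sketch.tags.tolist() == [["nlp"], ["cv"]]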
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
| assert_almost_equal(reference.tvalues, result._t_stat_raw) | pandas.util.testing.assert_almost_equal |
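# Sketch of the approximate-comparison idiom used throughout this suite. The
# pandas.util.testing module is deprecated in newer pandas releases; numpy.testing
# provides an equivalent tolerance-based check.
from numpy.testing import assert_allclose as _assert_allclose_sketch

_assert_allclose_sketch([1.0000001, 2.0], [1.0, 2.0], rtol=1e-5)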
import pandas as pd
import numpy as np
fn = '2019_survey/2019 Kubernetes Contributor Experience Survey PUBLIC.csv'
contribute_header = "What areas of Kubernetes do you contribute to? Please check all that apply."
blockers_header = "Please rate any challenges to the listed steps of the contribution process"
agree_header = "Do you agree with the following statements (1 - strongly disagree, 5 - strongly agree):"
attend_header = "Which of the below would make you likely to attend more of the Community Meetings? Check all that apply."
most_important_proj_header = "Some of the major projects SIG Contributor Experience is working on are listed below, rank the ones that are most important to you (and/or your SIG)"
use_freq_header = "Of our various communications channels, please rate which ones you use and/or check most frequently on a 1-5 scale, where 1 is “never”, 3 is “several times a month” and 5 is “every day”."
news_header = "Which of these channels is most likely to reach you first for news about decisions, changes, additions, and/or announcements to the contributor process or community matters?"
def map_blocker_and_usefreq_vals(val):
try:
return int(val)
except ValueError:
return int(val[0])
def process_header(df):
columns = list(df.columns)
new_columns = [None]*len(columns)
for i, col in enumerate(columns):
if col[1].startswith("Unnamed") or col[1] == "Response":
new_columns[i] = col[0]
continue
# Find the starting column for the multilabel responses (checkboxes)
# that were also in the 2018 survey
if col[0] == blockers_header:
blockers_i = i
elif col[0] == contribute_header:
contribute_i = i
elif col[0] == news_header:
news_i = i
elif col[0] == use_freq_header:
use_freq_i = i
elif col[0] == most_important_proj_header:
most_important_proj_i = i
elif col[0] == agree_header: # Starting columns for multilabel responses that weren't in the 2018 survey.
agree_i = i
elif col[0] == attend_header:
attend_i = i
#elif col[0] == unattendance_header:
# unattendance_i = i
else: # Handle open ended responses
new_columns[i] = col[0]
def prefix_cols(header, header_i, prefix):
i = header_i
while i < len(columns) and (columns[i][0].startswith("Unnamed") or columns[i][0] == header):
new_columns[i] = "{} {}".format(prefix, columns[i][1])
i += 1
prefix_cols(contribute_header, contribute_i, "Contribute:")
prefix_cols(blockers_header, blockers_i, "Blocker:")
prefix_cols(news_header, news_i, "Check for news:")
prefix_cols(use_freq_header, use_freq_i, "Use freq:")
prefix_cols(most_important_proj_header, most_important_proj_i, "Most Important Project:")
prefix_cols(agree_header, agree_i, "Agree:")
prefix_cols(attend_header, attend_i, "Would attend if:")
df.columns = new_columns
def get_df(file_name=None):
fn = '2019_survey/2019 Kubernetes Contributor Experience Survey PUBLIC.csv'
if file_name:
fn = file_name
df = | pd.read_csv(fn, header=[0,1], skipinitialspace=True) | pandas.read_csv |
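# Sketch with an inline two-row-header CSV: header=[0, 1] produces MultiIndex columns,
# which is the structure that process_header() above flattens into single prefixed strings.
import io
import pandas as pd

_csv_sketch = "Q1,Q2,Q2\nResponse,Option A,Option B\n5,1,0\n"
_survey_sketch = pd.read_csv(io.StringIO(_csv_sketch), header=[0, 1], skipinitialspace=True)
assert isinstance(_survey_sketch.columns, pd.MultiIndex)
assert _survey_sketch.columns.nlevels == 2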
import pandas as pd
import os
from sqlalchemy import create_engine
print("Reading csv files")
files=[]
for r, d, f in os.walk("./data"):
for file in f:
if (not '.zip' in file) and (not '.db' in file):
files.append(os.path.join(r, file))
df = pd.concat([pd.read_csv(f) for f in files],sort=True)
df['start_time']=pd.to_datetime(df['start_time'])
df['end_time']=pd.to_datetime(df['end_time'])
froms=df[['from_station_id','from_station_name']]
froms.columns=['id','name']
tos=df[['to_station_id','to_station_name']]
tos.columns=['id','name']
all_stations = pd.concat([froms,tos],sort=True).drop_duplicates()
starts = df[['from_station_id','start_time']]
starts['action']=-1
starts.columns = ['id','time','action']
ends = df[['to_station_id','end_time']]
ends['action']=1
ends.columns = ['id','time','action']
availability= | pd.concat([starts,ends],sort=True) | pandas.concat |
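# Sketch of the availability bookkeeping built above: trip starts (-1) and trip ends (+1)
# are stacked with pd.concat, then a per-station cumulative sum over time gives the
# running change in availability.
import pandas as pd

_starts_sketch = pd.DataFrame({"id": [7, 7], "time": pd.to_datetime(["2019-01-01 08:00", "2019-01-01 09:00"]), "action": [-1, -1]})
_ends_sketch = pd.DataFrame({"id": [7], "time": pd.to_datetime(["2019-01-01 08:30"]), "action": [1]})
_events_sketch = pd.concat([_starts_sketch, _ends_sketch], sort=True)
_running_sketch = _events_sketch.sort_values("time").groupby("id")["action"].cumsum()
assert _running_sketch.tolist() == [-1, 0, -1]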
"""corpkit: multiprocessing of interrogations"""
from __future__ import print_function
def pmultiquery(corpus,
search,
show='words',
query='any',
sort_by='total',
save=False,
multiprocess='default',
root=False,
note=False,
print_info=True,
subcorpora=False,
**kwargs
):
"""
- Parallel process multiple queries or corpora.
- This function is used by corpkit.interrogator.interrogator() for multiprocessing.
- There's no reason to call this function yourself.
"""
import os
from pandas import DataFrame, Series
import pandas as pd
import collections
from collections import namedtuple, OrderedDict
from time import strftime, localtime
import corpkit
from corpkit.interrogator import interrogator
from corpkit.interrogation import Interrogation, Interrodict
from corpkit.process import canpickle
try:
from joblib import Parallel, delayed
except ImportError:
pass
import multiprocessing
locs = locals()
for k, v in kwargs.items():
locs[k] = v
in_notebook = locs.get('in_notebook')
def best_num_parallel(num_cores, num_queries):
"""decide how many parallel processes to run
the idea, more or less, is to balance the load when possible"""
import corpkit
if num_queries <= num_cores:
return num_queries
if num_queries > num_cores:
if (num_queries / num_cores) == num_cores:
return int(num_cores)
if num_queries % num_cores == 0:
try:
return max([int(num_queries / n) for n in range(2, num_cores) \
if int(num_queries / n) <= num_cores])
except ValueError:
return num_cores
else:
import math
if (float(math.sqrt(num_queries))).is_integer():
square_root = math.sqrt(num_queries)
if square_root <= num_queries / num_cores:
return int(square_root)
return num_cores
num_cores = multiprocessing.cpu_count()
# what is our iterable? ...
multiple = kwargs.get('multiple', False)
mult_corp_are_subs = False
if hasattr(corpus, '__iter__'):
if all(getattr(x, 'level', False) == 's' for x in corpus):
mult_corp_are_subs = True
non_first_sub = None
if subcorpora:
non_first_sub = subcorpora[1:] if isinstance(subcorpora, list) else None
subval = subcorpora if not non_first_sub else subcorpora[0]
#print(subcorpora, non_first_sub, subval)
if subcorpora is True:
import re
subcorpora = re.compile(r'.*')
else:
# strange travis error happened here
subcorpora = corpus.metadata['fields'][subval]
if len(subcorpora) == 0:
print('No %s metadata found.' % str(subval))
return
mapcores = {'datalist': [corpus, 'corpus'],
'multiplecorpora': [corpus, 'corpus'],
'namedqueriessingle': [query, 'query'],
'namedqueriesmultiple': [search, 'search'],
'subcorpora': [subcorpora, 'subcorpora']}
# a is a dummy, just to produce default one
toiter, itsname = mapcores.get(multiple, [False, False])
if isinstance(toiter, dict):
toiter = toiter.items()
denom = len(toiter)
num_cores = best_num_parallel(num_cores, denom)
# todo: code below makes no sense
vals = ['eachspeaker', 'multiplespeaker', 'namedqueriesmultiple']
if multiple == 'multiplecorpora' and any(x is True for x in vals):
from corpkit.corpus import Corpus, Corpora
if isinstance(corpus, Corpora):
multiprocess = False
else:
corpus = Corpus(corpus)
if isinstance(multiprocess, int):
num_cores = multiprocess
if multiprocess is False:
num_cores = 1
# make sure saves are right type
if save is True:
raise ValueError('save must be string when multiprocessing.')
# make a list of dicts to pass to interrogator,
# with the iterable unique in every one
locs['printstatus'] = False
locs['multiprocess'] = False
locs['df1_always_df'] = False
locs['files_as_subcorpora'] = False
locs['corpus'] = corpus
if multiple == 'multiplespeaker':
locs['multispeaker'] = True
if isinstance(non_first_sub, list) and len(non_first_sub) == 1:
non_first_sub = non_first_sub[0]
# make the default query
locs = {k: v for k, v in locs.items() if canpickle(v)}
# make a new dict for every iteration
ds = [dict(**locs) for i in range(denom)]
for index, (d, bit) in enumerate(zip(ds, toiter)):
d['paralleling'] = index
if multiple in ['namedqueriessingle', 'namedqueriesmultiple']:
d[itsname] = bit[1]
d['outname'] = bit[0]
elif multiple in ['multiplecorpora', 'datalist']:
d['outname'] = bit.name.replace('-parsed', '')
d[itsname] = bit
elif multiple in ['subcorpora']:
d[itsname] = bit
jmd = {subval: bit}
# put this earlier
j2 = kwargs.get('just_metadata', False)
if not j2:
j2 = {}
jmd.update(j2)
d['just_metadata'] = jmd
d['outname'] = bit
d['by_metadata'] = False
d['subcorpora'] = non_first_sub
if non_first_sub:
d['print_info'] = False
# message printer should be a function...
if kwargs.get('conc') is False:
message = 'Interrogating'
elif kwargs.get('conc') is True:
message = 'Interrogating and concordancing'
elif kwargs.get('conc').lower() == 'only':
message = 'Concordancing'
time = strftime("%H:%M:%S", localtime())
from corpkit.process import dictformat
if print_info:
# proper printing for plurals
# in truth this needs to be revised, it's horrible.
sformat = dictformat(search, query)
if num_cores == 1:
add_es = ''
else:
add_es = 'es'
if multiple in ['multiplecorpora', 'datalist']:
corplist = "\n ".join([i.name for i in list(corpus)[:20]])
if len(corpus) > 20:
corplist += '\n ... and %d more ...\n' % (len(corpus) - 20)
print(("\n%s: Beginning %d corpus interrogations (in %d parallel process%s):\n %s" \
"\n Query: %s\n %s corpus ... \n" % (time, len(corpus), num_cores, add_es, corplist, sformat, message)))
elif multiple == 'namedqueriessingle':
print(("\n%s: Beginning %d corpus interrogations (in %d parallel process%s): %s" \
"\n Queries: %s\n %s corpus ... \n" % (time, len(query), num_cores, add_es, corpus.name, sformat, message) ))
elif multiple == 'namedqueriesmultiple':
print(("\n%s: Beginning %d corpus interrogations (in %d parallel process%s): %s" \
"\n Queries: %s\n %s corpus ... \n" % (time, len(list(search.keys())), num_cores, add_es, corpus.name, sformat, message)))
elif multiple in ['eachspeaker', 'multiplespeaker']:
print(("\n%s: Beginning %d parallel corpus interrogation%s: %s" \
"\n Query: %s\n %s corpus ... \n" % (time, num_cores, add_es.lstrip('e'), corpus.name, sformat, message) ))
elif multiple in ['subcorpora']:
print(("\n%s: Beginning %d parallel corpus interrogation%s: %s" \
"\n Query: %s\n %s corpus ... \n" % (time, num_cores, add_es.lstrip('e'), corpus.name, sformat, message) ))
# run in parallel, get either a list of tuples (non-c option)
# or a dataframe (c option)
#import sys
#reload(sys)
#stdout=sys.stdout
failed = False
terminal = False
used_joblib = False
#ds = ds[::-1]
#todo: the number of blank lines to print can be way wrong
if not root and print_info:
from blessings import Terminal
terminal = Terminal()
print('\n' * (len(ds) - 2))
for dobj in ds:
linenum = dobj['paralleling']
# this try handles nosetest problems in sublime text
try:
with terminal.location(0, terminal.height - (linenum + 1)):
# this is a really bad idea.
thetime = strftime("%H:%M:%S", localtime())
num_spaces = 26 - len(dobj['outname'])
print('%s: QUEUED: %s' % (thetime, dobj['outname']))
except:
pass
if not root and multiprocess:
try:
res = Parallel(n_jobs=num_cores)(delayed(interrogator)(**x) for x in ds)
used_joblib = True
except:
failed = True
print('Multiprocessing failed.')
raise
if not res:
failed = True
else:
res = []
for index, d in enumerate(ds):
d['startnum'] = (100 / denom) * index
res.append(interrogator(**d))
try:
res = sorted([i for i in res if i])
except:
pass
# remove unpicklable bits from query
from types import ModuleType, FunctionType, BuiltinMethodType, BuiltinFunctionType
badtypes = (ModuleType, FunctionType, BuiltinFunctionType, BuiltinMethodType)
qlocs = {k: v for k, v in locs.items() if not isinstance(v, badtypes)}
if hasattr(qlocs.get('corpus', False), 'name'):
qlocs['corpus'] = qlocs['corpus'].path
else:
qlocs['corpus'] = list([i.path for i in qlocs.get('corpus', [])])
# return just a concordance
from corpkit.interrogation import Concordance
if kwargs.get('conc') == 'only':
concs = | pd.concat([x for x in res]) | pandas.concat |
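# Sketch of the fan-out/concat pattern used above, with a toy worker standing in for
# interrogator(): each parallel call returns a DataFrame and the results are stacked.
import pandas as pd
from joblib import Parallel, delayed

def _toy_worker_sketch(name):
    return pd.DataFrame({"corpus": [name], "total": [1]})

_res_sketch = Parallel(n_jobs=2)(delayed(_toy_worker_sketch)(n) for n in ["a", "b", "c"])
_stacked_sketch = pd.concat([x for x in _res_sketch])
assert len(_stacked_sketch) == 3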
import datetime
import logging
import pandas as pd
import pytest
import awswrangler as wr
from awswrangler.s3._merge_upsert_table import _is_data_quality_sufficient, merge_upsert_table
logger = logging.getLogger("awswrangler")
logger.setLevel(logging.DEBUG)
def test_is_data_quality_sufficient_check_column_names():
# Check that both tables have the same columns
existing_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key)
def test_is_data_quality_sufficient_mistmatch_column_names():
# Check that both dataframes have the same columns.
# In this case they differ, so the check should fail
existing_df = pd.DataFrame({"c0": [1, 2, 1, 2], "c1": [1, 2, 1, 2], "c2": [2, 1, 2, 1]})
delta_df = pd.DataFrame({"d0": [1, 2, 1, 2], "d1": [1, 2, 1, 2], "c2": [2, 1, 2, 1]})
primary_key = ["c0", "c1"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is False
def test_is_data_quality_sufficient_same_column_names_different_row_count():
# Check that both tables have the same columns but different row counts
existing_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], columns=["col_a", "col_b", "col_c"])
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is True
def test_is_data_quality_sufficient_missing_primary_key():
# Check both tables have the same primary key
existing_df = pd.DataFrame({"c0": [1, 2, 1], "c1": [1, 2, 1], "c2": [2, 1, 1]})
delta_df = pd.DataFrame({"c0": [1, 2, 1, 2]})
primary_key = ["c0", "c1"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is False
def test_is_data_quality_sufficient_fail_for_duplicate_data():
# Check for duplicate data inside the dataframe
existing_df = | pd.DataFrame([[1, 2, 3], [1, 2, 3], [7, 8, 9], [10, 11, 12]], columns=["col_a", "col_b", "col_c"]) | pandas.DataFrame |
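# Hedged sketch of the condition this test exercises: repeated primary-key combinations
# can be flagged directly with DataFrame.duplicated().
import pandas as pd

_frame_sketch = pd.DataFrame([[1, 2, 3], [1, 2, 3], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
assert _frame_sketch.duplicated(subset=["col_a", "col_b"]).any()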
import os
import warnings
import pandas as pd
import numpy as np
import sys
from rpy2.robjects import r, pandas2ri, numpy2ri
from .univariate_statistical_analysis import univariate_analysis
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.cluster import FeatureAgglomeration
from scipy.cluster.hierarchy import linkage, cut_tree
from scipy.spatial.distance import squareform
pandas2ri.activate()
numpy2ri.activate()
class DimensionalityReduction(BaseEstimator):
"""A general class to handle dimensionality reduction.
Parameters
----------
method: str or transform
Method used to compute dimensionality reduction. Either str or callable
If str, the built-in function with that name is called; must be one of the following:
'hierarchical_clust_parmar': Consensus Clustering with hierarchical clustering as described in :
Radiomic feature clusters and Prognostic Signatures specific for Lung and Head & Neck cancer.
Parmar et al., Scientific Reports, 2015
'hierarchical_clust_leger': Hierarchical clustering as described in :
A comparative study of machine learning methods for time-to-event survival data for
radiomics risk modelling. Leger et al., Scientific Reports, 2017
If a transformer, method must inherit from TransformerMixin (like sklearn transformers) or have a fit + a transform
method that will be called successively, with the latter returning the reduced dataset.
Ex :
str: method = 'hierarchical_clust_leger'
transform: method = sklearn.decomposition.PCA(n_components=0.95, svd_solver='full')
corr_metric: str
Correlation metric used to compute distance between features
threshold: float
Correlation threshold used to assign features to clusters. The tree will be cut at a height of 1 - threshold
cluster_reduction: str
Method used to combine features in the same cluster. Currently implemented : mean and medoid
"""
dr_methods = ['hierarchical_clust_parmar', 'hierarchical_clust_leger']
cluster_reduction_methods = ['mean', 'medoid']
def __init__(self, method='hierarchical_clust_leger', corr_metric='spearman', threshold=0.9, cluster_reduction='mean'):
self.method = method
self.corr_metric = corr_metric
self.threshold = threshold
self.cluster_reduction = cluster_reduction
self.is_reduced = False
self.is_fitted = True
@staticmethod
def _check_X_Y(X, y):
# Check X
if not isinstance(X, (list, tuple, np.ndarray)):
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
X = X.to_numpy()
else:
raise TypeError('X array must be an array like or pandas Dataframe/Series')
else:
X = np.array(X)
if len(X.shape) != 2:
raise ValueError('X array must 2D')
# Check y
if y is not None:
if not isinstance(y, (list, tuple, np.ndarray)):
if isinstance(y, pd.DataFrame) or isinstance(y, pd.Series):
y = y.to_numpy()
else:
raise TypeError('y array must be an array like or pandas Dataframe/Series')
else:
y = np.array(y)
if len(y.shape) != 1:
if len(y.shape) == 2 and y.shape[1] == 1:
y = y.reshape(-1)
else:
raise ValueError('y array must be 1D or 2D with second dimension equal to 1')
if len(np.unique(y)) <= 1:
raise ValueError('y array must have at least 2 classes')
return X, y
def _get_dr_func(self):
if isinstance(self.method, TransformerMixin) or (hasattr(self.method, 'fit') and hasattr(self.method, 'transform')):
return self.method
elif isinstance(self.method, str):
method_name = self.method.lower()
if method_name not in self.dr_methods:
raise ValueError('If string, method must be one of: {0}. '
'{1} was passed'.format(str(self.dr_methods), self.method))
return getattr(self, self.method)
else:
raise TypeError('method argument must be a callable or a string')
@staticmethod
def _get_medoid(n_k, distance_matrix, cluster_labels):
df_distance_matrix = pd.DataFrame(distance_matrix)
cluster_distance_matrix = df_distance_matrix.loc[cluster_labels == n_k, cluster_labels == n_k]
return cluster_distance_matrix.sum(axis=0).idxmin()
def hierarchical_clust_parmar(self, X, y=None):
"""
Consensus Clustering with hierarchical clustering as described in :
Radiomic feature clusters and Prognostic Signatures specific for Lung and Head & Neck cancer.
Parmar et al., Scientific Reports, 2015
"""
df = pd.DataFrame(X)
r_df = pandas2ri.py2ri(df)
cwd = os.path.dirname(sys.argv[0])
r.setwd(cwd)
r.source('./Statistical_analysis/R_scripts/hierarchical_clustering_Parmar.R')
if self.cluster_reduction in self.cluster_reduction_methods:
r_dr_results = r.hierarchical_clustering_parmar(r_df, max_k=20, threshold=1 - self.threshold,
corr_metric=self.corr_metric,
cluster_reduction=self.cluster_reduction)
else:
raise ValueError('cluster_reduction must be one of : %s. '
'%s was passed' % (self.cluster_reduction_methods, self.cluster_reduction))
R_object_dict = {}
keys = r_dr_results.names
for i in range(len(keys)):
R_object_dict[keys[i]] = np.array(r_dr_results[i])
dr_results = pd.DataFrame(R_object_dict).to_numpy()
self.cluster_labels = dr_results[:, 0]
nb_cluster = np.amax(dr_results[:, 0]).astype(int)
coefficient_matrix = np.zeros((dr_results.shape[0], nb_cluster)) # Shape of (n_features, nb cluster)
for i in range(nb_cluster):
coefficient_matrix[:, i] = np.where(dr_results[:, 0] == i + 1, dr_results[:, 1], 0)
coefficient_matrix = coefficient_matrix.T
return coefficient_matrix
def hierarchical_clust_leger(self, X, y=None):
"""
Hierarchical clustering as described in :
A comparative study of machine learning methods for time-to-event survival data for
radiomics risk modelling. Leger et al., Scientific Reports, 2017
"""
# df = pd.DataFrame(X)
# r_df = pandas2ri.py2ri(df)
# cwd = os.path.dirname(sys.argv[0])
# r.setwd(cwd)
# r.source('./Statistical_analysis/R_scripts/hierarchical_clustering_Leger.R')
# r_dr_results = r.hierarchical_clustering_leger(r_df)
# R_object_dict = {}
# keys = r_dr_results.names
# for i in range(len(keys)):
# R_object_dict[keys[i]] = np.array(r_dr_results[i])
# dr_results = pd.DataFrame(R_object_dict).to_numpy()
# nb_cluster = np.amax(dr_results[:, 0]).astype(int)
# coefficient_matrix = np.zeros((dr_results.shape[0], nb_cluster)) # Shape of (n_features, nb cluster)
# for i in range(nb_cluster):
# coefficient_matrix[:, i] = np.where(dr_results[:, 0] == i + 1, dr_results[:, 1], 0)
# coefficient_matrix = coefficient_matrix.T
dissimilarity_matrix = 1 - np.abs( | pd.DataFrame(X) | pandas.DataFrame |
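# Sketch, with random data, of how the clustering path started above can continue
# (assuming the class defaults corr_metric='spearman' and threshold=0.9): Spearman
# correlation -> 1 - |corr| dissimilarity -> average-linkage tree cut at 1 - threshold.
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import cut_tree, linkage
from scipy.spatial.distance import squareform

_rng_sketch = np.random.RandomState(0)
_X_sketch = _rng_sketch.rand(30, 6)
_dissim_sketch = 1 - np.abs(pd.DataFrame(_X_sketch).corr(method="spearman"))
_Z_sketch = linkage(squareform(_dissim_sketch.values, checks=False), method="average")
_labels_sketch = cut_tree(_Z_sketch, height=1 - 0.9).reshape(-1)
assert _labels_sketch.shape == (6,)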
#!/usr/bin/python
''' Credits:
-------
Author: <NAME> (@rahulrajpl)
License: MIT License 2020
Reference:
----------
[1] https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python/notebook
[2] https://coolsymbol.com/emojis
'''
import streamlit as st
import pandas as pd
import numpy as np
import time, os
import seaborn as sns
import pandas_profiling
import webbrowser
import codecs
from matplotlib import pyplot as plt
def main():
st.title('📊WExDA')
st.subheader('Web based tool for Exploratory Data Analysis' )
@st.cache(persist=True)
def load_data(uploaded_file):
df = | pd.read_csv(uploaded_file) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.feature_selection import (
f_regression,
SelectKBest,
SelectFromModel,
)
from sklearn.linear_model import Lasso
from sklearn.datasets import load_boston
from feature_engine.wrappers import SklearnTransformerWrapper
def test_sklearn_imputer_numeric_with_constant(df_na):
variables_to_impute = ["Age", "Marks"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value=-999, strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna(-999)
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_object_with_constant(df_na):
variables_to_impute = ["Name", "City"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_allfeatures_with_constant(df_na):
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant")
)
# transformed dataframe
ref = df_na.copy()
ref = ref.fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(dataframe_na_transformed.isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_standardscaler_numeric(df_vartypes):
variables_to_scale = ["Age", "Marks"]
transformer = SklearnTransformerWrapper(
transformer=StandardScaler(), variables=variables_to_scale
)
ref = df_vartypes.copy()
ref[variables_to_scale] = (
ref[variables_to_scale] - ref[variables_to_scale].mean()
) / ref[variables_to_scale].std(ddof=0)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
# fit params
assert transformer.input_shape_ == (4, 5)
assert (transformer.transformer.mean_.round(6) == np.array([19.5, 0.75])).all()
assert all(transformer.transformer.scale_.round(6) == [1.118034, 0.111803])
| pd.testing.assert_frame_equal(ref, transformed_df) | pandas.testing.assert_frame_equal |
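# Sketch of the reference computation in the test above, using values consistent with the
# asserted means and scales: StandardScaler standardises with the population standard
# deviation (ddof=0), so a manual z-score built the same way matches it.
import pandas as pd
from pandas.testing import assert_frame_equal as _assert_frame_equal_sketch
from sklearn.preprocessing import StandardScaler

_nums_sketch = pd.DataFrame({"Age": [20, 21, 19, 18], "Marks": [0.9, 0.8, 0.7, 0.6]})
_manual_sketch = (_nums_sketch - _nums_sketch.mean()) / _nums_sketch.std(ddof=0)
_scaled_sketch = pd.DataFrame(StandardScaler().fit_transform(_nums_sketch), columns=_nums_sketch.columns)
_assert_frame_equal_sketch(_manual_sketch, _scaled_sketch)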
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
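# string_patterns_st draws a tuple (strings-or-None, pattern, offset), e.g. (["aab", None, "ba"], "aab", 1)
# (illustrative draw): the pattern is spliced into some of the generated strings so matches are
# guaranteed to occur, and the offset lets callers re-run checks on a tail slice of the data.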
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array)
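# e.g. _fr_series_from_data(["a", None], "chunked") yields a pandas Series backed by a
# FletcherChunkedArray of Arrow strings; any other variant value uses the continuous array.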
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
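# In _check_str_to_t, tail(len(data) - test_offset) drops the first test_offset elements so the
# accessor is also exercised on Arrow data that starts at a non-zero buffer offset.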
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_startswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
# Run over slices to check offset handling code
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
)
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=True,
case=False,
)
@settings(deadline=None)
@given(
data_tuple=string_patterns_st(),
n=st.integers(min_value=0, max_value=10),
repl=st.sampled_from(["len4", "", "z"]),
)
@example(
data_tuple=(["aababaa"], "aabaa", 0),
repl="len4",
n=1,
fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
data_tuple, repl, n, str_accessor, fletcher_variant
):
data, pat, test_offset = data_tuple
_check_str_to_str(
"replace",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
repl=repl,
n=n,
case=True,
regex=False,
)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
"""Check a .str. function that returns a series with type t."""
data, pat, test_offset = data_tuple
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, "count")(pat=pat)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
kwargs = {}
if str_accessor.startswith("fr_"):
kwargs["regex"] = False
result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
_check_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_zfill(data, str_accessor, fletcher_variant):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
max_str_len = ser_pd.map(_optional_len).max()
if pd.isna(max_str_len):
max_str_len = 0
arrow_data = pa.array(data, type=pa.string())
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
ser_fr = pd.Series(fr_array)
result_pd = ser_pd.str.zfill(max_str_len + 1)
result_fr = getattr(ser_fr, str_accessor).zfill(max_str_len + 1)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None, max_examples=3)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[
" 000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀"
],
["\x80 "],
[],
],
example_kword="data",
)
def test_text_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):
_do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[],
[""],
[None],
[" "],
["\u2000"],
[" a"],
["a "],
[" a "],
# https://github.com/xhochy/fletcher/issues/174
["\xa0"],
["\u2000a\u2000"],
["\u2000\u200C\u2000"],
["\n\u200C\r"],
["\u2000\x80\u2000"],
["\t\x80\x0b"],
["\u2000\u10FFFF\u2000"],
[" \u10FFFF "],
]
+ [
[c]
for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
]
+ [[chr(c)] for c in range(0x32)]
+ [[chr(c)] for c in range(0x80, 0x85)]
+ [[chr(c)] for c in range(0x200C, 0x2030)]
+ [[chr(c)] for c in range(0x2060, 0x2070)]
+ [[chr(c)] for c in range(0x10FFFE, 0x110000)],
example_kword="data",
)
def test_text_strip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data)
def _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
arrow_data = pa.array(
[None for _ in range(fletcher_slice_offset)] + data, type=pa.string()
)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
ser_fr = pd.Series(fr_array[fletcher_slice_offset:])
result_pd = ser_pd.str.strip()
result_fr = getattr(ser_fr, str_accessor).strip()
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
result_pd[result_pd.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def test_fr_str_accessor(fletcher_array):
data = ["a", "b"]
ser_pd = pd.Series(data)
# object series is returned
s = ser_pd.fr_str.encode("utf8")
assert s.dtype == np.dtype("O")
# test fletcher functionality and fallback to pandas
arrow_data = pa.array(data, type=pa.string())
fr_array = fletcher_array(arrow_data)
ser_fr = pd.Series(fr_array)
# pandas strings only method
s = ser_fr.fr_str.encode("utf8")
assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
data = [1, 2]
ser_pd = pd.Series(data)
with pytest.raises(Exception):
ser_pd.fr_str.startswith("a")
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_text_extractall(str_accessor, fletcher_variant, data, regex):
if str_accessor == "str":
pytest.skip("extractall is not yet dispatched to the ExtensionArray")
return
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).extractall(regex)
assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)
ser_pd = pd.Series(data)
result_pd = ser_pd.str.extractall(regex)
tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_text_split(str_accessor, fletcher_variant, data, expand):
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).split("+", expand=expand)
ser_pd = pd.Series(data)
result_pd = ser_pd.str.split("+", expand=expand)
if expand:
tm.assert_frame_equal(result_pd, result_fr.astype(object))
else:
tm.assert_series_equal(result_pd, result_fr.astype(object))
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),
)
def test_slice(data, slice_, str_accessor, fletcher_variant):
if slice_[2] == 0:
        # A slice step of 0 is invalid (pandas raises ValueError), so skip these draws.
        return
if data == [None] or data == [""]:
return
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).slice(*slice_)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
ser_pd = pd.Series(data, dtype=object)
result_pd = ser_pd.str.slice(*slice_)
| tm.assert_series_equal(result_fr, result_pd) | pandas.testing.assert_series_equal |
import os, math
import _pickle as pickle
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from sklearn import preprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', default='data', help='Parent dir of the dataset')
parser.add_argument('--file-name', default='electricity.csv', help='Directory containing data.csv')
parser.add_argument('--pickle-name', default='electricity.pkl', help='Directory containing data.csv')
parser.add_argument('--horizon', type=int, default=24, help='Forecast horizon. Default=24')
parser.add_argument('--test', action='store_true', help='whenever to use test set only.')
parser.add_argument('--hop', action='store_true', help='Whether to use test set for validation') # default=False
if __name__ == '__main__':
args = parser.parse_args()
### load the data
dir_path = args.data_folder # './data'
file_name = args.file_name
if file_name=='electricity.csv':
train_start = '2012-01-01 00:00:00'
if args.test:
train_end = '2013-10-19 23:00:00'
test_start = '2014-05-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif args.hop:
train_end = '2012-04-30 23:00:00'
test_start = '2012-04-24 00:00:00'
test_end = '2012-05-31 23:00:00'
else:
train_end = '2013-10-19 23:00:00'
test_start = '2013-10-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif 'europe_power_system' in file_name:
train_start = '2015-01-01 00:00:00'
if args.test:
train_end = '2017-01-15 23:00:00'
test_start = '2017-06-17 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
elif args.hop:
train_end = '2015-04-30 23:00:00'
test_start = '2015-04-24 00:00:00' #need additional 7 days as given info
test_end = '2015-05-31 23:00:00'
else:
train_end = '2017-01-15 23:00:00'
test_start = '2017-01-16 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
df = pd.read_csv(os.path.join(dir_path, file_name), sep=",", index_col=0, parse_dates=True, decimal='.')
df = df.reset_index()
df = df.drop([df.columns[0]], axis=1).transpose()
dt = df.rename(columns=df.iloc[0]).values #.drop(df.index[0])
## The date range
date_list = pd.date_range(start=train_start, end=test_end)
date_list = pd.to_datetime(date_list)
yr = int(date_list.year[0])
hour_list = []
for nDate in date_list:
for nHour in range(24):
tmp_timestamp = nDate+timedelta(hours=nHour)
hour_list.append(tmp_timestamp)
hour_list = np.array(hour_list)
#print('hour_list', hour_list.shape[0])
#print('dt.shape[0]', dt.shape[0])
station_index = list(range(dt.shape[0]))
#if args.horizon ==36:
# sliding_window_dis = 24;
#else:
# sliding_window_dis = args.horizon;
#print('sliding_window_dis: ', sliding_window_dis)
    sliding_window_dis = args.horizon  # 24
    input_len = 168
    output_len = args.horizon  # 24
    sample_len = input_len + output_len  # 192 = 168 + 24
    coef = args.horizon / 24
    total_n = int((len(date_list) - 8) / coef)  # 800; the total number of days
    test_n = int(len(pd.date_range(start=test_start, end=test_end)) / coef)  # assumption: test days scaled by coef like total_n
# coding: utf-8
# In[ ]:
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pprint
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import pyLDAvis
import pyLDAvis.gensim
from nltk.corpus import stopwords
from collections import Counter
import string
import operator
import nltk
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
import pickle
# In[ ]:
eudata = pd.read_csv('euData.csv')
eudata['date'] = pd.to_datetime(eudata['date'])
eudata_early = eudata[eudata.date < '2001-01-01']
rownr = eudata_early.shape[0]
rownr
# In[ ]:
pd.options.mode.chained_assignment = None
wordCount = Counter([])
mepDict = {}
stopWords = list(nltk.corpus.stopwords.words('english'))
for i in range(rownr):
if not i%10000:
print(i)
text = eudata_early['text'].iloc[i]
agenda = eudata_early['agenda'].iloc[i]
#remove punctuation, digits and lowering case
text = (agenda + ' ' + text).translate(str.maketrans('','',(string.punctuation + string.digits + '–'))).lower()
#removing unicode
text = (text.encode('ascii', 'ignore')).decode('utf-8')
#splitting text into word list
textList = [x for x in text.split() if x not in stopWords]
mep = eudata_early['name'].iloc[i]
party = eudata_early['party'].iloc[i]
euparty = eudata_early['euparty'].iloc[i]
date = eudata_early['date'].iloc[i]
speechnr = eudata_early['speechnr'].iloc[i]
session = pd.Timestamp(date.year,date.month,1)
k = (mep, session)
if k in mepDict.keys():
mepDict[k] = mepDict[k] + textList
else:
mepDict[k] = textList
wordCount = wordCount + Counter(textList)
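# After this loop, mepDict maps (MEP name, first-of-month Timestamp) -> the concatenated token list
# of that MEP's speeches in the month, and wordCount holds corpus-wide token counts.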
# In[ ]:
topWords = list(dict(sorted(wordCount.items(), key=operator.itemgetter(1), reverse=True)[:30]).keys())
topWords
# In[ ]:
len(mepDict)
# In[ ]:
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def stem(words):
stemmer = PorterStemmer()
return [stemmer.stem(word) for word in words]
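# Illustrative behaviour: get_lemma("ran") should give "run" via WordNet's morphy, and
# stem(["running", "debates"]) returns roughly ["run", "debat"] with the Porter stemmer.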
# In[ ]:
fillWords = ['also', 'behalf', 'commission', 'commissioner', 'committee',
'council', 'debate', 'european', 'gentlemen', 'item', 'ladies',
'like', 'madam', 'make', 'minutes', 'mr', 'mrs', 'next',
'parliament', 'point', 'presidency', 'president', 'proposal',
'question', 'say', 'sitting', 'thank', 'think', 'vote', 'want']
excludeWords = topWords + fillWords + stopWords
excludeWords = stem([get_lemma(x) for x in excludeWords])
mepDict_clean = {}
i = 0
for k in mepDict.keys():
if not i % 1000:
print(i)
i += 1
textList = mepDict[k]
textList_clean = stem([get_lemma(x) for x in textList])
textList_clean = [x for x in textList_clean if x not in excludeWords and len(x) > 2]
if len(textList_clean) > 10:
mepDict_clean[k] = textList_clean
# In[ ]:
len(mepDict_clean)
# In[ ]:
text_data = []
i = 0
for k in mepDict_clean.keys():
if not i % 1000:
print(i)
i += 1
text_data.append(mepDict_clean[k])
# In[ ]:
dictionary = corpora.Dictionary(text_data)
corpus = [dictionary.doc2bow(text) for text in text_data]
pickle.dump(corpus, open('Trial_Topics/corpus.pkl', 'wb'))
dictionary.save('Trial_Topics/dictionary.gensim')
# # performance measurement
# In[ ]:
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
model = gensim.models.ldamodel.LdaModel(corpus, num_topics = num_topics, id2word=dictionary)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
# In[ ]:
model_list, coherence_values = compute_coherence_values(dictionary=dictionary, corpus=corpus, texts=text_data,limit=100,start=20, step=10)
# In[ ]:
limit=100; start=20; step=10;
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# In[ ]:
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
# In[ ]:
ldamodel = model_list[4]
ldamodel.save('Trial_Topics/model.gensim')
topics = ldamodel.print_topics(num_words=6)
for topic in topics:
print(topic)
# In[ ]:
def get_topic(text):
if type(text).__name__ == 'str':
text = text.split()
topics = ldamodel.get_document_topics(dictionary.doc2bow(text))
return topics
# In[ ]:
topicData = np.array([(k[0], k[1], get_topic(v)) for k, v in mepDict_clean.items()]).reshape((-1,3))
topicData[0,:]
# In[ ]:
topicDF = | pd.DataFrame(topicData, columns=['name', 'date', 'topic']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
from calendar import monthrange
from datetime import datetime
from time import sleep
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from fake_headers import Headers
from nordvpn_switcher import initialize_VPN, rotate_VPN, terminate_VPN
from Google import Create_Service
# import random
# import shadow_useragent
# from fake_useragent import UserAgent
| pd.set_option('display.max_columns', None) | pandas.set_option |
"""
Clean a DataFrame column containing text data.
"""
import re
import string
from functools import partial, update_wrapper
from typing import Any, Callable, Dict, List, Optional, Set, Union
from unicodedata import normalize
import dask.dataframe as dd
import numpy as np
import pandas as pd
from ..assets.english_stopwords import english_stopwords
from .utils import NULL_VALUES, to_dask
REGEX_BRACKETS = {
"angle": re.compile(r"(\<)[^<>]*(\>)"),
"curly": re.compile(r"(\{)[^{}]*(\})"),
"round": re.compile(r"(\()[^()]*(\))"),
"square": re.compile(r"(\[)[^\[\]]*(\])"),
}
REGEX_DIGITS = re.compile(r"\d+")
REGEX_DIGITS_BLOCK = re.compile(r"\b\d+\b")
REGEX_HTML = re.compile(r"<[A-Za-z/][^>]*>|&(?:[a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")
REGEX_PUNCTUATION = re.compile(rf"[{re.escape(string.punctuation)}]")
REGEX_URL = re.compile(r"(?:https?://|www\.)\S+")
REGEX_WHITESPACE = re.compile(r"[\n\t]|[ ]{2,}")
def clean_text(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
pipeline: Optional[List[Dict[str, Any]]] = None,
stopwords: Optional[Set[str]] = None,
) -> pd.DataFrame:
"""
Clean text data in a DataFrame column.
Read more in the :ref:`User Guide <clean_text_user_guide>`.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
The name of the column containing text data.
pipeline
A list of cleaning functions to be applied to the column. If None,
use the default pipeline. See the :ref:`User Guide <clean_text_custom_pipeline>`
for more information on customizing the pipeline.
(default: None)
stopwords
A set of words to be removed from the column. If None, use NLTK's
stopwords.
(default: None)
Examples
--------
Clean a column of text data using the default pipeline.
>>> df = pd.DataFrame({"text": ["This show was an amazing, fresh & innovative idea in the \
70's when it first aired."]})
>>> clean_text(df, 'text')
text
0 show amazing fresh innovative idea first aired
"""
df = to_dask(df)
pipe = _get_default_pipeline(stopwords) if not pipeline else _get_custom_pipeline(pipeline)
for func in pipe:
df[column] = df[column].apply(func, meta=object)
df = df.compute()
return df
def default_text_pipeline() -> List[Dict[str, Any]]:
"""
Return a list of dictionaries representing the functions in the default pipeline.
Use as a template for creating a custom pipeline.
Read more in the :ref:`User Guide <clean_text_user_guide>`.
Examples
--------
>>> default_text_pipeline()
[{'operator': 'fillna'}, {'operator': 'lowercase'}, {'operator': 'remove_digits'},
{'operator': 'remove_html'}, {'operator': 'remove_urls'}, {'operator': 'remove_punctuation'},
{'operator': 'remove_accents'}, {'operator': 'remove_stopwords', 'parameters':
{'stopwords': None}}, {'operator': 'remove_whitespace'}]
"""
return [
{"operator": "fillna"},
{"operator": "lowercase"},
{"operator": "remove_digits"},
{"operator": "remove_html"},
{"operator": "remove_urls"},
{"operator": "remove_punctuation"},
{"operator": "remove_accents"},
{"operator": "remove_stopwords", "parameters": {"stopwords": None}},
{"operator": "remove_whitespace"},
]
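# Illustrative customization (not executed here): start from the default pipeline, drop digit
# removal and strip square-bracketed text; operator names refer to the mapping in _get_func_dict.
#
#   custom = default_text_pipeline()
#   custom = [step for step in custom if step["operator"] != "remove_digits"]
#   custom.append({"operator": "remove_bracketed", "parameters": {"brackets": "square"}})
#   clean_text(df, "text", pipeline=custom)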
def _get_default_pipeline(
stopwords: Optional[Set[str]] = None,
) -> List[Callable[..., Any]]:
"""
Return a list of functions defining the default pipeline.
"""
return [
_fillna,
_lowercase,
_remove_digits,
_remove_html,
_remove_urls,
_remove_punctuation,
_remove_accents,
lambda x: _remove_stopwords(x, stopwords),
_remove_whitespace,
]
def _get_custom_pipeline(pipeline: List[Dict[str, Any]]) -> List[Callable[..., Any]]:
"""
Return a list of functions defining a custom pipeline.
"""
func_dict = _get_func_dict()
custom_pipeline: List[Callable[..., Any]] = []
for component in pipeline:
# Check whether function is built in or user defined
operator = (
func_dict[component["operator"]]
if isinstance(component["operator"], str)
else component["operator"]
)
# Append the function to the pipeline
# If parameters are specified, create a partial function to lock in
# the values and prevent them from being overwritten in subsequent loops
if "parameters" in component:
custom_pipeline.append(_wrapped_partial(operator, component["parameters"]))
else:
custom_pipeline.append(operator)
return custom_pipeline
def _get_func_dict() -> Dict[str, Callable[..., Any]]:
"""
Return a mapping of strings to function names.
"""
return {
"fillna": _fillna,
"lowercase": _lowercase,
"sentence_case": _sentence_case,
"title_case": _title_case,
"uppercase": _uppercase,
"remove_accents": _remove_accents,
"remove_bracketed": _remove_bracketed,
"remove_digits": _remove_digits,
"remove_html": _remove_html,
"remove_prefixed": _remove_prefixed,
"remove_punctuation": _remove_punctuation,
"remove_stopwords": _remove_stopwords,
"remove_urls": _remove_urls,
"remove_whitespace": _remove_whitespace,
"replace_bracketed": _replace_bracketed,
"replace_digits": _replace_digits,
"replace_prefixed": _replace_prefixed,
"replace_punctuation": _replace_punctuation,
"replace_stopwords": _replace_stopwords,
"replace_text": _replace_text,
"replace_urls": _replace_urls,
}
def _fillna(text: Any, value: Any = np.nan) -> Any:
"""
Replace all null values with NaN (default) or the supplied value.
"""
return value if text in NULL_VALUES else str(text)
def _lowercase(text: Any) -> Any:
"""
Convert all characters to lowercase.
"""
return str(text).lower() if pd.notna(text) else text
def _sentence_case(text: Any) -> Any:
"""
Convert first character to uppercase and remaining to lowercase.
"""
return str(text).capitalize() if pd.notna(text) else text
def _title_case(text: Any) -> Any:
"""
Convert first character of each word to uppercase and remaining to lowercase.
"""
return str(text).title() if pd.notna(text) else text
def _uppercase(text: Any) -> Any:
"""
Convert all characters to uppercase.
"""
return str(text).upper() if pd.notna(text) else text
def _remove_accents(text: Any) -> Any:
"""
Remove accents (diacritic marks).
"""
return (
normalize("NFD", str(text)).encode("ascii", "ignore").decode("ascii")
if pd.notna(text)
else text
)
def _remove_bracketed(text: Any, brackets: Union[str, Set[str]], inclusive: bool = True) -> Any:
"""
Remove text between brackets.
Parameters
----------
brackets
The bracket style.
- "angle": <>
- "curly": {}
- "round": ()
- "square": []
inclusive
If True (default), remove the brackets along with the text in between.
Otherwise, keep the brackets.
"""
if pd.isna(text):
return text
text = str(text)
value = "" if inclusive else r"\g<1>\g<2>"
if isinstance(brackets, set):
for bracket in brackets:
text = re.sub(REGEX_BRACKETS[bracket], value, text)
else:
text = re.sub(REGEX_BRACKETS[brackets], value, text)
return text
def _remove_digits(text: Any) -> Any:
"""
Remove all digits.
"""
return re.sub(REGEX_DIGITS, "", str(text)) if pd.notna(text) else text
def _remove_html(text: Any) -> Any:
"""
Remove HTML tags.
"""
return re.sub(REGEX_HTML, "", str(text)) if pd.notna(text) else text
def _remove_prefixed(text: Any, prefix: Union[str, Set[str]]) -> Any:
"""
Remove substrings that start with the prefix(es).
"""
if | pd.isna(text) | pandas.isna |
from multiprocessing import Pool
import cvxpy as cvx
from cvxstoc import NormalRandomVariable, prob, expectation
from cvxpower import FixedLoad, Generator, Net, Group, Device, Terminal
from datetime import timedelta
import pandas as pd
import numpy as np
import dccp
import time
import os
class LossyStorage(Device):
r"""Storage device.
A storage device either takes or delivers power with charging and
discharging rates specified by the constraints
.. math::
-D^\max \le p \le C^\max
..
where :math:`C^\max` and :math:`D^\max` are the maximum charging and
discharging rates. The charge level of the battery is given by
.. math::
q(\tau) = q^\mathrm{init} + \sum_{t=1}^\tau p(t), \quad \tau = 1, \ldots, T,
..
which is constrained according to the physical limits of the battery
.. math::
0 \le q \le Q^\max.
..
:param discharge_max: Maximum discharge rate, :math:`D^\max`
:param charge_max: Maximum charge rate, :math:`C^\max`
:param energy_init: Initial charge, :math:`q^\mathrm{init}`
:param energy_max: Maximum battery capacity, :math:`Q^\max`
:param name: (optional) Display name of storage device
:type discharge_max: float or sequence of floats
:type charge_max: float or sequence of floats
:type energy_init: float
:type energy_max: float or sequence of floats
:type name: string
"""
def __init__(
self,
discharge_max=0,
charge_max=None,
energy_init=0,
energy_final=None,
energy_max=None,
name=None,
len_interval=1.0,
final_energy_price=None,
alpha = 0.0,
DoD = 0.9
):
super(LossyStorage, self).__init__([Terminal()], name)
self.discharge_max = discharge_max
self.charge_max = charge_max
self.energy_init = energy_init
self.energy_max = energy_max
self.energy_min = energy_max * (1 - DoD)
self.energy_final = energy_final
self.len_interval = len_interval # in hours
self.final_energy_price = final_energy_price
self.energy = None
self.alpha = alpha
self.T = int(self.len_interval*24)
@property
def cost(self):
T, S = self.terminals[0].power_var.shape
if self.final_energy_price is not None:
if self.energy is None:
self.energy = cvx.Variable(self.terminals[0].power_var.shape)
cost = np.zeros((T - 1, S))
final_cost = cvx.reshape(
self.energy[-1, :] * self.final_energy_price[0, 0], (1, S)
)
cost = cvx.vstack([cost, final_cost])
else:
            cost = np.zeros((T, S))
return cost
@property
def constraints(self):
P = self.terminals[0].power_var
if self.energy is None:
self.energy = cvx.Variable(self.terminals[0].power_var.shape)
e_init = cvx.reshape(self.energy_init, ())
constr = [
#cvx.diff(self.energy.T) == P[1:, :] * self.len_interval,
#self.energy[0, :] - e_init - P[0, :] * self.len_interval == 0,
self.energy[0, :] == e_init,
self.terminals[0].power_var >= -self.discharge_max,
self.terminals[0].power_var <= self.charge_max,
self.energy <= self.energy_max,
self.energy >= self.energy_min,
]
for t in range(0, self.T - 1):
constr += [
self.energy[t+1] == (1 - self.alpha) * self.energy[t] + P[t] * self.len_interval
]
if self.energy_final is not None:
constr += [(1 - self.alpha) * self.energy[-1] + P[-1] * self.len_interval>= self.energy_final]
#constr += [self.energy[-1] >= self.energy_final]
return constr
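# Worked example of the energy recursion above: with alpha = 0.01, len_interval = 1 h,
# energy[t] = 10 kWh and a charge of P[t] = 2 kW, energy[t+1] = 0.99 * 10 + 2 * 1 = 11.9 kWh
# (units illustrative).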
class Converter(Device):
"""Storage converter.
A loss converter has two terminals with power schedules
:math:`p_1` and :math:`p_2`. Conservation of energy across the
converter is enforced with the constraint
.. math::
p_1 + p_2 = (1-eta)p_1,
p_1 + p_2 = (1-eta)p_2,
..
and a maximum capacity of :math:`P^\max` with
.. math::
|p_1| \le P^\max.
..
:param power_max: Maximum capacity of the converter line
:param name: (optional) Display name for converter line
:type power_max: float or sequence of floats
:type name: string
"""
def __init__(self, eta=0.0, power_max=None, name=None):
super(Converter, self).__init__([Terminal(), Terminal()], name)
self.power_max = power_max
self.eta = eta
assert self.eta >= 0
@property
def constraints(self):
p1 = self.terminals[0].power_var
p2 = self.terminals[1].power_var
constrs = []
if self.eta > 0:
constrs += [p1 + p2 >= (1 - self.eta)*p1]
# constrs += [p1 + p2 <= (1 - self.eta)*p2]
if self.power_max is not None:
constrs += [((2 * self.power_max) * (1 - self.eta)) / (1 + self.eta) >= p1 + p2]
# constrs += [((2 * self.power_max) * (self.eta - 1)) / (1 + self.eta) >= p1 + p2]
else:
constrs += [p1 + p2 == 0]
if self.power_max is not None:
constrs += [cvx.abs((p1 - p2) / 2) <= self.power_max]
return constrs
def getConstraints(network):
constraints = []
group = network.devices + network.nets
[constraints.append(constraint) for x in group for constraint in x.constraints]
return constraints
def getData(df, day, d_name):
idx = df[df.index.dayofyear==day].index
return df.loc[idx, d_name].values
def process_image(NUM_HOURS_ENS):
cov_matrix_load = pd.read_csv('./data/cov_matrix_load.csv').values
#cov_matrix_load = np.repeat(np.repeat(pd.read_csv('./cov_matrix_load.csv').values, 2, axis=0), 2, axis=1)
#cov_matrix_solar = np.repeat(np.repeat(pd.read_csv('./cov_matrix_solar.csv').values, 2, axis=0), 2, axis=1)
cov_matrix_solar = pd.read_csv('./data/cov_matrix_solar.csv').values
df_optim = pd.read_csv('./data/lvdc_microgrid_optim_variables_short.csv',
index_col=0, infer_datetime_format=True, parse_dates=["index"])
# df_optim = df_optim.append(pd.DataFrame(index=[df_optim.index[-1]+timedelta(hours=1)]))
# df_optim = df_optim.resample("30T").ffill().dropna() # resample to 30 minutes
df_acc = | pd.read_csv('./data/accuracy_in_clusters_daily_predictions_2013.txt', header=None) | pandas.read_csv |
import sys
import pandas as pd
import sqlite3
import sqlalchemy
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath='disaster_messages.csv', categories_filepath='disaster_categories.csv'):
'''
Loads messages and categories from dataset
Returns merged dataframe of messages & categories
'''
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_dataframe,
_is_koalas_dataframe,
_is_koalas_series,
_is_series,
get_invalid_schema_message,
init_series,
is_schema_valid,
)
from woodwork.exceptions import TypeConversionError
from woodwork.logical_types import Categorical, Datetime, NaturalLanguage
def test_init_series_valid_conversion_specified_ltype(sample_series):
if _is_koalas_series(sample_series):
sample_series = sample_series.astype("str")
else:
sample_series = sample_series.astype("object")
series = init_series(sample_series, logical_type="categorical")
assert series is not sample_series
correct_dtype = Categorical._get_valid_dtype(type(sample_series))
assert series.dtype == correct_dtype
assert isinstance(series.ww.logical_type, Categorical)
assert series.ww.semantic_tags == {"category"}
series = init_series(sample_series, logical_type="natural_language")
assert series is not sample_series
correct_dtype = NaturalLanguage._get_valid_dtype(type(sample_series))
assert series.dtype == correct_dtype
assert isinstance(series.ww.logical_type, NaturalLanguage)
assert series.ww.semantic_tags == set()
def test_init_series_with_pd_extension_array():
extension_categories = pd.Categorical([1, 2, 3])
series = init_series(extension_categories)
pd_reference_series = init_series(pd.Series([1, 2, 3], dtype="category"))
assert series.equals(pd_reference_series)
assert series.ww.logical_type == pd_reference_series.ww.logical_type
assert series.ww.semantic_tags == pd_reference_series.ww.semantic_tags
extension_ints = pd.array(np.array([1, 2, 3, 4], dtype="int64"))
series = init_series(extension_ints)
pd_reference_series = init_series(pd.Series([1, 2, 3, 4], dtype="Int64"))
assert series.equals(pd_reference_series)
assert series.ww.logical_type == pd_reference_series.ww.logical_type
assert series.ww.semantic_tags == pd_reference_series.ww.semantic_tags
def test_init_series_with_invalid_type(sample_df):
inputs = [sample_df, 1, "string", None]
for input_ in inputs:
error_message = (
f"Input must be of series type. The current input is of type {type(input_)}"
)
with pytest.raises(TypeError, match=error_message):
init_series(input_)
def test_init_series_with_np_array(sample_series_pandas):
series = init_series(sample_series_pandas.to_numpy())
series2 = init_series(
sample_series_pandas
) # Sample series panda contains ['a','b','c','a']
assert series.equals(series2)
assert series.ww.logical_type == series2.ww.logical_type
assert series.ww.semantic_tags == series2.ww.semantic_tags
def test_init_series_with_multidimensional_np_array():
input_ = np.array([["a", "b"], ["a", "b"]])
error_message = f"np.ndarray input must be 1 dimensional. Current np.ndarray is {input_.ndim} dimensional"
with pytest.raises(ValueError, match=error_message):
init_series(input_)
def test_init_series_valid_conversion_inferred_ltype(sample_series):
if _is_koalas_series(sample_series):
sample_series = sample_series.astype("str")
else:
sample_series = sample_series.astype("object")
series = init_series(sample_series)
assert series is not sample_series
correct_dtype = Categorical._get_valid_dtype(type(sample_series))
assert series.dtype == correct_dtype
assert isinstance(series.ww.logical_type, Categorical)
assert series.ww.semantic_tags == {"category"}
def test_init_series_with_datetime(sample_datetime_series):
series = init_series(sample_datetime_series, logical_type="datetime")
assert series.dtype == "datetime64[ns]"
assert isinstance(series.ww.logical_type, Datetime)
def test_init_series_all_parameters(sample_series):
if _is_koalas_series(sample_series):
sample_series = sample_series.astype("str")
else:
sample_series = sample_series.astype("object")
metadata = {"meta_key": "meta_value"}
description = "custom description"
origin = "base"
series = init_series(
sample_series,
logical_type="categorical",
semantic_tags=["custom_tag"],
metadata=metadata,
description=description,
origin=origin,
use_standard_tags=False,
)
assert series is not sample_series
correct_dtype = Categorical._get_valid_dtype(type(sample_series))
assert series.dtype == correct_dtype
assert isinstance(series.ww.logical_type, Categorical)
assert series.ww.semantic_tags == {"custom_tag"}
assert series.ww.metadata == metadata
assert series.ww.description == description
assert series.ww.origin == origin
def test_init_series_error_on_invalid_conversion(sample_series):
if _is_dask_series(sample_series):
pytest.xfail(
"Dask type conversion with astype does not fail until compute is called"
)
if _is_koalas_series(sample_series):
pytest.xfail(
"Koalas allows this conversion, filling values it cannot convert with NaN "
"and converting dtype to float."
)
error_message = (
"Error converting datatype for sample_series from type category to type Int64. "
"Please confirm the underlying data is consistent with logical type IntegerNullable."
)
with pytest.raises(TypeConversionError, match=error_message):
init_series(sample_series, logical_type="integer_nullable")
def test_is_series(sample_df):
assert _is_series(sample_df["id"])
assert not _is_series(sample_df)
def test_is_dataframe(sample_df):
assert _is_dataframe(sample_df)
assert not _is_dataframe(sample_df["id"])
def test_get_invalid_schema_message(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_schema",
index="id",
logical_types={"id": "Double", "full_name": "PersonFullName"},
)
schema = schema_df.ww.schema
assert get_invalid_schema_message(schema_df, schema) is None
assert (
get_invalid_schema_message(sample_df, schema)
== "dtype mismatch for column id between DataFrame dtype, int64, and Double dtype, float64"
)
sampled_df = schema_df.sample(frac=0.3)
assert get_invalid_schema_message(sampled_df, schema) is None
dropped_df = schema_df.drop("id", axis=1)
assert (
get_invalid_schema_message(dropped_df, schema)
== "The following columns in the typing information were missing from the DataFrame: {'id'}"
)
renamed_df = schema_df.rename(columns={"id": "new_col"})
assert (
get_invalid_schema_message(renamed_df, schema)
== "The following columns in the DataFrame were missing from the typing information: {'new_col'}"
)
def test_get_invalid_schema_message_dtype_mismatch(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
logical_types={"age": "Categorical", "full_name": "PersonFullName"}
)
schema = schema_df.ww.schema
incorrect_int_dtype_df = schema_df.ww.astype({"id": "Int64"})
incorrect_bool_dtype_df = schema_df.ww.astype({"is_registered": "Int64"})
assert (
get_invalid_schema_message(incorrect_int_dtype_df, schema)
== "dtype mismatch for column id between DataFrame dtype, Int64, and Integer dtype, int64"
)
assert (
get_invalid_schema_message(incorrect_bool_dtype_df, schema)
== "dtype mismatch for column is_registered between DataFrame dtype, Int64, and BooleanNullable dtype, boolean"
)
# Koalas backup dtypes make these checks not relevant
if not _is_koalas_dataframe(sample_df):
incorrect_str_dtype_df = schema_df.ww.astype(
{"full_name": "object"}
) # wont work for koalas
incorrect_categorical_dtype_df = schema_df.ww.astype(
{"age": "string"}
) # wont work for koalas
assert (
get_invalid_schema_message(incorrect_str_dtype_df, schema)
== "dtype mismatch for column full_name between DataFrame dtype, object, and PersonFullName dtype, string"
)
assert (
get_invalid_schema_message(incorrect_categorical_dtype_df, schema)
== "dtype mismatch for column age between DataFrame dtype, string, and Categorical dtype, category"
)
def test_get_invalid_schema_message_index_checks(sample_df):
if not isinstance(sample_df, pd.DataFrame):
pytest.xfail("Index validation not performed for Dask or Koalas DataFrames")
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_schema",
index="id",
logical_types={"id": "Double", "full_name": "PersonFullName"},
)
schema = schema_df.ww.schema
different_underlying_index_df = schema_df.copy()
different_underlying_index_df["id"] = pd.Series([9, 8, 7, 6], dtype="float64")
assert (
get_invalid_schema_message(different_underlying_index_df, schema)
== "Index mismatch between DataFrame and typing information"
)
not_unique_df = schema_df.replace({3: 1})
not_unique_df.index = not_unique_df["id"]
not_unique_df.index.name = None
assert (
get_invalid_schema_message(not_unique_df, schema)
== "Index column is not unique"
)
df = pd.DataFrame(
{
"id": pd.Series([5, 4, 3, 2], dtype="float64"),
"col": pd.Series(["b", "b", "b", "d"], dtype="category"),
}
)
df.ww.init(index="id")
df_schema = df.ww.schema
nan_df = df.replace({3: None})
nan_df["id"] = nan_df["id"].astype("float64")
nan_df = nan_df.set_index("id", drop=False)
actual = get_invalid_schema_message(nan_df, df_schema)
assert actual == "Index contains null values"
def test_is_schema_valid_true(sample_df):
sample_df.ww.init(index="id", logical_types={"phone_number": "Categorical"})
copy_df = sample_df.copy()
assert copy_df.ww.schema is None
assert is_schema_valid(copy_df, sample_df.ww.schema)
def test_is_schema_valid_false(sample_df):
sample_df.ww.init()
schema = sample_df.ww.schema
invalid_dtype_df = sample_df.astype({"age": "float64"})
assert not is_schema_valid(invalid_dtype_df, schema)
missing_col_df = sample_df.drop(columns={"is_registered"})
assert not is_schema_valid(missing_col_df, schema)
def test_is_dask_dataframe(sample_df_dask):
assert _is_dask_dataframe(sample_df_dask)
assert not _is_dask_dataframe(pd.DataFrame())
def test_is_dask_series(sample_series_dask):
assert _is_dask_series(sample_series_dask)
assert not _is_dask_series( | pd.Series() | pandas.Series |
#!/usr/bin/env python
import argparse
from subprocess import run
import pandas as pd
PARAMS = [5, 10, 25, 50, 100, 250, 500, 1000]
# PARAMS = [i*i*5 for i in range(1,15)]
parser = argparse.ArgumentParser(description='Correlation Evaluation script',
usage='Use CV to optimize correlation',
epilog='The files must have 2 columns, first for index and second for the values')
parser.add_argument('--predictor', metavar='predictor_file_path',
default='SetupFiles-indri-5.6/clarity.m-1/Clarity-Fiana', help='path to predictor executable res')
parser.add_argument('--parameters', metavar='parameters_file_path', default='clarity/clarityParam.xml',
help='path to predictor parameters res')
parser.add_argument('--testing', metavar='running_parameter', default='documents', choices=['documents', 'fbDocs'],
help='The parameter to optimize')
parser.add_argument('-q', '--queries', metavar='queries.xml', default='data/ROBUST/queries.xml',
help='path to queries xml res')
parser.add_argument('-m', '--measure', default='pearson', type=str,
help='default correlation measure type is pearson', choices=['pearson', 'spearman', 'kendall'], )
# parser.add_argument("-v", "--verbose", help="increase output verbosity",
# action="store_true")
def pre_testing(predictor_exe, parameters_xml, test_param, queries):
"""This function will run the predictor using a shell command for different numbers of documents
and save the output files to the dir tmp-testing"""
run('mkdir -v tmp-testing', shell=True)
pred = 'Fiana' if 'Fiana' in predictor_exe else 'Anna'
run('mkdir -v tmp-testing/clarity-{}'.format(pred), shell=True)
print('The temporary files will be saved in the directory tmp-testing')
for i in PARAMS:
print('\n ******** Running for: {} documents ******** \n'.format(i))
output = 'tmp-testing/clarity-{}/predictions-{}'.format(pred, i)
run('{} {} -{}={} {} > {}'.format(predictor_exe, parameters_xml, test_param, i,
queries, output), shell=True)
def calc_cor_files(first_file, second_file, test):
first_df = pd.read_table(first_file, delim_whitespace=True, header=None, index_col=0, names=['x'])
second_df = pd.read_table(second_file, delim_whitespace=True, header=None, index_col=0, names=['y'])
return calc_cor_df(first_df, second_df, test)
def calc_cor_df(first_df, second_df, test):
merged_df = | pd.merge(first_df, second_df, left_index=True, right_index=True) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Make sure the following dependencies are installed.
#!pip install albumentations --upgrade
#!pip install timm
#!pip install iterative-stratification
__author__ = 'MPWARE: https://www.kaggle.com/mpware'
# In[ ]:
# Configure HOME and DATA_HOME according to your setup
HOME = "./"
DATA_HOME = "./data/"
TRAIN_HOME = DATA_HOME + "train/"
TRAIN_IMAGES_HOME = TRAIN_HOME + "images/"
IMAGE_SIZE = 512 # Image size for training
RESIZED_IMAGE_SIZE = 384 # For random crop
COMPOSE = None # For RGBY support
# Set to True for interactive session
PT_SCRIPT = True # True
# In[ ]:
import sys, os, random, math
from ast import literal_eval
import numpy as np
import pandas as pd
import h5py
import cv2
import skimage.transform
from PIL import Image
import torch
import torch.nn as nn
import operator
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
import albumentations as A
import torch.nn.functional as F
import functools
from collections import OrderedDict
from torch.optim import Adam, SGD
import timm
import iterstrat
# In[ ]:
LABEL = "Label"
ID = "ID"
EID = "EID"
IMAGE_WIDTH = "ImageWidth"
IMAGE_HEIGHT = "ImageHeight"
META = "META"
TOTAL = "Total"
EXT = "ext"
DEFAULT = "default"
# 19 class labels. Some rare classes: Mitotic spindle (0.37%), Negative: (0.15%)
class_mapping = {
0: 'Nucleoplasm', 1: 'Nuclear membrane', 2: 'Nucleoli', 3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles', 5: 'Nuclear bodies', 6: 'Endoplasmic reticulum', 7: 'Golgi apparatus', 8: 'Intermediate filaments',
9: 'Actin filaments', 10: 'Microtubules', 11: 'Mitotic spindle', 12: 'Centrosome', 13: 'Plasma membrane', 14: 'Mitochondria',
15: 'Aggresome', 16: 'Cytosol', 17: 'Vesicles and punctate cytosolic patterns', 18: 'Negative',
}
class_mapping_inv = {v:k for k,v in class_mapping.items()}
class_labels = [str(k) for k,v in class_mapping.items()]
class_names = [str(v) for k,v in class_mapping.items()]
LABELS_OHE_START = 3
# In[ ]:
def seed_everything(s):
random.seed(s)
os.environ['PYTHONHASHSEED'] = str(s)
np.random.seed(s)
# Torch
torch.manual_seed(s)
torch.cuda.manual_seed(s)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
torch.cuda.manual_seed_all(s)
# In[ ]:
def l1_loss(A_tensors, B_tensors):
return torch.abs(A_tensors - B_tensors)
class ComboLoss(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, gamma=1.0, from_logits=True, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.from_logits = from_logits
print("alpha:", self.alpha, "beta:", self.beta, "gamma:", self.gamma)
self.loss_classification = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, y_pred, y_true, features_single=None, y_pred_tiles=None, features_tiles=None, y_pred_tiled_flatten=None):
loss_ = self.alpha * self.loss_classification(y_pred, y_true).mean()
if features_tiles is not None and self.beta > 0:
logits_reconstruction = y_pred_tiles
loss_tiles_class_ = self.loss_classification(logits_reconstruction, y_true).mean()
loss_ = loss_ + self.beta * loss_tiles_class_
if features_single is not None and features_tiles is not None and self.gamma > 0:
loss_reconstruction_ = l1_loss(features_single, features_tiles).mean()
loss_ = loss_ + self.gamma * loss_reconstruction_
return loss_
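# With the defaults set below for the siamese model (alpha=1, beta=1, gamma=0.5), the total loss is
# BCE(single-image logits) + BCE(logits from the reconstructed tiled features) + 0.5 * L1 between the
# single and tiled feature maps; logits/targets are (BS, 19) and features are 4D CAM-style maps.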
# In[ ]:
# Main configuration
class raw_conf:
def __init__(self, factory):
super().__init__()
self.inference = False
self.compose = COMPOSE
self.normalize = False if factory == "HDF5" else True
self.norm_value = None if factory == "HDF5" else 65535.0
# Dataset
self.image_size = None if factory == "HDF5" else IMAGE_SIZE
self.denormalize = 255
# Model
self.mtype = "siamese" # "regular"
self.backbone = 'seresnext50_32x4d' # 'gluon_seresnext101_32x4d' # 'cspresnext50' 'regnety_064'
self.pretrained_weights = "imagenet"
self.INPUT_RANGE = [0, 1]
self.IMG_MEAN = [0.485, 0.456, 0.406, 0.485] if self.compose is None else [0.485, 0.456, 0.406]
self.IMG_STD = [0.229, 0.224, 0.225, 0.229] if self.compose is None else [0.229, 0.224, 0.225]
self.num_classes = 19
self.with_cam = True
self.puzzle_pieces = 4
self.hpa_classifier_weights = None
self.dropout = None
# Model output
self.post_activation = "sigmoid"
self.output_key = "logits" if self.mtype == "regular" else "single_logits" # None
self.output_key_extra = "features" if self.mtype == "regular" else "single_features" # None
self.output_key_siamese = None if self.mtype == "regular" else "tiled_logits"
self.output_key_extra_siamese = None if self.mtype == "regular" else "tiled_features"
# Loss
self.alpha = 1.0 # Single image classification loss
self.beta = 0.0 if self.mtype == "regular" else 1.0 # Reconstructed image classification loss
self.gamma = 0.0 if self.mtype == "regular" else 0.5 # 0.25
self.loss = ComboLoss(alpha=self.alpha, beta=self.beta, gamma=self.gamma)
self.sampler = "prob"
self.sampler_cap = "auto" # None
self.fp16 = True
self.finetune = False
self.optimizer = "Adam" # "SGD"
self.scheduler = None if self.finetune is True or self.optimizer != "Adam" else "ReduceLROnPlateau" # "CosineAnnealingWarmRestarts"
self.scheduler_factor = 0.3
self.scheduler_patience = 8
self.lr = 0.0003
self.min_lr = 0.00005
self.beta1 = 0.9
self.train_verbose = True
self.valid_verbose = True
# Train parameters
self.L_DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.map_location = self.L_DEVICE
self.WORKERS = 0 if PT_SCRIPT is False else 8
self.BATCH_SIZE = 36 if self.mtype == "siamese" else 48
self.ITERATIONS_LOGS = 30
self.CYCLES = 1
self.EPOCHS_PER_CYCLE = 48 # 36
self.EPOCHS = self.CYCLES * self.EPOCHS_PER_CYCLE
self.WARMUP = 0
self.FOLDS = 4
self.METRIC_ = "min" # "max"
self.pin_memory = True
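        # Note: the smaller siamese batch size (36 vs 48) presumably offsets the extra forward
        # pass over the puzzle_pieces = 4 tiled crops that this mode adds.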
# In[ ]:
# Load CSV data, drop duplicates if any
def prepare_data(filename, ext_name=None):
train_pd = pd.read_csv(DATA_HOME + filename)
train_pd[LABEL] = train_pd[LABEL].apply(literal_eval)
train_pd[LABEL] = train_pd[LABEL].apply(lambda x: [int(l) for l in x])
if EXT not in train_pd.columns:
train_pd.insert(2, EXT, DEFAULT)
if ext_name is not None:
train_pd[EXT] = ext_name
train_pd = train_pd.drop_duplicates(subset=[ID]).reset_index(drop=True)
assert(np.argwhere(train_pd.columns.values == EXT)[0][0] == 2)
return train_pd
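# Assumed CSV layout, based on how columns are used below: ID, Label (stringified list of class
# ids), ext at column index 2, then one-hot columns named after class_names starting at
# LABELS_OHE_START, plus optional EID/ImageWidth/ImageHeight metadata.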
# In[ ]:
# Use PIL to support 16 bits, normalize=True to return [0-1.0] float32 image
def read_image(filename, compose=None, normalize=False, norm_value=65535.0, images_root=TRAIN_IMAGES_HOME):
filename = images_root + filename
filename = filename + "_red.png" if "_red.png" not in filename else filename
mt_, pi_, nu_, er_ = filename, filename.replace('_red', '_green'), filename.replace('_red', '_blue'), filename.replace('_red', '_yellow')
if compose is None:
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu, er))
else:
if compose == "RGB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu))
elif compose == "RYB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, er, nu))
elif compose == "RYGYB":
mt = np.asarray(Image.open(mt_))
pi = np.asarray(Image.open(pi_))
nu = np.asarray(Image.open(nu_))
er = np.asarray(Image.open(er_))
ret = np.dstack(((mt + er)/2.0, (pi + er/2)/1.5, nu))
else:
raise Exception("Unknown compose:", compose)
if normalize is True:
# Some images are np.uint16 but from 0-255 range!
if ret.max() > 255:
ret = (ret/norm_value).astype(np.float32)
else:
ret = (ret/255).astype(np.float32)
return ret
# Data available through raw PNG files
class DataFactory:
def __init__(self, paths, conf=None, verbose=False):
super().__init__()
self.paths = paths
self.conf = conf
self.verbose = verbose
print("PNGFile factory") if self.verbose is True else None
def read_image(self, uid, container=None):
images_path = self.paths
if container is not None and container != DEFAULT:
images_path = images_path.replace("images", container)
image = read_image(uid, compose=self.conf.compose, normalize=self.conf.normalize, norm_value=self.conf.norm_value, images_root=images_path)
return image
def cleanup(self):
pass
# Data available through HDF5 files
class HDF5DataFactory:
def __init__(self, paths, conf=None, verbose=False):
super().__init__()
self.paths = paths
self.hdf5_paths = None
self.conf = conf
self.verbose = verbose
self.initialized = False
print("HDF5 factory") if self.verbose is True else None
def initialize_hdf5(self):
if self.initialized is False:
self.hdf5_paths = h5py.File(self.paths, 'r') if isinstance(self.paths, str) else {k: h5py.File(v, 'r') for k, v in self.paths.items()}
self.initialized = True
print("initialize_hdf5", self.hdf5_paths) if self.verbose is True else None
def read_image(self, uid, container=DEFAULT):
self.initialize_hdf5()
hdf5_paths_ = self.hdf5_paths if isinstance(self.hdf5_paths, str) else self.hdf5_paths.get(container)
# Image is already resized, normalized 0-1.0 as float32
image = hdf5_paths_[uid][:,:,:]
if self.conf.compose is not None:
if self.conf.compose == "RGB":
image = image[:, :, [0,1,2]]
elif self.conf.compose == "RYB":
image = image[:, :, [0,3,2]]
elif self.conf.compose == "G":
image = np.dstack((image[:, :, 1], image[:, :, 1], image[:, :, 1]))
elif self.conf.compose == "RYGYB":
                image = np.dstack(((image[:, :, 0] + image[:, :, 3])/2.0, (image[:, :, 1] + image[:, :, 3]/2)/1.5, image[:, :, 2]))
else:
raise Exception("Unknown compose:", self.conf.compose)
return image
def cleanup(self):
if self.hdf5_paths is not None:
[v.close() for k, v in self.hdf5_paths.items()] if isinstance(self.hdf5_paths, dict) else self.hdf5_paths.close()
print("HDF5 factory cleaned") if self.verbose is True else None
# In[ ]:
# Dataset with all images
def zero(x, y=None):
return 0
class HPADataset(Dataset):
def __init__(self, df, factory, conf, subset="train", categoricals=None, augment=None, postprocess=None, modelprepare=None, classes=None, weights=False, dump=None, verbose=False):
super().__init__()
self.df = df
self.categoricals = categoricals
self.subset = subset
self.augment = augment
self.postprocess = postprocess
self.modelprepare = modelprepare
self.classes = classes
self.conf = conf
self.factory = factory
self.dump = dump
self.verbose = verbose
if subset == 'train':
self.get_offset = np.random.randint
elif subset == 'valid':
self.get_offset = zero
elif subset == 'ho':
self.get_offset = zero
elif subset == 'test':
self.get_offset = zero
else:
raise RuntimeError("Unknown subset")
# Compute weights
self.weights = self.compute_weights(self.df) if subset == "train" and weights is True else None
def prob_from_weight(self, labels_list, weights_dict_, cap=None):
labels_weights = np.array([weights_dict_[class_mapping[int(label_)]] for label_ in labels_list])
prob_ = np.nanmean(labels_weights)
if cap is not None:
prob_ = np.clip(prob_, 0, cap) # Clip to avoid too much single rare labels, for example: 95th percentile cut, or top K
return prob_
def compute_weights(self, df_):
weights_dict = {label: 1/df_[label].sum() for label in class_names}
cap_ = self.conf.sampler_cap
if cap_ is not None and cap_ == "auto":
top_weights = sorted(weights_dict.items(), key=operator.itemgetter(1), reverse=True)[:3]
print("top_weights", top_weights) if self.verbose is True else None
cap_ = top_weights[2][1] # Cap to the top 3rd weight
df_dist = df_[[ID, LABEL]].copy()
df_dist["prob"] = df_dist[LABEL].apply(lambda x: self.prob_from_weight(x, weights_dict, cap=cap_))
if self.verbose is True:
print("compute_weights completed, cap:", self.conf.sampler_cap, cap_)
for i, (k, v) in enumerate(weights_dict.items()):
print(i, k, v)
return df_dist[["prob"]]
def cleanup(self):
self.factory.cleanup()
def __len__(self):
return len(self.df)
def read_image(self, row):
uid = row[ID]
container = row[EXT]
# Load image
img = self.factory.read_image(uid, container=container)
# Scale image after cropping
if self.conf.image_size is not None and self.conf.image_size != img.shape[0]:
img = skimage.transform.resize(img, (self.conf.image_size, self.conf.image_size), anti_aliasing=True) # Works with float image
if self.conf.denormalize is not None:
img = (self.conf.denormalize * img).astype(np.uint8)
return img
def get_data(self, row, categoricals):
# Return image
img = self.read_image(row)
# Labels (OHE)
labels = np.zeros(self.conf.num_classes, dtype=np.uint8)
for l in row[LABEL]:
labels[l] = 1
sample = {
'image': img,
'label': labels,
}
if self.dump is not None:
sample[ID] = row[ID]
if EID in row:
sample[META] = np.array([row[EID], int(row[IMAGE_WIDTH]), int(row[IMAGE_HEIGHT])], dtype=np.int32)
# Optional augmentation on RGBY image (uint8)
if self.augment:
tmp = self.augment(image=sample['image'])
sample['image'] = tmp["image"] # Apply on full image
# Mandatory to feed model
if self.modelprepare: # Albumentations to normalize data
tmp = self.modelprepare(image=sample['image'])
sample['image'] = tmp["image"] # Apply on full image
return sample
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
row = self.df.iloc[idx]
sample = self.get_data(row, self.categoricals)
return sample
# In[ ]:
# (BS, CLASSES, 12, 12) - Between 0-1.0
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def make_cam(x, epsilon=1e-5):
x = F.relu(x) # (BS, CLASSES, 12, 12)
    b, c, h, w = x.size() # (BS, CLASSES, 12, 12)
flat_x = x.view(b, c, (h * w)) # (BS, CLASSES, 12x12)
max_value = flat_x.max(axis=-1)[0].view((b, c, 1, 1))
return F.relu(x - epsilon) / (max_value + epsilon) # (BS, CLASSES, 12, 12)
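# Optional sanity check (not part of training): make_cam rescales each per-class activation
# map by its own spatial maximum, so the returned CAMs lie in [0, 1) regardless of the
# magnitude of the backbone features.
def _demo_make_cam_range():
    import torch
    x = torch.randn(2, 5, 12, 12) * 10.0
    cam = make_cam(x)
    assert float(cam.min()) >= 0.0 and float(cam.max()) < 1.0
    return cam.shape  # (2, 5, 12, 12)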
# Input (BS, C, H, W), num_pieces = 4
# Return (BS*4, C, H//4, W//4)
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def tile_features(features, num_pieces):
_, _, h, w = features.size()
num_pieces_per_line = int(math.sqrt(num_pieces))
h_per_patch = h // num_pieces_per_line
w_per_patch = w // num_pieces_per_line
"""
+-----+-----+
| 1 | 2 |
+-----+-----+
| 3 | 4 |
+-----+-----+
+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 |
+-----+-----+-----+-----+
"""
patches = []
for splitted_features in torch.split(features, h_per_patch, dim=2):
for patch in torch.split(splitted_features, w_per_patch, dim=3):
patches.append(patch)
return torch.cat(patches, dim=0)
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def merge_features(features, num_pieces, batch_size):
"""
+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 |
+-----+-----+-----+-----+
+-----+-----+
| 1 | 2 |
+-----+-----+
| 3 | 4 |
+-----+-----+
"""
features_list = list(torch.split(features, batch_size))
num_pieces_per_line = int(math.sqrt(num_pieces))
index = 0
ext_h_list = []
for _ in range(num_pieces_per_line):
ext_w_list = []
for _ in range(num_pieces_per_line):
ext_w_list.append(features_list[index])
index += 1
ext_h_list.append(torch.cat(ext_w_list, dim=3))
features = torch.cat(ext_h_list, dim=2)
return features
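# Optional sanity check (not called during training): tiling an input into num_pieces patches
# and merging the per-patch results back reconstructs the original tensor exactly, which is
# the property the Puzzle-CAM style reconstruction relies on.
def _demo_puzzle_round_trip():
    import torch
    x = torch.randn(2, 3, 8, 8)
    restored = merge_features(tile_features(x, 4), 4, batch_size=2)
    assert torch.allclose(restored, x)
    return restored.shape  # (2, 3, 8, 8)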
# In[ ]:
# Add 4 channels support
def get_4channels_conv(stem_conv2d):
stem_conv2d_pretrained_weight = stem_conv2d.weight.clone()
stem_conv2d_ = nn.Conv2d(4,
stem_conv2d.out_channels, kernel_size=stem_conv2d.kernel_size, stride=stem_conv2d.stride, padding=stem_conv2d.padding, padding_mode=stem_conv2d.padding_mode, dilation=stem_conv2d.dilation,
                             bias=stem_conv2d.bias is not None)  # carry over a bias term only if the pretrained conv has one
stem_conv2d_.weight = nn.Parameter(torch.cat([stem_conv2d_pretrained_weight, nn.Parameter(torch.mean(stem_conv2d_pretrained_weight, axis=1).unsqueeze(1))], axis=1))
return stem_conv2d_
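# Illustrative sketch of the stem conversion above on a plain nn.Conv2d stand-in (not a real
# timm backbone): the converted conv keeps the pretrained RGB filters and initialises the
# extra fourth input channel with the mean of the RGB weights.
def _demo_get_4channels_conv():
    import torch
    import torch.nn as nn
    stem = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
    stem4 = get_4channels_conv(stem)
    assert stem4.weight.shape == (16, 4, 3, 3)
    return stem4(torch.zeros(1, 4, 32, 32)).shape  # (1, 16, 16, 16)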
class HPAModel(nn.Module):
def __init__(self, cfg):
super().__init__()
self.num_classes = cfg.num_classes
self.backbone = cfg.backbone
self.with_cam = cfg.with_cam
self.drop_rate = cfg.dropout
self.preprocess_input_fn = get_preprocessing_fn(cfg)
# Unpooled/NoClassifier (features only)
self.mfeatures = timm.create_model(self.backbone, pretrained=True, num_classes=0, global_pool='')
# Add one channel more
if cfg.compose is None:
if "regnet" in self.backbone:
self.mfeatures.stem.conv = get_4channels_conv(self.mfeatures.stem.conv)
elif "csp" in self.backbone:
self.mfeatures.stem[0].conv = get_4channels_conv(self.mfeatures.stem[0].conv)
elif "resnest" in self.backbone:
self.mfeatures.conv1[0] = get_4channels_conv(self.mfeatures.conv1[0])
elif "seresnext" in self.backbone:
self.mfeatures.conv1 = get_4channels_conv(self.mfeatures.conv1)
elif "densenet" in self.backbone:
self.mfeatures.features.conv0 = get_4channels_conv(self.mfeatures.features.conv0)
# Classifier
num_chs = self.mfeatures.feature_info[-1]['num_chs'] # 1296 # 2048
self.mclassifier = nn.Conv2d(num_chs, self.num_classes, 1, bias=False)
# self.mclassifier = timm.models.layers.linear.Linear(num_chs, self.num_classes, bias=True)
# Initialize weights
self.initialize([self.mclassifier])
print("Model %s, last channels: %d, classes: %d" % (cfg.backbone, num_chs, self.num_classes))
# Pooling
def adaptive_avgmax_pool2d(self, x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
# Average pooling 2d
def global_average_pooling_2d(self, x, keepdims=False):
x = torch.mean(x.view(x.size(0), x.size(1), -1), -1)
if keepdims:
x = x.view(x.size(0), x.size(1), 1, 1)
return x
def gap(self, x, keepdims=False):
return self.global_average_pooling_2d(x, keepdims=keepdims)
def initialize(self, modules):
for m in modules:
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
# ([BS, C, H, W])
x = self.mfeatures(x) # (BS, num_chs, 12, 12)
features = None
if self.with_cam is True:
if self.drop_rate is not None and self.drop_rate > 0.0:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
features = self.mclassifier(x) # (BS, CLASSES, 12, 12)
logits = self.gap(features) # (BS, CLASSES)
else:
x = self.gap(x, keepdims=True) # (BS, num_chs, 1, 1)
if self.drop_rate is not None and self.drop_rate > 0.0:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
logits = self.mclassifier(x).view(-1, self.num_classes) # (BS, CLASSES)
return {"logits": logits, "features": features} # (BS, CLASSES), (BS, CLASSES, 12, 12)
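# Illustrative check (stand-in sizes, ignoring dropout): with a bias-free 1x1 conv classifier,
# "classify then global-average-pool" equals "pool then classify", which is why the with_cam
# branch above can expose class activation maps without changing the resulting logits.
def _demo_cam_gap_equivalence():
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    conv = nn.Conv2d(8, 4, kernel_size=1, bias=False)
    x = torch.randn(2, 8, 12, 12)
    logits_cam = conv(x).mean(dim=(2, 3))                      # classify, then GAP
    logits_gap = conv(F.adaptive_avg_pool2d(x, 1)).flatten(1)  # GAP, then classify
    assert torch.allclose(logits_cam, logits_gap, atol=1e-5)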
class HPASiameseModel(nn.Module):
def __init__(self, cfg):
super().__init__()
self.num_classes = cfg.num_classes
self.backbone = cfg.backbone
self.with_cam = cfg.with_cam
self.puzzle_pieces = cfg.puzzle_pieces
self.preprocess_input_fn = get_preprocessing_fn(cfg)
self.cnn1 = HPAModel(cfg)
if cfg.hpa_classifier_weights is not None:
if os.path.exists(cfg.hpa_classifier_weights):
print("Load regular HPA weights from: %s" % cfg.hpa_classifier_weights)
self.cnn1.load_state_dict(torch.load(cfg.hpa_classifier_weights, map_location=cfg.map_location))
print("Model %s" % (cfg.mtype))
def forward_once(self, x):
x = self.cnn1(x)
return x # {"logits": logits, "features": features}
def forward(self, x):
# ([BS, C, H, W])
bs, _, _, _ = x.shape
# Full image
x1 = self.forward_once(x)
single_logits, single_features = x1["logits"], x1["features"]
# Tiled image
tiled_x = tile_features(x, self.puzzle_pieces) # (BS*puzzle_pieces, C, H//puzzle_pieces, W//puzzle_pieces) # 2x memory
x2 = self.forward_once(tiled_x) # Shared weights
tiled_logits, tiled_features = x2["logits"], x2["features"]
tiled_features = merge_features(tiled_features, self.puzzle_pieces, bs) # (BS, CLASSES, 12, 12)
tiled_logits_reconstructed = self.cnn1.gap(tiled_features) # (BS, CLASSES)
return {
"single_logits": single_logits, "single_features": single_features,
"tiled_logits_flatten": tiled_logits, "tiled_features": tiled_features,
"tiled_logits": tiled_logits_reconstructed,
}
# In[ ]:
def build_model(cfg, device, encoder_weights=None):
if cfg.mtype == "siamese":
model = HPASiameseModel(cfg)
else:
model = HPAModel(cfg)
# Load weights
if (encoder_weights is not None) and ("imagenet" not in encoder_weights):
if os.path.exists(encoder_weights):
print("Load weights before optimizer from: %s" % encoder_weights)
model.load_state_dict(torch.load(encoder_weights, map_location=cfg.map_location))
model = model.to(device)
if cfg.optimizer == "Adam":
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, betas=(cfg.beta1, 0.999))
elif cfg.optimizer == "SGD":
optimizer = SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, momentum=0.9)
# Loss
loss = cfg.loss
loss = loss.to(device)
return model, loss, optimizer
# In[ ]:
def format_logs(logs):
str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
s = ', '.join(str_logs)
return s
# Train loop
def train_loop_fn(batches, preprocessing, model, optimizer, criterion, tmp_conf, device, stage="Train", verbose=True, scaler=None):
model.train()
count, train_loss = 0, 0.0
all_predicted_probs, all_target_classes = None, None
with tqdm(batches, desc=stage, file=sys.stdout, disable=not(verbose)) as iterator:
for x, batch in enumerate(iterator, 1):
try:
for k, v in batch.items():
batch[k] = v.to(device)
samples_data, labels_data = batch.get("image"), batch.get("label")
optimizer.zero_grad() # reset gradient
# Model
with torch.cuda.amp.autocast(enabled=tmp_conf.fp16):
# Preprocessing
with torch.no_grad():
data = preprocessing(samples_data) if preprocessing is not None else samples_data
output = model(data) # forward pass
if tmp_conf.mtype == "siamese":
loss = criterion(output[tmp_conf.output_key], labels_data.float(),
features_single=output[tmp_conf.output_key_extra],
y_pred_tiles=output[tmp_conf.output_key_siamese],
features_tiles=output[tmp_conf.output_key_extra_siamese],
y_pred_tiled_flatten=output["tiled_logits_flatten"])
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
else:
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
# Compute loss
loss = criterion(output, labels_data.float())
if (tmp_conf.ITERATIONS_LOGS > 0) and (x % tmp_conf.ITERATIONS_LOGS == 0):
loss_value = loss.item()
if ~np.isnan(loss_value): train_loss += loss_value
else: print("Warning: NaN loss")
# backward pass
scaler.scale(loss).backward() if scaler is not None else loss.backward()
# Update weights
if scaler is not None:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
if (tmp_conf.ITERATIONS_LOGS > 0) and (x % tmp_conf.ITERATIONS_LOGS == 0):
# Labels predictions
predicted_probs = torch.sigmoid(output) if tmp_conf.post_activation == "sigmoid" else output
predicted_probs = predicted_probs.detach().cpu().numpy()
target_classes = labels_data.detach().cpu().numpy()
# Concatenate for all batches
all_predicted_probs = np.concatenate([all_predicted_probs, predicted_probs], axis=0) if all_predicted_probs is not None else predicted_probs
all_target_classes = np.concatenate([all_target_classes, target_classes], axis=0) if all_target_classes is not None else target_classes
count += 1
if verbose:
scores_str = {"train_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores_str["train_loss"] = (train_loss / count)
iterator.set_postfix_str(format_logs(scores_str))
except Exception as ex:
print("Training batch error:", ex)
scores = {"train_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores["train_loss"] = (train_loss / count)
return (scores, all_target_classes, all_predicted_probs)
# In[ ]:
# Valid loop
def valid_loop_fn(batches, preprocessing, model, criterion, tmp_conf, device, stage="Valid", verbose=True):
model.eval()
count, valid_loss = 0, 0.0
all_predicted_probs, all_target_classes = None, None
with tqdm(batches, desc=stage, file=sys.stdout, disable=not(verbose)) as iterator:
for batch in iterator:
try:
for k, v in batch.items():
batch[k] = v.to(device)
samples_data, labels_data = batch.get("image"), batch.get("label")
with torch.no_grad():
# NN model
with torch.cuda.amp.autocast(enabled=tmp_conf.fp16):
# Preprocessing
data = preprocessing(samples_data) if preprocessing is not None else samples_data
output = model(data) # forward pass
if tmp_conf.mtype == "siamese":
loss = criterion(output[tmp_conf.output_key], labels_data.float(),
features_single=output[tmp_conf.output_key_extra],
y_pred_tiles=output[tmp_conf.output_key_siamese],
features_tiles=output[tmp_conf.output_key_extra_siamese],
y_pred_tiled_flatten=output["tiled_logits_flatten"])
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
else:
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
# Compute loss
loss = criterion(output, labels_data.float())
loss_value = loss.item()
if ~np.isnan(loss_value): valid_loss += loss_value
else: print("Warning: NaN loss")
# Labels predictions
predicted_probs = torch.sigmoid(output) if tmp_conf.post_activation == "sigmoid" else output
predicted_probs = predicted_probs.detach().cpu().numpy()
target_classes = labels_data.detach().cpu().numpy()
# Concatenate for all batches
all_predicted_probs = np.concatenate([all_predicted_probs, predicted_probs], axis=0) if all_predicted_probs is not None else predicted_probs
all_target_classes = np.concatenate([all_target_classes, target_classes], axis=0) if all_target_classes is not None else target_classes
count += 1
if verbose:
scores_str = {"valid_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores_str["valid_loss"] = (valid_loss / count)
iterator.set_postfix_str(format_logs(scores_str))
except Exception as ex:
print("Validation batch error:", ex)
scores = {"valid_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores["valid_loss"] = (valid_loss / count)
return (scores, all_target_classes, all_predicted_probs)
# In[ ]:
# Train one fold
def run_stage(X_train, X_valid, stage, fold, device):
# Build model
snapshot_path = "%s/fold%d/%s/snapshots" % (MODEL_PATH, fold, stage)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
cnn_model, criterion, optimizer = build_model(conf, device,
encoder_weights=os.path.join(snapshot_path.replace(stage, PRETRAINED_STAGE), MODEL_BEST) if PRETRAINED_STAGE is not None else None)
if RESUME == True:
resume_path = os.path.join(snapshot_path, MODEL_BEST)
if os.path.exists(resume_path):
cnn_model.load_state_dict(torch.load(resume_path, map_location=conf.map_location))
print("Resuming, model weights loaded: %s" % resume_path)
factory = DataFactory_(ALL_IMAGES, conf=conf)
# Datasets
train_dataset = HPADataset(X_train, factory, conf, subset="train", augment=image_augmentation_train, modelprepare=get_preprocessing(cnn_model.preprocess_input_fn), dump=None, weights=True, verbose=True)
valid_dataset = HPADataset(X_valid, factory, conf, subset="valid", augment=None, modelprepare=get_preprocessing(cnn_model.preprocess_input_fn), dump=None, verbose=False) if X_valid is not None else None
train_sampler = WeightedRandomSampler(weights=train_dataset.weights[conf.sampler].values, replacement=True, num_samples=len(train_dataset)) if conf.sampler is not None else None
print("Stage:", stage, "fold:", fold, "on:", device, "workers:", conf.WORKERS, "post_activation:", conf.post_activation, "batch size:", conf.BATCH_SIZE, "metric_:", conf.METRIC_,
"train dataset:", len(train_dataset), "valid dataset:", len(valid_dataset) if valid_dataset is not None else None, "num_classes:", conf.num_classes, "fp16:", conf.fp16, "aug:", image_augmentation_train,
"sampler:", train_sampler)
# Dataloaders
train_loader = DataLoader(train_dataset, batch_size=conf.BATCH_SIZE, sampler=train_sampler, num_workers=conf.WORKERS, drop_last = False, pin_memory=conf.pin_memory, shuffle=True if train_sampler is None else False)
valid_loader = DataLoader(valid_dataset, batch_size=conf.BATCH_SIZE, shuffle=False, num_workers=conf.WORKERS, drop_last = False, pin_memory=conf.pin_memory) if X_valid is not None else None
scheduler = None
if conf.scheduler is not None:
if conf.scheduler == "ReduceLROnPlateau":
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=conf.METRIC_, factor=conf.scheduler_factor, min_lr=0.000001, patience=conf.scheduler_patience, verbose=True)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, conf.EPOCHS_PER_CYCLE, T_mult=1, eta_min=conf.min_lr)
print(criterion, optimizer, scheduler)
metric = METRIC_NAME
valid_loss_min = np.Inf
metric_loss_criterion = np.Inf if conf.METRIC_ == "min" else -np.Inf
history = []
scaler = torch.cuda.amp.GradScaler(enabled=conf.fp16) if conf.fp16 is True else None
for epoch in tqdm(range(1, conf.EPOCHS + 1)):
        # current learning rate (for logging)
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            lr = optimizer.param_groups[0]['lr']
        elif scheduler is not None:
            lr = scheduler.get_last_lr()[0]
        elif isinstance(optimizer, (torch.optim.SGD, torch.optim.Adam)):
            lr = optimizer.param_groups[0]['lr']
        else:
            lr = optimizer.get_last_lr()
info = "[%d], lr=%.7f" % (epoch, lr)
# Train loop
train_scores, _, _ = train_loop_fn(train_loader, None, cnn_model, optimizer, criterion, conf, device, stage="Train%s" % info, verbose=conf.train_verbose, scaler=scaler)
# Validation loop
valid_scores, _, all_predicted_probs_ = valid_loop_fn(valid_loader, None, cnn_model, criterion, conf, device, stage="Valid%s" % info, verbose=conf.valid_verbose) if valid_loader is not None else ({"valid_%s" % metric: 0, "valid_loss": 0}, None, None)
# Keep track of loss and metrics
history.append({"epoch":epoch, "lr": lr, **train_scores, **valid_scores})
if conf.scheduler is not None:
scheduler.step(valid_scores["valid_%s" % metric]) if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) else scheduler.step()
metric_loss = valid_scores["valid_%s" % metric]
if (conf.METRIC_ == "min" and metric_loss < metric_loss_criterion and epoch > 1) or (conf.METRIC_ == "max" and metric_loss > metric_loss_criterion and epoch > 1):
print("Epoch%s, Valid loss from: %.4f to %.4f, Metric improved from %.4f to %.4f, saving model ..." % (info, valid_loss_min, valid_scores["valid_loss"], metric_loss_criterion, metric_loss))
metric_loss_criterion = metric_loss
valid_loss_min = valid_scores["valid_loss"]
torch.save(cnn_model.state_dict(), os.path.join(snapshot_path, MODEL_BEST))
# Save per image OOF
oof_pd = pd.DataFrame(all_predicted_probs_)
oof_pd = oof_pd.set_index(X_valid[ID].values)
oof_pd.to_csv("%s/oof_%d.csv" % (snapshot_path, fold))
factory.cleanup()
if history:
# Plot training history
history_pd = pd.DataFrame(history[1:]).set_index("epoch")
train_history_pd = history_pd[[c for c in history_pd.columns if "train_" in c]]
valid_history_pd = history_pd[[c for c in history_pd.columns if "valid_" in c]]
lr_history_pd = history_pd[[c for c in history_pd.columns if "lr" in c]]
fig, ax = plt.subplots(1,2, figsize=(DEFAULT_FIG_WIDTH, 6))
t_epoch = train_history_pd["train_%s" % metric].argmin() if conf.METRIC_ == "min" else train_history_pd["train_%s" % metric].argmax()
v_epoch = valid_history_pd["valid_%s" % metric].argmin() if conf.METRIC_ == "min" else valid_history_pd["valid_%s" % metric].argmax()
d = train_history_pd.plot(kind="line", ax=ax[0], title="Epoch: %d, Train: %.3f" % (t_epoch, train_history_pd.iloc[t_epoch,:]["train_%s" % metric]))
d = lr_history_pd.plot(kind="line", ax=ax[0], secondary_y=True)
d = valid_history_pd.plot(kind="line", ax=ax[1], title="Epoch: %d, Valid: %.3f" % (v_epoch, valid_history_pd.iloc[v_epoch,:]["valid_%s" % metric]))
d = lr_history_pd.plot(kind="line", ax=ax[1], secondary_y=True)
train_history_pd.to_csv("%s/train.csv" % snapshot_path)
valid_history_pd.to_csv("%s/valid.csv" % snapshot_path)
plt.savefig("%s/train.png" % snapshot_path, bbox_inches='tight')
plt.show() if PT_SCRIPT is False else None
return (history)
# In[ ]:
# Mandatory transform to feed model
def to_tensor(x, **kwargs):
return x.transpose(2, 0, 1).astype('float32')
def aug_custom(x, **kwargs):
return x
def preprocess_input(x, mean=None, std=None, input_space="RGB", input_range=None, **kwargs):
if input_space == "BGR":
x = x[..., ::-1].copy()
if input_range is not None:
if x.max() > 1 and input_range[1] == 1:
x = x / 255.0
if mean is not None:
mean = np.array(mean)
x = x - mean
if std is not None:
std = np.array(std)
x = x / std
return x
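# Small numpy-only sketch of preprocess_input on a synthetic uint8 image. The mean/std and
# input_range values below are placeholders for illustration, not the constants used by any
# particular backbone in this notebook.
def _demo_preprocess_input():
    import numpy as np
    img = np.random.randint(0, 256, size=(8, 8, 3)).astype(np.uint8)
    out = preprocess_input(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
                           input_space="RGB", input_range=[0, 1])
    return out.min(), out.max()  # roughly z-normalised floats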
def get_custom(**kwargs):
return A.Lambda(name="custom", image=aug_custom, **kwargs)
def get_preprocessing_fn(cfg):
params = {"mean": cfg.IMG_MEAN, "std": cfg.IMG_STD, "input_range": cfg.INPUT_RANGE}
return functools.partial(preprocess_input, **params)
def get_preprocessing(preprocessing_fn):
return A.Compose([
A.Lambda(image=preprocessing_fn), # Convert uint8 (0-255) in range [0-1.0] and apply Apply Z-Norm that depends on each model,
A.Lambda(image=to_tensor), # Convert (H, W, C) to (C, H, W)
])
# In[ ]:
# Optional augmentations (Works with C=4 layers)
def image_harder_augmentation_train(p=1.0):
return A.Compose([
# Crop smaller tile randomly (uint8, float32, H, W, C)
A.RandomCrop(RESIZED_IMAGE_SIZE, RESIZED_IMAGE_SIZE, always_apply=True, p=1.0) if RESIZED_IMAGE_SIZE != IMAGE_SIZE else A.NoOp(p=1.0),
# Noise
A.OneOf([
A.GaussNoise(var_limit=(5.0, 30.0), p=0.5),
A.CoarseDropout(max_holes=8, max_height=32, max_width=32, p=0.5),
A.IAAAdditiveGaussianNoise(scale=(0.02 * 255, 0.05 * 255), p=0.5),
], p=0.5),
# Flips/Rotations
A.HorizontalFlip(p=0.5),
A.RandomRotate90(p=1.0),
# Rotate/Distorsion
A.OneOf([
A.GridDistortion(num_steps=5, distort_limit=0.3, p=0.15),
A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1, rotate_limit=30, p=0.75),
A.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=0.15),
A.OpticalDistortion(distort_limit=0.05, shift_limit=0.05, p=0.10),
A.IAAAffine(shear=5.0, p=0.5),
], p=0.5),
# Blurs
A.OneOf([
A.GaussianBlur(blur_limit=(3, 5), p=0.5), # 5 to 7
A.MotionBlur(blur_limit=(3, 5), p=0.5),
A.MedianBlur(blur_limit=(3, 7), p=0.5),
], p=0.5),
# Stain/colors
A.OneOf([
A.RandomGamma(gamma_limit=(80, 120), p=0.5),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
], p=0.25),
], p=p)
# In[ ]:
# Display inputs/outputs of the model
def check_model(conf):
print("Check model")
    m, _, _ = build_model(conf, "cpu")
tmp_factory = DataFactory_(ALL_IMAGES, conf=conf, verbose=True)
tmp_dataset = HPADataset(train_pd, tmp_factory, conf, subset="train", verbose=False, augment=image_augmentation_train, modelprepare=get_preprocessing(m.preprocess_input_fn), weights=True)
print("tmp_dataset:", len(tmp_dataset))
tmp_sampler = WeightedRandomSampler(weights=tmp_dataset.weights[conf.sampler].values, replacement=True, num_samples=len(tmp_dataset)) if conf.sampler is not None else None
# tmp_sampler = torch.utils.data.RandomSampler(tmp_dataset)
tmp_loader = DataLoader(tmp_dataset, batch_size=5, num_workers=0, drop_last = False, pin_memory=False, sampler=tmp_sampler, shuffle=False)
for tmp_batch in tmp_loader:
for key, value in tmp_batch.items():
print(key, value.shape, value.dtype, "min", value.min(), "max", value.max(), "mean", value.float().mean())
tmp_out = m(tmp_batch.get("image"))
tmp_out = tmp_out[conf.output_key] if conf.output_key is not None else tmp_out
break
print("tmp_out", tmp_out.shape, tmp_out.dtype, "min", tmp_out.min(), "max", tmp_out.max(), "mean", tmp_out.mean())
total_params = sum(p.numel() for p in m.parameters() if p.requires_grad)
print("Params:", total_params)
tmp_loss = conf.loss(tmp_out, tmp_batch["label"].float()) # (BS, seq_len), (BS, seq_len)
print("loss", tmp_loss)
tmp_probs = torch.sigmoid(tmp_out) if conf.post_activation == "sigmoid" else tmp_out
print("tmp_probs", tmp_probs.shape, tmp_probs.dtype, "min", tmp_probs.min(), "max", tmp_probs.max(), "mean", tmp_probs.mean())
tmp_factory.cleanup()
# In[ ]:
# Some previews with augmentation
def display_preview():
tmp_factory = DataFactory_(ALL_IMAGES, conf=conf, verbose=True)
tmp_dataset = HPADataset(train_pd, tmp_factory, conf, subset="train", verbose=False, augment=image_augmentation_train)
print("tmp_dataset:", len(tmp_dataset))
tmp_loader = DataLoader(tmp_dataset, batch_size=16, num_workers=0, drop_last = False, pin_memory=False, sampler=None, shuffle=False)
ROWS = 6
COLS = 8
fig, ax = plt.subplots(ROWS, COLS, figsize=(20, 16))
print("Loading images")
i = 0
for tmp_batch in tmp_loader:
images = tmp_batch["image"]
labels = tmp_batch.get("label")
print(images.shape, labels.shape) if i == 0 else None
for img, label in zip(images, labels):
r = i%ROWS
c = i//ROWS
d = ax[r, c].imshow(img.numpy()[:,:,[0,1,2]])
d = ax[r, c].grid(None)
d = ax[r, c].axis('off')
d = ax[r, c].set_title("%s" % [i for i, x in enumerate(label.cpu().numpy()) if x != 0])
i = i + 1
if i >= ROWS*COLS: break
if i >= ROWS*COLS: break
tmp_factory.cleanup()
plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.show()
# In[ ]:
def check_performances(workers_ = 0):
print("Check performances")
augment_ = image_augmentation_train
    m, _, _ = build_model(conf, conf.L_DEVICE)
tmp_factory = DataFactory_(ALL_IMAGES, conf=conf, verbose=True)
tmp_dataset = HPADataset(train_pd, tmp_factory, conf, subset="train", verbose=False, augment=augment_, modelprepare=get_preprocessing(m.preprocess_input_fn))
print("tmp_dataset:", len(tmp_dataset))
tmp_loader = DataLoader(tmp_dataset, batch_size=16, num_workers=workers_, drop_last = False, pin_memory=False, sampler=None, shuffle=True)
i = 0
for tmp_batch in tqdm(tmp_loader):
images = tmp_batch["image"]
labels = tmp_batch["label"]
if i == 0: print(images.shape, images.dtype, images.max(), labels.shape)
i = i + 1
tmp_factory.cleanup()
# In[ ]:
def display_fold(train_pd_, kf_):
# Check how well the folds are stratified.
print("fold 1 2 3 4 total")
print("======================================================================")
for label in class_names:
label_padded = label + " "*(43-len(label))
dist = ": "
for train_idx, valid_idx in kf_.split(train_pd_, train_pd_.iloc[:, LABELS_OHE_START:LABELS_OHE_START+19]):
X_train, X_valid = train_pd_.iloc[train_idx], train_pd_.iloc[valid_idx]
dist += "{:4d} ".format(X_valid[label].sum())
dist += "{:4d} ".format(train_pd_[label].sum())
print(label_padded + dist)
label_padded = "total" + " "*(43-len("total"))
# In[ ]:
def none_or_str(value):
if value == 'None':
return None
return value
def get_argsparser():
parser = argparse.ArgumentParser()
parser.add_argument('--factory', default="HDF5", type=none_or_str, help='HDF5 or None')
parser.add_argument('--mtype', default='siamese', type=str)
parser.add_argument('--backbone', default='seresnext50_32x4d', type=str)
parser.add_argument('--gamma', default=0.5, type=float)
parser.add_argument('--lr', default=0.0003, type=float)
parser.add_argument('--seed', default=2020, type=int)
parser.add_argument('--batch_size', default=36, type=int)
parser.add_argument('--epochs', default=48, type=int)
parser.add_argument('--workers', default=8 if PT_SCRIPT is True else 0, type=int)
parser.add_argument('--resume_fold', default=0, type=int)
parser.add_argument('--stage', default='stage1', type=str, help='stage to train')
parser.add_argument('--pretrained_stage', default=None, type=none_or_str, help='stage to load pretrained weights from')
parser.add_argument('--labels_file', default='train_cleaned_default_external.csv', type=str, help='CSV file with labels')
parser.add_argument('--additional_labels_file', default=None, type=none_or_str, help='Additional CSV file with labels like train_cleaned_2018.csv')
return parser
# In[ ]:
if __name__ == '__main__':
import os, sys, random, math
import pandas as pd
import timeit, os, gc, psutil
if PT_SCRIPT is False:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
import warnings
from sklearn import metrics
from functools import partial
from collections import OrderedDict
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from PIL import Image
from ast import literal_eval
warnings.filterwarnings('ignore')
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', 250)
pd.set_option('display.max_rows', 100)
import skimage.io
import skimage.transform
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
DEFAULT_FIG_WIDTH = 28
sns.set_context("paper", font_scale=1.2)
import argparse
print('Python : ' + sys.version.split('\n')[0])
print('Numpy : ' + np.__version__)
print('Pandas : ' + pd.__version__)
print('PyTorch : ' + torch.__version__)
print('Albumentations: ' + A.__version__)
print('Timm : ' + timm.__version__)
print('Iterstrat : ' + iterstrat.__version__)
# Parse arguments
args = get_argsparser().parse_args() if PT_SCRIPT is True else get_argsparser().parse_args(['--seed', '2020']) # '--additional_labels_file', 'train_cleaned_2018.csv'
# Fixed seed for reproducibility
seed = args.seed
seed_everything(seed)
# All data
PARTS = ["external"]
if args.additional_labels_file is not None:
PARTS = PARTS + ["additional"]
if args.factory == "HDF5":
ALL_IMAGES = {
DEFAULT: TRAIN_HOME + 'images_%d.hdf5' % IMAGE_SIZE,
}
for p in PARTS:
ALL_IMAGES[p] = TRAIN_HOME + 'images_%s_%d.hdf5' % (p, IMAGE_SIZE)
DataFactory_ = HDF5DataFactory
else:
ALL_IMAGES = TRAIN_IMAGES_HOME
DataFactory_ = DataFactory
print("Factory", DataFactory_, ALL_IMAGES)
# Override basic configuration
conf = raw_conf(args.factory)
conf.mtype = args.mtype
conf.backbone = args.backbone
conf.gamma = args.gamma
conf.lr = args.lr
conf.BATCH_SIZE = args.batch_size
conf.EPOCHS = args.epochs
conf.WORKERS = args.workers
print('Running on device: {}'.format(conf.L_DEVICE))
MODEL_NAME = "%s_%s_%d_%d_%s_%s%s_v3.0" % (conf.mtype, conf.backbone, IMAGE_SIZE, RESIZED_IMAGE_SIZE if RESIZED_IMAGE_SIZE is not None else IMAGE_SIZE, COMPOSE if COMPOSE is not None else "RGBY", "fp16_" if conf.fp16 is True else "", "CV%d" % conf.FOLDS if conf.FOLDS > 0 else "FULL")
MODEL_PATH = HOME + "models/" + MODEL_NAME
STAGE = args.stage
MODEL_BEST = 'model_best.pt'
if not os.path.exists(MODEL_PATH):
os.makedirs(MODEL_PATH)
TRAIN = True
RESUME_FOLD = args.resume_fold
RESUME = True
PRETRAINED = None
PRETRAINED_STAGE = args.pretrained_stage
FREEZE_BACKBONE = False
PRETRAINED_BACKBONE_STAGE = None
USE_AUG = True
# Load ID, Labels, ...
train_pd = prepare_data(args.labels_file)
print("Labels:", args.labels_file, "train_pd:", train_pd.shape)
if args.additional_labels_file is not None:
FILTER = "additional"
train_extra_pd = prepare_data(args.additional_labels_file, ext_name = FILTER)
        print("Additional labels:", args.additional_labels_file, "train_extra_pd:", train_extra_pd.shape)
train_pd = | pd.concat([train_pd, train_extra_pd], axis=0) | pandas.concat |
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import lightgbm as lgb
import sklearn
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.externals import joblib
from keras import Sequential
from keras.layers import LSTM, Dropout, Dense
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
from xgboost import XGBRegressor, plot_importance
from matplotlib import pyplot
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
data_path = '../input'
# data_path = 'data'
out_path = '../input'
submission_path = '../input'
ver = 6
# Any results you write to the current directory are saved as output.
def unreasonable_data(data):
print("----------Reasonable of Data----------")
print("Min Value:", data.min())
print("Max Value:", data.max())
print("Average Value:", data.mean())
print("Center Point of Data:", data.median())
print(data.describe())
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
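# Illustrative sketch (not part of the pipeline): drop_duplicate mutates the frame in place
# and reports how many rows were removed for the given subset of key columns.
def _demo_drop_duplicate():
    toy = pd.DataFrame({'store': [1, 1, 2], 'item': [1, 1, 3], 'sales': [5, 5, 7]})
    drop_duplicate(toy, sub_set=['store', 'item'])
    return toy.shape  # (2, 3)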
def pre_process_data_3():
sales = pd.read_csv('%s/train.csv' % data_path, parse_dates=['date'], infer_datetime_format=True,dayfirst=True)
val = | pd.read_csv('%s/test.csv' % data_path) | pandas.read_csv |
import pandas as pd
from datetime import datetime, timedelta
spain_c19_casos_url = 'https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_casos_long.csv'
spain_c19_muertes_url = 'https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_fallecidos_long.csv'
spain_c19_uci_url = 'https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_uci_long.csv'
spain_c19_altas_url = 'https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_altas_long.csv'
spain_c19_hosp_url = 'https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_hospitalizados_long.csv'
usa_c19_url = 'http://covidtracking.com/api/states/daily.csv'
world_pop_url = 'https://raw.githubusercontent.com/datasets/population/master/data/population.csv'
usa_states_pop = 'https://raw.githubusercontent.com/CivilServiceUSA/us-states/master/data/states.csv'
if __name__ == '__main__':
df_spain_cases = pd.read_csv(spain_c19_casos_url)
df_spain_deceased = pd.read_csv(spain_c19_muertes_url)
########################
# CV
########################
df_cv = df_spain_cases.loc[df_spain_cases['cod_ine'] == 10]
df_cv = df_cv.rename(columns={'total': 'cases', 'fecha': 'dateRep'})
df_cv = df_cv.drop(columns=['cod_ine', 'CCAA'])
# adds data from 01-01 to 26-2, with num. cases = 0
prev_dates = [str(datetime.strptime('2020-01-01', '%Y-%m-%d') + timedelta(days=d))[:10] for d in range(57)]
df_prev_cv = | pd.DataFrame(prev_dates, columns=['dateRep']) | pandas.DataFrame |
import pandas as pd
from .resources import get
def get_table(
table_code: str,
territorial_level: str,
ibge_territorial_code: str,
variable: str = None,
classification: str = None,
categories: str = None,
period: str = None,
header: str = None,
format: str = "pandas",
):
data = get(
table_code,
territorial_level,
ibge_territorial_code,
variable,
classification,
categories,
period,
header,
)
if format == "pandas":
return | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/python
import os
import sys
import csv
import json
import pandas
import dataLib
validYeargroups = ["R","1","2","3","4"]
invalidYeargroups = ["5","6","7","8","9","10","11"]
def formToYearGroup(theForm):
for validYeargroup in validYeargroups:
if theForm.startswith(validYeargroup):
found = True
for invalidYeargroup in invalidYeargroups:
if invalidYeargroup in theForm:
found = False
if found == True:
return validYeargroup
return None
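# Illustrative examples (not run by the export itself): forms map to a year group by their
# leading character(s), while secondary forms such as "10A" are rejected because "10" is in
# the invalid list.
def _demoFormToYearGroup():
    assert formToYearGroup("3B") == "3"
    assert formToYearGroup("RAsh") == "R"
    assert formToYearGroup("10A") is None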
# Load the config file (set by the system administrator).
config = dataLib.loadConfig(["dataFolder"])
# Make sure the output folder exists.
outputRoot = config["dataFolder"] + os.sep + "Mathletics"
os.makedirs(outputRoot, exist_ok=True)
# Input data headings:
# Pupils: GUID,UserCode,GivenName,FamilyName,DateOfBirth,Gender,Username,YearGroup,Form,Tutor
# Staff: GUID,UserCode,Title,GivenName,FamilyName,DateOfBirth,Username,Identifier,Form,JobTitle
# Output in Excel spreadsheet:
# Student First Name (Mandatory), Student Surname (Mandatory), Student Year (Mandatory), Class Name (Mandatory), Teacher Title (Optional), Teacher First name (Mandatory), Teacher Surname (Mandatory), Teacher Email (Mandatory)
mathletics = pandas.DataFrame(columns=["Student First Name (Mandatory)","Student Surname (Mandatory)","Student Year (Mandatory)","Class Name (Mandatory)","Teacher Title (Optional)","Teacher First name (Mandatory)","Teacher Surname (Mandatory)","Teacher Email (Mandatory)"])
pupils = | pandas.read_csv(config["dataFolder"] + os.sep + "pupils.csv", header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import plotly
plotly.offline.init_notebook_mode()
import plotly.offline as po
import plotly.plotly as py
import plotly.graph_objs as go
import cufflinks as cf
import plotly.figure_factory as ff
symbols = ['ITUB4', 'ABEV3', 'BBDC4', 'PETR4', 'VALE5']
taxas = ['risk_free', 'ipca']
dados = | pd.DataFrame() | pandas.DataFrame |
# RAiSERHD module
# <NAME>, 23 Feb 2022
# import packages
import h5py
import numpy as np
import pandas as pd
import time as ti
import os, warnings
from astropy import constants as const
from astropy import units as u
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.cosmology import FlatLambdaCDM
from astropy.io import fits
from astropy import wcs
from copy import copy
from matplotlib import pyplot as plt
from matplotlib import cm, rc
from matplotlib.colors import LogNorm
from matplotlib.ticker import FormatStrFormatter, NullFormatter, LogLocator
from numba import jit
from scipy.optimize import least_squares
from scipy.special import gamma, zeta
## Define global variables that can be adjusted to customise model output
# basic constants
year = 365.2422*24*3600 # average year in seconds
maverage = (0.6*const.m_p.value) # kg average particle mass
hubble = 0.7 # dimensionless Hubble parameter
OmegaM = 0.27 # fraction of matter in the flat universe
OmegaD = 0.73 # fraction of dark energy in the flat universe
freq_cmb = 5.879e10 # frequency of cosmic microwave background at z = 0
temp_cmb = 2.725 # temperature of cosmic microwave background at z = 0
c_speed = const.c.value # speed of light
e_charge = const.e.value # electron charge
k_B = const.k_B.value # Boltzmann constant
m_e = const.m_e.value # electron mass
mu0 = const.mu0.value # vacuum permeability
sigma_T = const.sigma_T.value # electron scattering cross-section
# model parameters that can be optimised for efficiency
nangles = 16 # number of angles to calculate expansion rate along (must be greater than 1)
betaRegions = 64 # set maximum number of beta regions
limTime = (year) # the FR-II limit must be used before this time
stepRatio = 1.01 # ratio to increase time/radius
crit_age = 0.95 # fraction of source age for lower end of power law approximations
lambda_min = 1e-256 # minimum value of Lambda for computational efficiency
# shocked gas and lobe parameters
chi = 2*np.pi/3.0 # lobe geometry parameter
shockAxisRatio = 0.5875 # exponent relating the cocoon axis ratio to the shocked gas axis ratio
shockRadius = 1.072 # fraction of the radius the shocked gas is greater than the lobe
gammaX = (5./3) # lorentz factor of external gas
gammaJ = (4./3) # lorentz factor of jet plasma
# set electron energy distribution constants
Lorentzmin = 780. # minimum Lorentz factor of injected electrons AT HOTSPOT for Cygnus A
Lorentzmax = 1e6 # effectively infinity
# density and temperature profiles
rCutoff = 0.01 # minimum radius to match profiles as a fraction of r200
betaMax = 2 # set critical value above which the cocoon expands balistically
# average and standard deviation of Vikhlinin model parameters
alphaAvg = 1.64 # corrected for removal of second core term
alphaStdev = 0.30
betaPrimeAvg = 0.56
betaPrimeStdev = 0.10
gammaPrimeAvg = 3
gammaPrimeStdev = 0
epsilonAvg = 3.23
epsilonStdev = 0 # 1.93; this parameter has little effect on profile
rCoreAvg = 0.087 # this is ratio of rc to r200
rCoreStdev = 0.028
rSlopeAvg = 0.73 # this is ratio of rs to r200
rSlopeStdev = 0 # 0.39; this parameter has little effect on profile
# temperature parameters
TmgConst = (-2.099)
TmgSlope = 0.6678
TmgError = 0.0727
# new temperature parameters assuming heating from AGN during expansion
TmgAvg = 7.00
TmgStdev = 0.28
# approximate halo to gas fraction conversion
# for halo masses between 10^12 and 10^15 and redshifts 0 < z < 5
halogasfracCONST1z0 = (-0.881768418)
halogasfracCONST1z1 = (-0.02832004)
halogasfracCONST2z0 = (-0.921393448)
halogasfracCONST2z1 = 0.00064515
halogasfracSLOPE = 0.053302276
# uncertainties, in dex
dhalogasfracz0 = 0.05172769
dhalogasfracz1 = (-0.00177947)
# correction to SAGE densities
SAGEdensitycorr = (-0.1)
## Define functions for run-time user output
def __join(*values):
return ";".join(str(v) for v in values)
def __color_text(s, c, base=30):
template = '\x1b[{0}m{1}\x1b[0m'
t = __join(base+8, 2, __join(*c))
return template.format(t, s)
class Colors:
DogderBlue = (30, 144, 255)
Green = (0,200,0)
Orange = (255, 165, 0)
## Define main function to run RAiSE HD
def RAiSE_run(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5, equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, brightness=True, angle=0., resolution='standard', seed=None, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# record start time of code
start_time = ti.time()
# function to test type of inputs and convert type where appropriate
if nangles <= 1:
raise Exception('Private variable nangles must be greater than 1.')
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# download and pre-process particles from hydrodynamical simulation
if not resolution == None:
print(__color_text('Reading particle data from file.', Colors.Green))
time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio = __PLUTO_particles('RAiSE_particles.hdf5')
# set seed for quasi-random profiles
if not seed == None:
__set_seed(seed)
# create folder for output files if not present
if not os.path.exists('LDtracks'):
os.mkdir('LDtracks')
if not resolution == None:
print(__color_text('Running RAiSE dynamics and emissivity.', Colors.Green))
else:
print(__color_text('Running RAiSE dynamics.', Colors.Green))
for i in range(0, len(redshift)):
for j in range(0, len(axis_ratio)):
for k in range(0, len(jet_power)):
for l in range(0, nenvirons):
for m in range(0, len(active_age)):
for n in range(0, len(equipartition)):
for o in range(0, len(jet_lorentz)):
# set correct data types for halo mass and core density
if isinstance(halo_mass, (list, np.ndarray)):
new_halo_mass = halo_mass[l]
else:
new_halo_mass = halo_mass
if isinstance(rho0Value, (list, np.ndarray)):
new_rho0Value = rho0Value[l]
new_temperature = temperature[l]
new_betas = betas[l]
new_regions = regions[l]
else:
new_rho0Value = rho0Value
new_temperature = temperature
new_betas = betas
new_regions = regions
# calculate dynamical evolution of lobe and shocked shell using RAiSE dynamics
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda = __RAiSE_environment(redshift[i], axis_ratio[j], jet_power[k], source_age, halo_mass=new_halo_mass, rand_profile=rand_profile, rho0Value=new_rho0Value, regions=new_regions, betas=new_betas, temperature=new_temperature, active_age=active_age[m], jet_lorentz=jet_lorentz[o], gammaCValue=gammaCValue, aj_star=aj_star, jet_angle=jet_angle, axis_exponent=axis_exponent, fill_factor=fill_factor)
# calculate synchrotron emission from lobe using particles and RAiSE model
if not resolution == None:
location, luminosity, magnetic_field = __RAiSE_emissivity(frequency, redshift[i], time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age[m], equipartition[n], spectral_index, gammaCValue=gammaCValue, lorentz_min=lorentz_min, resolution=resolution)
# create pandas dataframe for integrated emission
df = pd.DataFrame()
df['Time (yrs)'] = 10**np.asarray(source_age).astype(np.float_)
df['Size (kpc)'] = 2*lobe_lengths[0,:]/const.kpc.value
df['Pressure (Pa)'] = shock_pressures[0,:]
df['Axis Ratio'] = lobe_lengths[0,:]/lobe_lengths[-1,:]
if not resolution == None:
for q in range(0, len(frequency)):
if frequency[q] > 0:
df['B{:.2f} (T)'.format(frequency[q])] = magnetic_field[:,q]
df['L{:.2f} (W/Hz)'.format(frequency[q])] = np.nansum(luminosity[:,:,q], axis=1)
# write data to file
if isinstance(rho0Value, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
elif isinstance(halo_mass, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate brightness per pixel across the source
if brightness == True and not resolution == None:
x_values, y_values, brightness_list = __RAiSE_brightness_map(frequency, redshift[i], source_age, lobe_lengths, location, luminosity, angle, resolution=resolution)
for p in range(0, len(source_age)):
for q in range(0, len(frequency)):
# create pandas dataframe for spatially resolved emission
if isinstance(x_values[p][q], (list, np.ndarray)):
df = pd.DataFrame(index=x_values[p][q]/const.kpc.value, columns=y_values[p][q]/const.kpc.value, data=brightness_list[p][q])
# write surface brightness map to file
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
else:
if isinstance(rho0Value, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
elif isinstance(halo_mass, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# print total run time to screen
print(__color_text('RAiSE completed running after {:.2f} seconds.'.format(ti.time() - start_time), Colors.Green))
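# Illustrative sketch of a single call (not executed on import). The parameter values are
# arbitrary but fall inside the ranges accepted by __test_inputs; with resolution=None only
# the dynamical tracks are computed, so no particle file or emissivity grid is required.
def _example_RAiSE_run():
    RAiSE_run(frequency=9.18, redshift=0.05, axis_ratio=4.0, jet_power=38.5,
              source_age=[7.0, 7.5, 8.0], halo_mass=13.5, resolution=None)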
# Define function to test type of inputs and convert type where appropriate
def __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz):
# convert redshift, axis ratio and jet power to correct data types
if not isinstance(frequency, (list, np.ndarray)):
frequency = [frequency]
for i in range(0, len(frequency)):
if not isinstance(frequency[i], (int, float)):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
else:
if frequency[i] <= 0:
frequency[i] = -1.
warnings.warn('Pressure map will be produced instead of surface brightness image.', category=UserWarning)
elif not (5 < frequency[i] and frequency[i] < 20):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
if not isinstance(redshift, (list, np.ndarray)):
redshift = [redshift]
for i in range(0, len(redshift)):
if not isinstance(redshift[i], (int, float)) or not (0 < redshift[i] and redshift[i] < 20):
raise Exception('Redshift must be provided as a float or list/array of floats.')
if not isinstance(axis_ratio, (list, np.ndarray)):
axis_ratio = [axis_ratio]
for i in range(0, len(axis_ratio)):
if not isinstance(axis_ratio[i], (int, float)) or not (1 <= axis_ratio[i] and axis_ratio[i] < 20):
raise Exception('Axis ratio must be provided as a float or list/array of floats and be greater than 1.')
if not isinstance(jet_power, (list, np.ndarray)):
jet_power = [jet_power]
for i in range(0, len(jet_power)):
if not isinstance(jet_power[i], (int, float)) or not (33 < jet_power[i] and jet_power[i] < 46):
raise Exception('Jet power must be provided as a float or list/array of floats in units of log10 Watts.')
if not isinstance(source_age, (list, np.ndarray)):
source_age = [source_age]
for i in range(0, len(source_age)):
if not isinstance(source_age[i], (int, float)) or not (0 <= source_age[i] and source_age[i] <= 10.14):
raise Exception('Source age must be provided as a float or list/array of floats in units of log10 years.')
else:
source_age[i] = float(source_age[i])
if not isinstance(active_age, (list, np.ndarray)):
active_age = [active_age]
for i in range(0, len(active_age)):
if not isinstance(active_age[i], (int, float)) or not (0 <= active_age[i] and active_age[i] <= 10.14):
raise Exception('Active age must be provided as a float or list/array of floats in units of log10 years.')
if not isinstance(equipartition, (list, np.ndarray)):
equipartition = [equipartition]
for i in range(0, len(equipartition)):
if not isinstance(equipartition[i], (int, float)) or not (-6 < equipartition[i] and equipartition[i] < 6):
raise Exception('Equipartition factor must be provided as a float or list/array of floats in units of log10.')
if not isinstance(jet_lorentz, (list, np.ndarray)):
jet_lorentz = [jet_lorentz]
for i in range(0, len(jet_lorentz)):
if not isinstance(jet_lorentz[i], (int, float)) or not (-100 <= jet_lorentz[i] and jet_lorentz[i] < 20):
raise Exception('Jet bulk lorentz factor factor must be provided as a float or list/array of floats.')
elif (-100 <= jet_lorentz[i] and jet_lorentz[i] <= 1):
jet_lorentz[i] = 0
warnings.warn('Jet phase will not be included in this simulation.', category=UserWarning)
# convert environment to correct data types
if not isinstance(halo_mass, (list, np.ndarray)) and not halo_mass == None:
halo_mass = [halo_mass]
nenvirons_halo = len(halo_mass)
elif not halo_mass == None:
nenvirons_halo = len(halo_mass)
if isinstance(halo_mass, (list, np.ndarray)):
for i in range(0, len(halo_mass)):
if not isinstance(halo_mass[i], (int, float)) or not (9 < halo_mass[i] and halo_mass[i] < 17):
raise Exception('Dark matter halo mass must be provided as a float or list/array of floats in units of log10 stellar mass.')
if not isinstance(rho0Value, (list, np.ndarray)) and not rho0Value == None:
rho0Value = [rho0Value]
nenvirons_rho = len(rho0Value)
elif not rho0Value == None:
nenvirons_rho = len(rho0Value)
if isinstance(rho0Value, (list, np.ndarray)):
if not isinstance(temperature, (list, np.ndarray)) and not temperature == None:
temperature = [temperature]*nenvirons_rho
elif temperature == None or not len(temperature) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(betas, (list, np.ndarray)) and not isinstance(betas[0], (list, np.ndarray)):
betas = [betas]*nenvirons_rho
elif not isinstance(betas, (list, np.ndarray)) and not betas == None:
betas = [[betas]]*nenvirons_rho
elif betas == None or not len(betas) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(regions, (list, np.ndarray)) and not isinstance(regions[0], (list, np.ndarray)):
regions = [regions]*nenvirons_rho
elif not isinstance(regions, (list, np.ndarray)) and not betas == None:
regions = [[regions]]*nenvirons_rho
elif regions == None or not len(regions) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(rho0Value, (list, np.ndarray)):
nenvirons = nenvirons_rho
for i in range(0, len(rho0Value)):
if not isinstance(rho0Value[i], (int, float)) or not (1e-30 < rho0Value[i] and rho0Value[i] < 1e-15):
raise Exception('Core gas density must be provided as a float or list/array of floats in units of kg/m^3.')
for i in range(0, len(temperature)):
if not isinstance(temperature[i], (int, float)) or not (0 < temperature[i] and temperature[i] < 1e12):
raise Exception('Gas temperature must be provided as a float or list/array of floats in units of Kelvin.')
else:
nenvirons = nenvirons_halo
return frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons
# Define random seed function
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __set_seed(value):
np.random.seed(value)
## Define functions for analytic modelling of the environment
# function to calculate properties of the environment and call RAiSE_evolution
def __RAiSE_environment(redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., gammaCValue=5./3, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# check minimal inputs
if halo_mass == None and (not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray))):
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate gas mass and virial radius of halo unless density and temperature profile fully specified
gasfraction = 0
if not halo_mass == None:
rVir = (10**halo_mass*const.M_sun.value/(100./const.G.value*(100.*hubble*np.sqrt(OmegaM*(1 + redshift)**3 + OmegaD)/const.kpc.value)**2))**(1./3)
if rand_profile == False:
gasfraction = __HalogasfracFunction(halo_mass, redshift)
else:
gasfraction = __rand_norm(__HalogasfracFunction(halo_mass, redshift), __dHalogasfracFunction(halo_mass, redshift))
gasMass = 10**(halo_mass + gasfraction)*const.M_sun.value
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
if not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray)):
# set maximum number of regions
nregions = betaRegions
nregions, new_betas, new_regions = __DensityProfiler(rVir, nregions, rand_profile)
elif len(betas) == len(regions):
# set maximum number of regions
nregions = len(betas)
new_betas = np.asarray(betas.copy())
new_regions = np.asarray(regions.copy())
else:
raise Exception('Variables betas and regions must be arrays of the same length.')
# calculate the average temperature of the external medium
if temperature == None:
if not halo_mass == None:
if rand_profile == False:
tempFlat = 10**TmgAvg
tempCluster = 10**(TmgConst + TmgSlope*halo_mass)
else:
tempFlat = 10**(__rand_norm(TmgAvg, TmgStdev))
tempCluster = 10**(__rand_norm(TmgConst + TmgSlope*halo_mass, TmgError))
temperature = max(tempFlat, tempCluster) # take the highest temperature out of the flat profile and cluster model
else:
raise Exception('Either the halo mass or temperature must be provided as model inputs.')
# determine initial value of density parameter given gas mass and density profile
if not rho0Value == None:
# determine density parameter in the core
k0Value = rho0Value*new_regions[0]**new_betas[0]
# extend first beta region to a radius of zero
new_regions[0] = 0
elif not halo_mass == None:
# extend first beta region to a radius of zero
new_regions[0] = 0
# find relative values (i.e. to 1) of density parameter in each beta region
kValues = __DensityParameter(nregions, 1.0, new_betas, new_regions)
# determine density parameter in the core
k0Value = __k0ValueFinder(rVir, gasMass, nregions, new_betas, new_regions, kValues)
else:
raise Exception('Either the halo mass or core density must be provided as model inputs.')
# find values of density parameter in each beta region
kValues = __DensityParameter(nregions, k0Value, new_betas, new_regions)
# call RadioSourceEvolution function to calculate Dt tracks
return __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, new_betas, new_regions, kValues, temperature, jet_lorentz, aj_star, jet_angle, axis_exponent, fill_factor)
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityProfiler(rVir, nregions, rand_profile):
# instantiate variables
betas, regions = np.zeros(nregions), np.zeros(nregions)
# set values of Vikhlinin model parameters
if rand_profile == False:
alpha = alphaAvg
betaPrime = betaPrimeAvg
gammaPrime = gammaPrimeAvg # this value has no uncertainty
epsilon = epsilonAvg
rCore = rCoreAvg
rSlope = rSlopeAvg
else:
alpha = __rand_norm(alphaAvg, alphaStdev)
betaPrime = __rand_norm(betaPrimeAvg, betaPrimeStdev)
gammaPrime = __rand_norm(gammaPrimeAvg, gammaPrimeStdev) # this value has no uncertainty
epsilon = __rand_norm(epsilonAvg, epsilonStdev)
rCore = __rand_norm(rCoreAvg, rCoreStdev)
rSlope = __rand_norm(rSlopeAvg, rSlopeStdev)
# set minimum and maximum radius for density profile to be matched
rmin = rCutoff*rVir
rmax = rVir
# use logarithmic radius scale
r = rmin
ratio = (rmax/rmin)**(1./(nregions)) - 1
for count in range(0, nregions):
# set radius at low end of region
rlow = r
# calculate relative density at rlow, i.e. ignoring rho_0 factor
rhoLow = np.sqrt((rlow/(rCore*rVir))**(-alpha)/((1 + rlow**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rlow**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# increment radius
dr = r*ratio
r = r + dr
# set radius at high end of region
rhigh = r
# calculate relative density at rhigh, i.e. ignoring rho_0 factor
rhoHigh = np.sqrt((rhigh/(rCore*rVir))**(-alpha)/((1 + rhigh**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rhigh**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# set value of innermost radius of each beta region
if count == 0:
# extend first beta region to a radius of zero
regions[count] = 0
else:
regions[count] = rlow
# calculate exponent beta for each region to match density profile, ensuring beta is less than 2
if (-np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh) < betaMax):
betas[count] = -np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh)
else:
# ensure beta is less than (or equal to) 2
betas[count] = betaMax
# set this count to be the number of distinct regions
nregions = count + 1
break
return nregions, betas, regions
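# Worked example (illustrative numbers): if the relative density drops by a factor of 10 while the radius
# doubles, the matched exponent is beta = ln(10)/ln(2) ~ 3.3; this exceeds betaMax (2, per the comments above),
# so the region is capped at betaMax and the loop terminates with nregions set to the current count.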
# find values of density parameter in each beta region
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityParameter(nregions, k0Value, betas, regions):
# instantiate variables
kValues = np.zeros(nregions)
# calculate density parameters in each region
for count in range(0, nregions):
# match tracks between regions `a' and `b'
if count > 0:
# find replicating core density in region `b' required to match pressures and times
kValues[count] = kValues[count - 1]*regions[count]**(betas[count] - betas[count - 1])
# if first region, set initial value of replicating core density as actual core density
else:
kValues[count] = k0Value
return kValues
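# For example, with two regions the recursion gives kValues[1] = k0Value*regions[1]**(betas[1] - betas[0]),
# which keeps the piecewise density rho(r) = kValues[i]*r**(-betas[i]) continuous at the boundary r = regions[1].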
# determine value of the density parameter at the core given gas mass and density profile
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __k0ValueFinder(rVir, gasMass, nregions, betas, regions, kValues):
# set volume to zero initially
volume = 0
# calculate weighted volume integral by analytically integrating the volume in each beta region
for count in range(0, nregions):
# set lower bound of analytic integral
rlow = regions[count]
# set upper bound of analytic integral
if (count + 1 == nregions):
rhigh = rVir
else:
rhigh = regions[count + 1]
# increment total weighted volume by weighted volume of this region
volume = volume + 4*np.pi*(kValues[count]/kValues[0])/(3 - betas[count])*(rhigh**(3 - betas[count]) - rlow**(3 - betas[count]))
# calculate density parameter at the core from gas mass and weighted volume
k0Value = gasMass/volume
return k0Value
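# In effect this evaluates the analytic mass integral M_gas = k0*sum_i 4*pi*(k_i/k0)/(3 - beta_i)*
# (r_(i+1)**(3 - beta_i) - r_i**(3 - beta_i)) for the piecewise profile rho(r) = k_i*r**(-beta_i), then inverts it for k0.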
# random normal with values truncated to avoid sign changes
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rand_norm(mean, stdev):
rand_number = np.random.normal(mean, stdev)
while (mean*rand_number < 0 or np.abs(rand_number - mean) > 2*stdev):
rand_number = np.random.normal(mean, stdev)
return rand_number
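# Illustrative example (assumed values): __rand_norm(0.5, 0.1) redraws until the sample has the same sign as
# the mean and lies within two standard deviations, i.e. returns a value in the interval [0.3, 0.7].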
# gas fraction-halo mass relationship
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __HalogasfracFunction(halo_mass, redshift):
return max(halogasfracCONST1z0 + halogasfracCONST1z1*redshift, halogasfracCONST2z0 + halogasfracCONST2z1*redshift) + halogasfracSLOPE*(halo_mass - 14) + SAGEdensitycorr # in log space
# gas fraction-halo mass relationship error
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __dHalogasfracFunction(halo_mass, redshift):
return dhalogasfracz0 + dhalogasfracz1*redshift # in log space
## Define functions required for RAiSE dynamical evolution
# function to calculate dynamical evolution of lobe and shocked shell
def __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, betas, regions, kValues, temperature, jet_lorentz, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# convert jet power and source age to correct units
QavgValue = 10**jet_power/2. # set the power of *each* jet; convert from log space
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = np.array([10**source_age*year])
tActive = 10**active_age*year
# calculate angle of current radial line
angles = np.arange(0, nangles, 1).astype(np.int_)
dtheta = (np.pi/2)/nangles
theta = dtheta*(angles + 0.5)
# calculate opening angle of jet
open_angle = (jet_angle*np.pi/180)/(axis_ratio/2.83)
# evaluate the translation coefficients eta_c and eta_s
eta_c = 1./np.sqrt(axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_s = 1./np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
# evaluate the translation coefficient zeta_s/eta_s at t -> infinity
zetaeta = np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)/np.sqrt(axis_ratio**(4*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_c[0], eta_s[0], zetaeta[0] = 1., 1., 1.
# calculate the differential volume element coefficient chi
dchi = 4*np.pi/3.*np.sin(theta)*np.sin(dtheta/2.)
# solve RAiSE dynamics iteratively to find thermal component of lobe pressure
if jet_lorentz > 1:
# run code in strong-shock limit to calibrate initial velocity
x_time = 10**10.14*year
_, _, _, _, _, _, _, critical_point_1 = __RAiSE_runge_kutta(QavgValue, np.array([x_time]), x_time, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed, strong_shock=True)
# run code for full RAiSE HD dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point_3 = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed*critical_point_1[2]/critical_point_1[3])
else:
# run code for RAiSE X dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, _ = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue)
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda
# function to apply Runge-Kutta method and extract values at requested time steps
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_runge_kutta(QavgValue, source_age, active_age, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=0., strong_shock=False):
# instantiate variables
X, P = np.zeros((nangles, 5)), np.zeros((nangles, 4))
critical_point = np.zeros(4)
regionPointer = np.zeros(nangles).astype(np.int_)
lobe_minor, lambda_crit, alphaP_denv, alpha_lambda = np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age))
lobe_lengths, shock_lengths, shock_pressures = np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age)))
# calculate injection ages to derive time-average power-law indices for external pressure and filling factor
inject_age = np.zeros(2*len(source_age))
inject_axis_ratios, inject_pressures, inject_lambdas = np.zeros(2*len(source_age)), np.zeros(2*len(source_age)), np.zeros(2*len(source_age))
for timePointer in range(0, len(source_age)):
inject_age[2*timePointer:2*(timePointer + 1)] = np.asarray([crit_age*source_age[timePointer], source_age[timePointer]])
inject_index = np.argsort(inject_age) # sort ages in ascending order
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
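# For the default aj_star = 0.231 with jet_lorentz = 5 (illustrative values), the expressions above give
# bulk_lorentz ~ 1.03 and bulk_velocity ~ 0.25c, i.e. the spatially-averaged flow is only mildly relativistic.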
i = 0
for timePointer in range(0, len(source_age)):
# set initial conditions for each volume element
if timePointer == 0:
# calculate initial time and radius for ODE
FR2time = limTime
if jet_lorentz > 1:
FR2radius = bulk_velocity*limTime
FR2velocity = bulk_velocity # eta_R is very large
else:
FR2radius = np.sqrt(1 - 1./100**2)*c_speed*limTime
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# test if this radius is above start of second region boundary
if (regions[1] < FR2radius):
FR2radius = regions[1]
if jet_lorentz > 1:
FR2time = regions[1]/bulk_velocity
FR2velocity = bulk_velocity
else:
FR2time = regions[1]/(np.sqrt(1 - 1./100**2)*c_speed)
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# calculate the initial jet/shock shell radius and velocity for each angle theta
X[angles,0] = FR2time
X[angles,1] = FR2radius*eta_s
X[angles,2] = FR2velocity*eta_s
if jet_lorentz > 1:
X[0,3], X[angles[1:],3] = bulk_lorentz, 1./np.sqrt(1 - (FR2velocity*eta_s[angles[1:]]/c_speed)**2)
else:
X[0,3], X[angles[1:],3] = 100, 100*eta_s[angles[1:]]
X[angles,4] = -1 # null value
# set region pointer to first (non-zero) region if smaller than FR2 radius
index = regions[1] < X[angles,1]
regionPointer[index] = 1
regionPointer[np.logical_not(index)] = 0
# calculate fraction of jet power injected into each volume element
injectFrac = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac = injectFrac/np.sum(injectFrac) # sum should be equal to unity
# solve ODE to find radius and pressure at each time step
while (X[0,0] < source_age[timePointer]):
while (X[0,0] < inject_age[inject_index[i]]):
# calculate the appropriate density profile for each angle theta
for anglePointer in range(0, nangles):
while (regionPointer[anglePointer] + 1 < nregions and X[anglePointer,1] > regions[regionPointer[anglePointer] + 1]):
regionPointer[anglePointer] = regionPointer[anglePointer] + 1
# check if next step passes time point of interest
if (X[0,0]*stepRatio > inject_age[inject_index[i]]):
step = inject_age[inject_index[i]] - X[0,0]
else:
step = X[0,0]*(stepRatio - 1)
# update estimates of time, radius and velocity
__rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,3] = np.maximum(1, X[:,3])
# find location of jet--lobe transition
critical_point[0], critical_point[1], critical_point[2], critical_point[3] = X[0,0], X[0,1], X[0,2]*X[0,3], X[0,4]
# record axis ratio, external pressure and filling factor and injection times
if P[-1,0] > 0:
inject_axis_ratios[inject_index[i]] = 1./(P[0,0]/P[-1,0])**2 # inverted to match alpha_lambda definition
else:
inject_axis_ratios[inject_index[i]] = 1
inject_pressures[inject_index[i]] = P[0,2]
inject_lambdas[inject_index[i]] = P[0,3]
# update injection age if not a requested source age
if inject_age[inject_index[i]] < source_age[timePointer]:
i = i + 1
# calculate the lobe and shocked shell length, shock pressure and total pressure as a function of angle
lobe_lengths[angles,timePointer] = P[angles,0]
shock_lengths[angles,timePointer] = X[angles,1]
shock_pressures[angles,timePointer] = P[angles,1]
lambda_crit[timePointer] = P[0,3]
# calculate lobe minor axis (associated with dimensions of shocked shell) at this time step
lobe_minor[timePointer] = X[-1,1]*eta_c[-1]/(shockRadius*eta_s[-1])
# calculate the slope of external pressure profile at this time step
if inject_pressures[2*timePointer] <= 0: # index by original (unsorted) position, consistent with the ratio below
alphaP_denv[timePointer] = 0
else:
alphaP_denv[timePointer] = np.log(inject_pressures[2*timePointer + 1]/inject_pressures[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer])
if inject_lambdas[2*timePointer] <= 0:
alpha_lambda[timePointer] = 1e9 # no emission from this injection time
else:
alpha_lambda[timePointer] = np.log(inject_lambdas[2*timePointer + 1]/inject_lambdas[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) + np.log(inject_axis_ratios[2*timePointer + 1]/inject_axis_ratios[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) # filling factor and changing volume/axis ratio
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point
# Runge-Kutta method to solve ODE in dynamical model
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# instantiate variables
Y, K1, K2, K3, K4 = np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5))
# fourth order Runge-Kutta method
__xpsys(X, K1, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K1[:,:]
__xpsys(Y, K2, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K2[:,:]
__xpsys(Y, K3, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + step*K3[:,:] # final stage uses the full step in classical RK4
__xpsys(Y, K4, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,:] = X[:,:] + (step/6.)*(K1[:,:] + 2*K2[:,:] + 2*K3[:,:] + K4[:,:])
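# i.e. the classical fourth-order update X_(n+1) = X_n + (h/6)*(K1 + 2*K2 + 2*K3 + K4), with the stages
# evaluated at t_n, t_n + h/2, t_n + h/2 and t_n + h respectively.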
# coupled second order differential equations for lobe evolution
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __xpsys(X, f, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# Differential equations for X[0,1,2,3,4] = (time, radius, velocity, lorentz_factor, thermal_velocity)
# Additional variable for P[0,1,2,3] = (lobe_length, lobe_pressure, external_pressure, lambda_crit)
f[angles,0] = 1.
f[angles,1] = X[angles,2]
# test if the AGN is active at this time-step
if (X[0,0] <= active_age):
active_jet = 1
else:
active_jet = 0
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
# TWO-PHASE FLUID
if jet_lorentz > 1:
# calculate the lobe formation scale
eta_R = QavgValue*bulk_lorentz**2/(2*np.pi*kValues[regionPointer[0]]*(bulk_lorentz*bulk_velocity)*(bulk_lorentz - 1)*c_speed**2*(1 - np.cos(open_angle))*X[0,1]**(2 - betas[regionPointer[0]]))
# calculate lambda_crit
#if (eta_R/bulk_lorentz**2) > 1:
# lambda_crit = 0
#else:
# lambda_crit = 1
lambda_crit = np.exp(-(eta_R/bulk_lorentz**2)/(2*np.log(2)))
P[0,3] = lambda_crit
else:
P[0,3] = 1
# ACCELERATION
# update fraction of jet power injected into each volume element
injectFrac_new = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac_new = injectFrac_new/np.sum(injectFrac_new) # sum should be equal to unity
if jet_lorentz > 1:
injectFrac[angles] = (1 - lambda_crit)*injectFrac_new + lambda_crit*injectFrac # keep static at late times
else:
injectFrac[angles] = injectFrac_new[angles]
# acceleration of jet-head
if jet_lorentz > 1:
jet_acceleration = (betas[regionPointer[0]] - 2)*bulk_velocity*X[0,2]/(2*X[0,1]*(1 + eta_R**(-1./2))**2*eta_R**(1./2))
# acceleration of lobe (supersonic/subsonic)
if jet_lorentz > 1 and strong_shock == True:
f[angles,2] = np.minimum((gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)), (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,2]*X[angles,3]/(X[0,0] + year)) # ensure model doesn't run slower than limit due to numerics
elif jet_lorentz > 1:
f[angles,2] = (gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)) - (3*gammaCValue - betas[regionPointer[angles]])*(k_B*temperature/maverage)/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*(X[angles,3]*zetaeta[angles])**2)
else:
sub_angles = (X[angles,2]*X[angles,3]*zetaeta)**2/(gammaX*(k_B*temperature/maverage)) <= 1
super_angles = np.logical_not(sub_angles)
f[super_angles,2] = (gammaX + 1)*(gammaCValue - 1)*injectFrac[super_angles]*(QavgValue*active_jet)*X[super_angles,1]**(betas[regionPointer[super_angles]] - 3)/(2*X[super_angles,2]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*dchi[super_angles]*(X[super_angles,3]*zetaeta[super_angles])**2*kValues[regionPointer[super_angles]]) + (betas[regionPointer[super_angles]] - 3*gammaCValue)*(X[super_angles,2])**2/(2*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)) + (gammaX - 1)*(3*gammaCValue - betas[regionPointer[super_angles]])*(k_B*temperature/maverage)/(4*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*(X[super_angles,3]*zetaeta[super_angles])**2)
f[sub_angles,2] = (betas[regionPointer[sub_angles]] - 2)*(X[sub_angles,2])**2/X[sub_angles,1]
# combine acceleration from jet-head and lobe as two-phase fluid
if jet_lorentz > 1:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,2], f[angles[1:],2] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],2] = X[0,2]*eta_s[angles[1:]]
else:
f[0,2], f[angles[1:],2] = (1 - lambda_crit)*jet_acceleration + lambda_crit*f[0,2], (1 - lambda_crit)*jet_acceleration*eta_s[angles[1:]] + lambda_crit*f[angles[1:],2]
# calculate Lorentz factor of two-phase fluid
f[angles,3] = X[angles,3]**3*X[angles,2]*f[angles,2]/c_speed**2
# PRESSURES
# external pressure at each volume element
P[angles,2] = kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# set velocity associated with thermal component of lobe pressure
if jet_lorentz > 1 and critical_velocity > 0:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,4], f[angles[1:],4] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],4] = X[0,4]*eta_s[angles[1:]]
else:
f[angles,4] = (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,4]/(X[0,0] + year)
else:
X[angles,4], f[angles,4] = X[angles,2]*X[angles,3], f[angles,2]
# jet/lobe pressure at each volume element
volume = X[angles,1]**3*dchi[angles]
if jet_lorentz > 1:
# calculate lobe pressure
P[angles,1] = zetaeta[angles]**2*kValues[regionPointer[angles]]*X[angles,1]**(-betas[regionPointer[angles]])*(np.minimum(X[angles,2], X[angles,4]))**2 + kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
else:
# calculate lobe pressure
P[super_angles,1] = 2./(gammaX + 1)*zetaeta[super_angles]**2*kValues[regionPointer[super_angles]]*X[super_angles,1]**(-betas[regionPointer[super_angles]])*(X[super_angles,2]*X[super_angles,3])**2 - (gammaX - 1)/(gammaX + 1)*kValues[regionPointer[super_angles]]*(k_B*temperature/maverage)*X[super_angles,1]**(-betas[regionPointer[super_angles]])
P[sub_angles,1] = P[sub_angles,2]
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
# AXIS RATIO
if jet_lorentz > 1:
# calculate total mass of particles from the jet
particle_mass = QavgValue*np.minimum(active_age, X[0,0])/((bulk_lorentz - 1)*c_speed**2)
# calculate volume occupied by particles expanding at sound speed and maximum fillable volume within shocked shell
jet_sound = c_speed*np.sqrt(gammaJ - 1)
particle_volume = particle_mass/(gammaJ*pressure/jet_sound**2) # mass / density
shell_volume = np.sum(volume*eta_c/(shockRadius*eta_s))
# calculate (optimal) lobe volume as weighted sum of particle volume and maximum fillable volume (i.e. enable sound speed to reduce as lobe approaches size of shocked shell)
lobe_volume = 1./(1./(particle_volume/fill_factor)**axis_exponent + 1./(shell_volume)**axis_exponent)**(1./axis_exponent)
# find axis ratio for an ellipsoidal lobe
if lobe_volume > 0 and lambda_crit >= lambda_min:
lobe_axis_ratio = np.minimum(np.sqrt(2*np.pi*(X[0,1]/shockRadius)**3/(3*lobe_volume)), 1/np.tan(open_angle))
else:
lobe_axis_ratio = 1/np.tan(open_angle)
# update lobe length along jet axis and axis ratio of shocked shell
P[0,0] = X[0,1]/shockRadius
# calculate geometry of each angular volume element
dtheta = (np.pi/2)/len(angles)
theta = dtheta*(angles + 0.5)
lobe_eta_c = 1./np.sqrt(lobe_axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
# set length of lobe along each angular volume element
P[angles[1:],0] = np.minimum(lobe_eta_c[angles[1:]]*P[0,0], X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])) # second condition should rarely be met
else:
# set length of lobe along each angular volume element
P[0,0], P[angles[1:],0] = X[0,1]/shockRadius, X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])
## Define functions to download and preprocess particles from hydrodynamical simulations
def __PLUTO_particles(particle_data_path):
# unpack particle data from hydrodynamical simulations
particle_dict = h5py.File(os.path.join(os.path.dirname(os.path.realpath(__file__)), particle_data_path), 'r')
# store variables at desired resolution
time = particle_dict['time'][:].astype(np.float32)
shock_time = particle_dict['tinject'][:,:].astype(np.float32)
major = particle_dict['major'][:].astype(np.float32)
minor = particle_dict['minor'][:].astype(np.float32)
x1 = particle_dict['x1'][:,:].astype(np.float32)
x2 = particle_dict['x2'][:,:].astype(np.float32)
x3 = particle_dict['x3'][:,:].astype(np.float32)
tracer = particle_dict['tracer'][:,:].astype(np.float32)
vx3 = particle_dict['vx3'][:,:].astype(np.float32)
volume = particle_dict['volume'][:,:].astype(np.float32)
pressure = particle_dict['pressure'][:,:].astype(np.float32)
press_minor = particle_dict['pressminor'][:].astype(np.float32)
alphaP_hyd = particle_dict['alphaP'][:,:].astype(np.float32)
alphaP_henv = particle_dict['alphaPenv'][:,:].astype(np.float32)
hotspot_ratio = particle_dict['hotspotratio'][:].astype(np.float32)
return time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio
## Define functions to add emissivity from particles in hydrodynamical simulations on top of dynamics
# function to manage orientation and distribution of particles from simulation output
def __RAiSE_emissivity(frequency, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age, equipartition, spectral_index, gammaCValue=5./3, lorentz_min=Lorentzmin, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
nsamples = 2048
elif resolution == 'high':
nsamples = 512
elif resolution == 'standard':
nsamples = 128
elif resolution == 'poor':
nsamples = 32
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# select viewing times by cycling through the snapshots of the simulated source
timePointer = np.arange(0, nsamples).astype(np.int_)%len(time)
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
inverse_compton = np.zeros_like(frequency).astype(np.int_)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
if rest_frequency[freqPointer] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton[freqPointer] = 1
else:
rest_frequency = [10**frequency*(1 + redshift)]
inverse_compton = [0]
if rest_frequency[0] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton = [1]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
tActive = 10**active_age*year
equi_factor = 10**float(-np.abs(equipartition)) # ensure sign is correct
s_index = 2*float(np.abs(spectral_index)) + 1 # ensure sign is correct
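# e.g. the defaults equipartition = -1.5 and spectral_index = 0.7 used by the plotting routines below give
# equi_factor ~ 0.032 and s_index = 2.4 (illustrative of the expected magnitudes).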
# derive redshift dependent ancillary variables used by every analytic model
Ks = __RAiSE_Ks(s_index, gammaCValue, lorentz_min)
blackbody = __RAiSE_blackbody(s_index)
return __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody)
# function to calculate emissivity from each particle using RAiSE model
@jit(nopython=True, parallel=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody):
# instantiate variables
luminosity = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), len(rest_frequency)))
magnetic_field = np.zeros((len(tFinal), len(rest_frequency)))
magnetic_particle, magnetic_weighting = np.zeros((len(tFinal), len(timePointer), len(rest_frequency))), np.zeros((len(tFinal), len(timePointer), len(rest_frequency)))
location = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), 3))
# derive emissivity at each time step
for i in range(0, len(tFinal)):
# derive emissivity for random variations in particle distribution
for j in range(0, len(timePointer)):
# SHOCK ACCELERATION TIMES
new_shock_time = shock_time[:,timePointer[j]]*(tFinal[i]/time[timePointer[j]])*np.minimum(1., (tActive/tFinal[i])) # scale the last acceleration time to active age if source is a remnant
# PRESSURES
new_pressure = pressure[:,timePointer[j]]*(shock_pressures[-1,i]/press_minor[timePointer[j]]) # correction factor to match Model A
# correct the hotspot/lobe pressure ratio based on the dynamical model
new_pressure = new_pressure*((shock_pressures[0,i]/shock_pressures[-1,i])/hotspot_ratio[timePointer[j]] - 1)*(np.abs(x3[:,timePointer[j]])/major[timePointer[j]]) + new_pressure # increase log-space pressure linearly along lobe
# correct the evolutionary histories of the particles based on the dynamical model
alphaP_dyn = np.maximum(-2, np.minimum(0, alphaP_denv[i] + alphaP_hyd[:,timePointer[j]] - alphaP_henv[:,timePointer[j]]))
# VOLUMES
volume_fraction = volume[:,timePointer[j]]/(4*np.pi/3.*major[timePointer[j]]*minor[timePointer[j]]**2)
#volume_sum = np.nansum(volume_fraction[~np.isinf(volume_fraction)])
# cap the largest volumes at the 95th percentile to remove outliers in the surface brightness map; minimal effect on total luminosity
volume_fraction[volume_fraction > np.nanpercentile(volume_fraction, 95)] = np.nanpercentile(volume_fraction, 95)
new_volume = volume_fraction*(4*np.pi/3.*lobe_lengths[0,i]*lobe_minor[i]**2)*tracer[:,timePointer[j]] #/volume_sum
# RELATIVISTIC BEAMING
doppler_factor = np.sqrt(np.maximum(1e-6, 1 - vx3[:,timePointer[j]]**2))**(3 - (s_index - 1)/2.) # Doppler boosting of particles in jet; 1e-6 ensures some very low level emission
doppler_factor[np.logical_and(np.abs(x3[:,timePointer[j]])/major[timePointer[j]] < 0.1, np.logical_and(np.abs(x1[:,timePointer[j]])/major[timePointer[j]] < 0.01, np.abs(x2[:,timePointer[j]])/major[timePointer[j]] < 0.01))] = 0 # completely remove very bright particles clumped at start of jet
# LOBE PARTICLES
# find angle and radius of each particle from core
new_angles = np.arctan((np.sqrt(x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*lobe_minor[i]/minor[timePointer[j]])/(x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])) # rescale axes to correct axis ratio
new_radii = np.sqrt((x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*(lobe_minor[i]/minor[timePointer[j]])**2 + (x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])**2)/lobe_lengths[0,i]
# find particles within lobe region; particles outside this region will not emit. Particle map is set to axis ratio based on shocked shell to maintain geometry of jet
new_eta_c = 1./np.sqrt((lobe_lengths[0,i]/lobe_lengths[-1,i])**2*(np.sin(new_angles))**2 + (np.cos(new_angles))**2)
lobe_particles = np.zeros_like(x1[:,timePointer[j]])
lobe_particles[np.abs(vx3[:,timePointer[j]]) > 1./np.sqrt(3)] = 1 # assume sound speed is critical value for relativistic particles
lobe_particles[new_radii < new_eta_c] = 1.
# TWO PHASE FLUID
# fraction of jet particles that have reached location in lobe
two_phase_weighting = np.maximum(0, np.minimum(1, lambda_crit[i]*(new_shock_time/np.minimum(tActive, tFinal[i]))**np.maximum(0, alpha_lambda[i])))
if tActive/tFinal[i] >= 1:
# keep jet particles visible at all times
two_phase_weighting = np.maximum(two_phase_weighting, np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3)))) # assume sound speed is critical value for relativistic particles
else:
# suppress emission from jet particle
two_phase_weighting = np.minimum(two_phase_weighting, 1 - np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3))))
# PARTICLE EMISSIVITY
for k in range(0, len(rest_frequency)):
if rest_frequency[k] > 100:
# calculate losses due to adiabatic expansion, and synchrotron/iC radiation
lorentz_ratio, pressure_ratio = __RAiSE_loss_mechanisms(rest_frequency[k], inverse_compton[k], redshift, tFinal[i], new_shock_time, new_pressure, alphaP_dyn, equi_factor, gammaCValue)
# calculate luminosity associated with each particle
temp_luminosity = None
if inverse_compton[k] == 1:
# inverse-Compton
sync_frequency = (3*e_charge*rest_frequency[k]*np.sqrt(2*mu0*( equi_factor*new_pressure/((gammaCValue - 1)*(equi_factor + 1)) ))/(2*np.pi*m_e*(freq_cmb*temp_cmb*(1 + redshift)))) # assuming emission at CMB frequency only
temp_luminosity = Ks/blackbody*sync_frequency**((1 - s_index)/2.)*(sync_frequency/rest_frequency[k])*(gammaCValue - 1)*__RAiSE_uC(redshift) * (equi_factor**((s_index + 1)/4. - 1 )/(equi_factor + 1)**((s_index + 5)/4. - 1 ))*new_volume*new_pressure**((s_index + 1 )/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
else:
# synchrotron
temp_luminosity = Ks*rest_frequency[k]**((1 - s_index)/2.)*(equi_factor**((s_index + 1)/4.)/(equi_factor + 1)**((s_index + 5)/4.))*new_volume*new_pressure**((s_index + 5)/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
# remove any infs
index = np.isinf(temp_luminosity)
temp_luminosity[index] = np.nan
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = temp_luminosity
# calculate luminosity weighted magnetic field strength
magnetic_particle[i,j,k] = np.nansum(np.sqrt(2*mu0*new_pressure*equi_factor/(gammaCValue - 1)*(equi_factor + 1))*luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
magnetic_weighting[i,j,k] = np.nansum(luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
# PARTICLE PRESSURE
else:
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = new_pressure*lobe_particles
# CARTESIAN LOCATIONS
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),0] = x1[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%8 - 3.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),1] = x2[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%4 - 1.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),2] = x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]] *np.sign(timePointer[j]%2 - 0.5)
# calculate luminosity weighted magnetic field strength for time step
for k in range(0, len(rest_frequency)):
if np.nansum(magnetic_weighting[i,:,k]) == 0:
magnetic_field[i,k] = 0
else:
magnetic_field[i,k] = np.nansum(magnetic_particle[i,:,k])/np.nansum(magnetic_weighting[i,:,k])
return location, luminosity, magnetic_field
# find ratio of the lorentz factor and the pressure at the time of acceleration to that at the time of emission
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_loss_mechanisms(rest_frequency, inverse_compton, redshift, time, shock_time, pressure, alphaP, equipartition, gammaCValue=5./3):
# calculate lorentz factor at time of emission
if inverse_compton == 1:
# inverse-Compton
lorentz_factor = np.sqrt(rest_frequency/(freq_cmb*temp_cmb*(1 + redshift)))*np.ones(len(pressure)) # assuming emission at CMB frequency only
else:
# synchrotron
lorentz_factor = np.sqrt(2*np.pi*m_e*rest_frequency/(3*e_charge*np.sqrt(2*mu0*pressure/(gammaCValue - 1)*(equipartition/(equipartition + 1))))) # assuming emission at Larmor frequency only
# calculate pressure and volume at time of acceleration
pressure_inject = pressure*(shock_time/time)**alphaP
# calculate RAiSE constant a2
a2 = __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue)
# calculate lorentz factor at time of acceleration, and remove invalid points
lorentz_inject = lorentz_factor*shock_time**(alphaP/(3*gammaCValue))/(time**(alphaP/(3*gammaCValue)) - a2*lorentz_factor) # the denominator uses time because it is the later (emission) time, i.e. time_high
lorentz_inject[lorentz_inject < 1] = np.nan
return lorentz_inject/lorentz_factor, pressure_inject/pressure
# find RAiSE constant a2 for synchrotron and iC radiative losses
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue=5./3):
return 4*sigma_T/(3*m_e*c_speed)*(pressure_inject/(gammaCValue - 1)*(equipartition/(equipartition + 1))/(1 + alphaP*(1 + 1./(3*gammaCValue)))*shock_time**(-alphaP)*(time**(1 + alphaP*(1 + 1./(3*gammaCValue))) - shock_time**(1 + alphaP*(1 + 1./(3*gammaCValue)))) + __RAiSE_uC(redshift)/(1 + alphaP/(3*gammaCValue))*(time**(1 + alphaP/(3*gammaCValue)) - shock_time**(1 + alphaP/(3*gammaCValue)))) # array is shorter by one element
# find CMB radiation energy density
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_uC(redshift):
uC0 = 0.25*1e6*e_charge # J m-3 CMB energy density at z = 0 (Longair, 1981)
return uC0*(redshift + 1)**4 # assuming uC prop to (z + 1)^4 as in KDA97
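# e.g. uC0 corresponds to ~4.0e-14 J m^-3 at z = 0; the (1 + z)^4 scaling raises this by a factor of 16 to
# ~6.4e-13 J m^-3 at z = 1.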
# find RAiSE constant K(s) for the absolute scaling of the emissivity
def __RAiSE_Ks(s_index, gammaCValue=5./3, lorentz_min=Lorentzmin):
kappa = (gamma(s_index/4. + 19./12)*gamma(s_index/4. - 1./12)*gamma(s_index/4. + 5./4)/gamma(s_index/4. + 7./4))
return kappa/(m_e**((s_index + 3)/2.)*c_speed*(s_index + 1))*(e_charge**2*mu0/(2*(gammaCValue - 1)))**((s_index + 5)/4.)*(3./np.pi)**(s_index/2.)/((lorentz_min**(2 - s_index) - Lorentzmax**(2 - s_index))/(s_index - 2) - (lorentz_min**(1 - s_index) - Lorentzmax**(1 - s_index))/(s_index - 1))
# find RAiSE blackbody constant to convert cosmic microwave background emission from single frequency to blackbody spectrum
def __RAiSE_blackbody(s_index):
return np.pi**4/(15*gamma((s_index + 5)/2.)*zeta((s_index + 5)/2.))
## Define functions to produce surface brightness maps of radio lobes
# define function to manage the discretisation of particles down to pixels
def __RAiSE_brightness_map(frequency, redshift, source_age, lobe_lengths, location, luminosity, angle, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
npixels = 2048/4
elif resolution == 'high':
npixels = 512/2
elif resolution == 'standard':
npixels = 128/1
elif resolution == 'poor':
npixels = 32*2
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
else:
rest_frequency = [10**frequency*(1 + redshift)]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
return __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels)
# define function to discretise particles down to pixels
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels):
# instantiate variables to store brightness map variables
x_list = []
y_list = []
brightness_list = []
for i in range(0, len(tFinal)):
x_col = []
y_col = []
brightness_col = []
sim_x, sim_y, sim_z = location[i,:,0], location[i,:,1], location[i,:,2] # x, y, z (i.e. 0, 1, 2) in simulations
for j in range(0, len(rest_frequency)):
# separate location array into components
index = np.logical_and(np.logical_and(np.logical_not(np.isnan(luminosity[i,:,j])), np.logical_not(np.isinf(luminosity[i,:,j]))), np.logical_not(np.isnan(sim_x)))
location_x = np.sin(angle*np.pi/180.)*sim_y[index] + np.cos(angle*np.pi/180.)*sim_z[index]
location_y = sim_x[index]
new_luminosity = luminosity[i,:,j]
new_luminosity = new_luminosity[index]
if len(location_x) > 0:
# discretise particles
location_x = np.floor(location_x/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
location_y = np.floor(location_y/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
min_x, min_y = np.min(location_x), np.min(location_y)
location_x = location_x - min_x
location_y = location_y - min_y
# instantiate variables to store discrete particles
x_values = np.arange(np.min(location_x), np.max(location_x) + 0.1, 1).astype(np.int_)
y_values = np.arange(np.min(location_y), np.max(location_y) + 0.1, 1).astype(np.int_)
brightness = np.zeros((len(x_values), len(y_values)))
# add luminosity from each particle to correct pixel
for k in range(0, len(new_luminosity)):
if rest_frequency[j] > 100:
brightness[location_x[k],location_y[k]] = brightness[location_x[k],location_y[k]] + new_luminosity[k]
else:
brightness[location_x[k],location_y[k]] = max(brightness[location_x[k],location_y[k]], new_luminosity[k])
# add x and y pixel values, and brightnesses to arrays
x_col.append((x_values + min_x + 0.5)*lobe_lengths[0,i]/(npixels//2)) # add 0.5 to get pixel centres and scale back to physical dimensions
y_col.append((y_values + min_y + 0.5)*lobe_lengths[0,i]/(npixels//2))
brightness_col.append(brightness)
else:
x_col.append(None)
y_col.append(None)
brightness_col.append(None)
x_list.append(x_col)
y_list.append(y_col)
brightness_list.append(brightness_col)
return x_list, y_list, brightness_list
# Define functions to plot emissivity maps throughout source evolutionary history
def RAiSE_evolution_maps(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, seed=None, rerun=False, cmap='RdPu'):
# function to test type of inputs and convert type where appropriate
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# set up plot
fig, axs = plt.subplots(len(source_age), 1, figsize=(12, 1 + (10/axis_ratio[0] + 0.8)*len(source_age)))
if len(source_age) <= 1: # handle case of single image
axs = [axs]
fig.subplots_adjust(hspace=0)
#cmap = cm.get_cmap('binary')
colour_scheme = cm.get_cmap(cmap)
rc('text', usetex=True)
rc('font', size=14)
rc('legend', fontsize=14)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
for i in range(0, len(source_age)):
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}__t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
# read-in data from file (must be RAiSE output of correct format)
if rerun == False:
try:
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
except:
# run RAiSE HD for set of parameters at requested resolution
RAiSE_run(frequency[0], redshift[0], axis_ratio[0], jet_power[0], source_age[i], halo_mass=halo_mass, rand_profile=rand_profile, betas=betas, regions=regions, rho0Value=rho0Value, temperature=temperature, active_age=active_age[0], jet_lorentz=jet_lorentz[0], equipartition=equipartition[0], spectral_index=spectral_index, gammaCValue=gammaCValue, lorentz_min=Lorentzmin, brightness=True, resolution='best', seed=seed)
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
else:
# run RAiSE HD for set of parameters at requested resolution
RAiSE_run(frequency[0], redshift[0], axis_ratio[0], jet_power[0], source_age[i], halo_mass=halo_mass, rand_profile=rand_profile, betas=betas, regions=regions, rho0Value=rho0Value, temperature=temperature, active_age=active_age[0], jet_lorentz=jet_lorentz[0], equipartition=equipartition[0], spectral_index=spectral_index, gammaCValue=gammaCValue, lorentz_min=Lorentzmin, brightness=True, resolution='best', seed=seed)
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
# assign dataframe contents to variables
x, y = (dataframe.index).astype(np.float_), (dataframe.columns).astype(np.float_)
#x, y = x/np.max(x), y/np.max(x)
Y, X = np.meshgrid(y, x)
Z = dataframe.values
if frequency[0] > 0:
Z = Z/np.nanmax(Z)
else:
Z = Z*1e12
Z[Z <= 0] = np.nan
if frequency[0] > 0:
h = axs[i].pcolormesh(X, Y, Z, shading='nearest', cmap=colour_scheme, vmin=0, vmax=1)
else:
h = axs[i].pcolormesh(X, Y, Z, shading='nearest', cmap=colour_scheme, vmin=np.nanmin(Z[0:len(x)//3,:]), vmax=np.nanmax(Z[0:len(x)//3,:]))
axs[i].set_aspect('equal')
axs[i].set_xlim([-1.05*np.max(x), 1.05*np.max(x)])
axs[i].set_ylim([-1.05*np.max(x)/axis_ratio[0], 1.05*np.max(x)/axis_ratio[0]])
axs[i].xaxis.set_major_formatter(FormatStrFormatter('%g'))
axs[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
axs[i].plot(np.NaN, np.NaN, '-', color='none', label=str('{:g}'.format(float('{:.2g}'.format(10**source_age[i]/1e6))))+' Myr')
axs[i].legend(frameon=False)
# add a big axes for labels, hide frame
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
axs[-1].set_xlabel(r'Jet axis (kpc)', fontsize=14.5, labelpad=10)
plt.ylabel(r'Transverse axis (kpc)', fontsize=14.5, labelpad=15)
if frequency[0] <= 0:
if len(axs) == 1:
cax = fig.add_axes([axs[0].get_position().x1+0.01,axs[0].get_position().y0,0.02,axs[0].get_position().height])
c = plt.colorbar(h, cax=cax, pad=0.025)
else:
cax = fig.add_axes([axs[0].get_position().x1+0.01,axs[-1].get_position().y0,0.02,axs[0].get_position().y1-axs[-1].get_position().y0]) # span the colour bar across all stacked panels
c = plt.colorbar(h, cax=cax, pad=0.015)
c.set_label(r'Pressure (pPa)', labelpad=12.5)
# show plot and return handle to plot
plt.show()
return fig
# Define function to plot Dt and LD tracks
def RAiSE_evolution_tracks(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, resolution='standard', seed=None, rerun=False, labels=None, colors=None, linestyles=None):
# function to test type of inputs and convert type where appropriate
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
if len(source_age) <= 1:
raise Exception('Evolutionary tracks require more than one source age; provide a list/array of ages.')
if len(frequency) > 1:
warnings.warn('First frequency in list/array will be plotted for every set of parameters.', category=UserWarning)
if not isinstance(colors, (list, np.ndarray)) and not colors == None:
colors = [colors]
elif colors == None:
colors = ['crimson', 'darkorange', 'darkorchid', 'mediumblue']
if not isinstance(linestyles, (list, np.ndarray)) and not linestyles == None:
linestyles = [linestyles]
elif linestyles == None:
linestyles = ['-']
# set up plot
if resolution == None:
fig, axs = plt.subplots(2, 1, figsize=(6, 10), sharex=True)
else:
fig, axs = plt.subplots(3, 1, figsize=(6, 14), sharex=True)
fig2, axs2 = plt.subplots(1, 1, figsize=(6, 6))
fig.subplots_adjust(hspace=0)
rc('text', usetex=True)
rc('font', size=14)
rc('legend', fontsize=14)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
if resolution == None:
axs[1].set_xlabel(r'Source age (Myr)')
else:
axs[2].set_xlabel(r'Source age (Myr)')
axs[2].set_ylabel(r'Lobe luminosity (W/Hz)')
axs2.set_xlabel(r'Lobe length (kpc)')
axs2.set_ylabel(r'Lobe luminosity (W/Hz)')
axs[0].set_ylabel(r'Lobe length (kpc)')
axs[1].set_ylabel(r'Pressure (Pa)')
# calculate number of plots
nplots = np.max(np.array([len(redshift), len(axis_ratio), len(jet_power), nenvirons, len(active_age), len(equipartition), len(jet_lorentz)]))
time, size, pressure, luminosity, y_min, y_max = [], [], [], [], [], []
for i in range(0, nplots):
if isinstance(rho0Value, (list, np.ndarray)):
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}'.format(axis_ratio[min(len(axis_ratio) - 1, i)], np.abs(equipartition[min(len(equipartition) - 1, i)]), np.abs(np.log10(rho0Value[min(len(rho0Value) - 1, i)])), jet_power[min(len(jet_power) - 1, i)], 2*np.abs(spectral_index) + 1, active_age[min(len(active_age) - 1, i)], jet_lorentz[min(len(jet_lorentz) - 1, i)], redshift[min(len(redshift) - 1, i)])
elif isinstance(halo_mass, (list, np.ndarray)):
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}'.format(axis_ratio[min(len(axis_ratio) - 1, i)], np.abs(equipartition[min(len(equipartition) - 1, i)]), halo_mass[min(len(halo_mass) - 1, i)], jet_power[min(len(jet_power) - 1, i)], 2*np.abs(spectral_index) + 1, active_age[min(len(active_age) - 1, i)], jet_lorentz[min(len(jet_lorentz) - 1, i)], redshift[min(len(redshift) - 1, i)])
# read-in data from file (must be RAiSE output of correct format)
if rerun == False:
try:
dataframe = | pd.read_csv(filename+'.csv', index_col=None) | pandas.read_csv |
import time
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
class CFE():
def __init__(self, cfg_file=None):
super(CFE, self).__init__()
self.cfg_file = cfg_file
############################################################
# ________________________________________________________ #
# ________________________________________________________ #
# GET VALUES FROM CONFIGURATION FILE. #
#
self.config_from_json() #
#
# GET VALUES FROM CONFIGURATION FILE. #
# ________________________________________________________ #
# ________________________________________________________ #
############################################################
# ________________________________________________
# In order to check mass conservation at any time
self.reset_volume_tracking()
# ________________________________________________
# initialize simulation constants
atm_press_Pa=101325.0
unit_weight_water_N_per_m3=9810.0
# ________________________________________________
# Time control
self.time_step_size = 3600
self.timestep_h = self.time_step_size / 3600.0
self.timestep_d = self.timestep_h / 24.0
self.current_time_step = 0
self.current_time = pd.Timestamp(year=1970, month=1, day=1, hour=0)
# ________________________________________________
# Inputs
self.timestep_rainfall_input_m = 0
self.potential_et_m_per_s = 0
# ________________________________________________
# calculated flux variables
self.flux_overland_m = 0 # surface runoff that goes through the GIUH convolution process
self.flux_perc_m = 0 # flux from soil to deeper groundwater reservoir
self.flux_lat_m = 0 # lateral flux in the subsurface to the Nash cascade
self.flux_from_deep_gw_to_chan_m = 0 # flux from the deep reservoir into the channels
self.gw_reservoir_storage_deficit_m = 0 # the available space in the conceptual groundwater reservoir
self.primary_flux = 0 # temporary vars.
self.secondary_flux = 0 # temporary vars.
self.total_discharge = 0
# ________________________________________________
# Evapotranspiration
self.potential_et_m_per_timestep = 0
self.actual_et_m_per_timestep = 0
# ________________________________________________________
# Set these values now that we have the information from the configuration file.
self.runoff_queue_m_per_timestep = np.zeros(len(self.giuh_ordinates))
self.num_giuh_ordinates = len(self.runoff_queue_m_per_timestep)
self.num_lateral_flow_nash_reservoirs = len(self.nash_storage)
# ________________________________________________
# Local values to be used in setting up soil reservoir
trigger_z_m = 0.5
field_capacity_atm_press_fraction = 0.33
H_water_table_m=field_capacity_atm_press_fraction * atm_press_Pa / unit_weight_water_N_per_m3
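# With the constants above this gives H_water_table_m = 0.33*101325/9810 ~ 3.41 m, i.e. the pressure head
# equivalent of one third of an atmosphere, taken here as the field-capacity suction.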
soil_water_content_at_field_capacity = self.soil_params['smcmax'] * \
np.power(H_water_table_m/self.soil_params['satpsi'],(1.0/self.soil_params['bb']))
Omega = H_water_table_m - trigger_z_m
lower_lim = np.power(Omega , (1.0-1.0/self.soil_params['bb']))/(1.0-1.0/self.soil_params['bb']);
upper_lim = np.power(Omega+self.soil_params['D'],(1.0-1.0/self.soil_params['bb']))/(1.0-1.0/self.soil_params['bb'])
storage_thresh_pow_term = np.power(1.0/self.soil_params['satpsi'],(-1.0/self.soil_params['bb']))
lim_diff = (upper_lim-lower_lim)
field_capacity_power = np.power(1.0/self.soil_params['satpsi'],(-1.0/self.soil_params['bb']))
field_capacity_storage_threshold_m = self.soil_params['smcmax'] * field_capacity_power * lim_diff
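# Sketch of the closed form computed above (comment added for clarity): integrating the
# Clapp-Hornberger retention curve over the soil column gives
#   field_capacity_storage_threshold_m = smcmax * (1/satpsi)**(-1/bb)
#                                        * [ (Omega + D)**(1 - 1/bb) - Omega**(1 - 1/bb) ] / (1 - 1/bb)
# with Omega = H_water_table_m - trigger_z_m, i.e. smcmax * field_capacity_power * lim_diff.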
# ________________________________________________
# lateral flow function parameters
assumed_near_channel_water_table_slope = 0.01 # [L/L]
lateral_flow_threshold_storage_m = field_capacity_storage_threshold_m
# lateral_flow_linear_reservoir_constant = 2.0 * assumed_near_channel_water_table_slope * \ # Not used
# self.soil_params['mult'] * NWM_soil_params.satdk * \ # Not used
# self.soil_params['D'] * drainage_density_km_per_km2 # Not used
# lateral_flow_linear_reservoir_constant *= 3600.0 # Not used
self.soil_reservoir_storage_deficit_m = 0
# ________________________________________________
# Subsurface reservoirs
self.gw_reservoir = {'is_exponential':True,
'storage_max_m':1.0,
'coeff_primary':0.01,
'exponent_primary':6.0,
'storage_threshold_primary_m':0.0,
'storage_threshold_secondary_m':0.0,
'coeff_secondary':0.0,
'exponent_secondary':1.0}
self.gw_reservoir['storage_m'] = self.gw_reservoir['storage_max_m'] * 0.01
self.volstart += self.gw_reservoir['storage_m']
self.vol_in_gw_start = self.gw_reservoir['storage_m']
self.soil_reservoir = {'is_exponential':False,
'storage_max_m':self.soil_params['smcmax'] * self.soil_params['D'],
'coeff_primary':self.soil_params['satdk'] * self.soil_params['slop'] * 3600.0,
'exponent_primary':1.0,
'storage_threshold_primary_m':self.soil_params['smcmax'] * storage_thresh_pow_term*
(upper_lim-lower_lim),
'coeff_secondary':0.01,
'exponent_secondary':1.0,
'storage_threshold_secondary_m':lateral_flow_threshold_storage_m}
self.soil_reservoir['storage_m'] = self.soil_reservoir['storage_max_m'] * 0.667
self.volstart += self.soil_reservoir['storage_m']
self.vol_soil_start = self.soil_reservoir['storage_m']
# ________________________________________________
# Schaake
self.refkdt = 3.0
self.Schaake_adjusted_magic_constant_by_soil_type = self.refkdt * self.soil_params['satdk'] / 2.0e-06
self.Schaake_output_runoff_m = 0
self.infiltration_depth_m = 0
# ________________________________________________
# Nash cascade
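# note: the hard-coded value below overrides the K_nash read from the configuration file in config_from_json()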
self.K_nash = 0.03
# __________________________________________________________________________________________________________
# MAIN MODEL FUNCTION
def run_cfe(self):
# ________________________________________________
self.volin += self.timestep_rainfall_input_m
# ________________________________________________
self.potential_et_m_per_timestep = self.potential_et_m_per_s * self.time_step_size
# ________________________________________________
# timestep_rainfall_input_m = f(timestep_rainfall_input_m, potential_et_m_per_timestep)
self.et_from_rainfall()
# ________________________________________________
self.soil_reservoir_storage_deficit_m = (self.soil_params['smcmax'] * \
self.soil_params['D'] - \
self.soil_reservoir['storage_m'])
# ________________________________________________
# Calculates the value for surface_runoff_depth_m
self.Schaake_partitioning_scheme()
# ________________________________________________
self.et_from_soil()
# ________________________________________________
if self.soil_reservoir_storage_deficit_m < self.infiltration_depth_m:
# put the water that won't fit into the soil back into runoff
self.surface_runoff_depth_m += (self.infiltration_depth_m - self.soil_reservoir_storage_deficit_m)
self.infiltration_depth_m = self.soil_reservoir_storage_deficit_m
self.soil_reservoir['storage_m'] = self.soil_reservoir['storage_max_m']
# ________________________________________________
self.vol_sch_runoff += self.surface_runoff_depth_m
self.vol_sch_infilt += self.infiltration_depth_m
# ________________________________________________
if self.current_time_step == 0:
self.previous_flux_perc_m = self.flux_perc_m
# ________________________________________________
if self.previous_flux_perc_m > self.soil_reservoir_storage_deficit_m:
diff = self.previous_flux_perc_m - self.soil_reservoir_storage_deficit_m
self.infiltration_depth_m = self.soil_reservoir_storage_deficit_m
self.vol_sch_runoff += diff
self.vol_sch_infilt += diff
self.surface_runoff_depth_m += diff
# ________________________________________________
self.vol_to_soil += self.infiltration_depth_m
self.soil_reservoir['storage_m'] += self.infiltration_depth_m
# ________________________________________________
# primary_flux, secondary_flux = f(reservoir)
self.conceptual_reservoir_flux_calc( self.soil_reservoir )
# ________________________________________________
self.flux_perc_m = self.primary_flux_m
self.flux_lat_m = self.secondary_flux_m
# ________________________________________________
self.gw_reservoir_storage_deficit_m = self.gw_reservoir['storage_max_m'] - self.gw_reservoir['storage_m']
# ________________________________________________
if self.flux_perc_m > self.gw_reservoir_storage_deficit_m:
diff = self.flux_perc_m - self.gw_reservoir_storage_deficit_m
self.flux_perc_m = self.gw_reservoir_storage_deficit_m
self.vol_sch_runoff+=diff
self.vol_sch_infilt-=diff
# ________________________________________________
self.vol_to_gw += self.flux_perc_m
self.vol_soil_to_gw += self.flux_perc_m
self.gw_reservoir['storage_m'] += self.flux_perc_m
self.soil_reservoir['storage_m'] -= self.flux_perc_m
self.soil_reservoir['storage_m'] -= self.flux_lat_m
self.vol_soil_to_lat_flow += self.flux_lat_m #TODO add this to nash cascade as input
self.volout = self.volout + self.flux_lat_m;
# ________________________________________________
# primary_flux, secondary_flux = f(reservoir)
self.conceptual_reservoir_flux_calc( self.gw_reservoir )
# ________________________________________________
self.flux_from_deep_gw_to_chan_m = self.primary_flux_m
self.vol_from_gw += self.flux_from_deep_gw_to_chan_m
# ________________________________________________
if not self.is_fabs_less_than_epsilon(self.secondary_flux_m, 1.0e-09):
print("problem with nonzero flux point 1\n")
# ________________________________________________
self.gw_reservoir['storage_m'] -= self.flux_from_deep_gw_to_chan_m
# ________________________________________________
# giuh_runoff_m = f(Schaake_output, giuh_ordinates, runoff_queue_m_per_timestep)
self.convolution_integral()
# ________________________________________________
self.vol_out_giuh += self.flux_giuh_runoff_m
self.volout += self.flux_giuh_runoff_m
self.volout += self.flux_from_deep_gw_to_chan_m
# ________________________________________________
self.nash_cascade()
# ________________________________________________
self.vol_in_nash += self.flux_lat_m
self.vol_out_nash += self.flux_nash_lateral_runoff_m
# ________________________________________________
self.flux_Qout_m = self.flux_giuh_runoff_m + self.flux_nash_lateral_runoff_m + self.flux_from_deep_gw_to_chan_m
self.total_discharge = self.flux_Qout_m * self.catchment_area_km2 * 1000000.0 / 3600.0
# ________________________________________________
self.current_time_step += 1
self.current_time += pd.Timedelta(value=self.time_step_size, unit='s')
return
# __________________________________________________________________________________________________________
def nash_cascade(self):
"""
Solve for the flow through the Nash cascade to delay the
arrival of the lateral flow into the channel
"""
Q = np.zeros(self.num_lateral_flow_nash_reservoirs)
for i in range(self.num_lateral_flow_nash_reservoirs):
Q[i] = self.K_nash * self.nash_storage[i]
self.nash_storage[i] -= Q[i]
if i == 0:
self.nash_storage[i] += self.flux_lat_m
else:
self.nash_storage[i] += Q[i-1]
self.flux_nash_lateral_runoff_m = Q[self.num_lateral_flow_nash_reservoirs - 1]
return
# __________________________________________________________________________________________________________
def convolution_integral(self):
"""
This function solves the convolution integral involving N
GIUH ordinates.
"""
for i in range(self.num_giuh_ordinates - 1):
self.runoff_queue_m_per_timestep[i] += self.giuh_ordinates[i] * self.surface_runoff_depth_m
self.flux_giuh_runoff_m = self.runoff_queue_m_per_timestep[0]
for i in range(self.num_giuh_ordinates - 1): # shift all the entries in preparation for the next timestep
self.runoff_queue_m_per_timestep[i] = self.runoff_queue_m_per_timestep[i+1]
return
# __________________________________________________________________________________________________________
def et_from_rainfall(self):
"""
If it is raining, take PET from rainfall first. Wet vegetation is an efficient evaporator.
"""
if self.timestep_rainfall_input_m >0.0:
if self.timestep_rainfall_input_m > self.potential_et_m_per_timestep:
self.actual_et_m_per_timestep = self.potential_et_m_per_timestep
self.timestep_rainfall_input_m -= self.actual_et_m_per_timestep
else:
self.potential_et_m_per_timestep -= self.timestep_rainfall_input_m
self.timestep_rainfall_input_m=0.0
return
# __________________________________________________________________________________________________________
########## SINGLE OUTLET EXPONENTIAL RESERVOIR ###############
########## -or- ###############
########## TWO OUTLET NONLINEAR RESERVOIR ###############
def conceptual_reservoir_flux_calc(self, reservoir):
"""
This function calculates the flux from a linear, or nonlinear
conceptual reservoir with one or two outlets, or from an
exponential nonlinear conceptual reservoir with only one outlet.
In the non-exponential instance, each outlet can have its own
activation storage threshold. Flow from the second outlet is
turned off by setting the discharge coeff. to 0.0.
"""
if reservoir['is_exponential'] == True:
flux_exponential = np.exp(reservoir['exponent_primary'] * \
reservoir['storage_m'] / \
reservoir['storage_max_m']) - 1.0
self.primary_flux_m = reservoir['coeff_primary'] * flux_exponential
self.secondary_flux_m=0.0
return
self.primary_flux_m=0.0
storage_above_threshold_m = reservoir['storage_m'] - reservoir['storage_threshold_primary_m']
if storage_above_threshold_m > 0.0:
storage_diff = reservoir['storage_max_m'] - reservoir['storage_threshold_primary_m']
storage_ratio = storage_above_threshold_m / storage_diff
storage_power = np.power(storage_ratio, reservoir['exponent_primary'])
self.primary_flux_m = reservoir['coeff_primary'] * storage_power
if self.primary_flux_m > storage_above_threshold_m:
self.primary_flux_m = storage_above_threshold_m
self.secondary_flux_m = 0.0;
storage_above_threshold_m = reservoir['storage_m'] - reservoir['storage_threshold_secondary_m']
if storage_above_threshold_m > 0.0:
storage_diff = reservoir['storage_max_m'] - reservoir['storage_threshold_secondary_m']
storage_ratio = storage_above_threshold_m / storage_diff
storage_power = np.power(storage_ratio, reservoir['exponent_secondary'])
self.secondary_flux_m = reservoir['coeff_secondary'] * storage_power
if self.secondary_flux_m > (storage_above_threshold_m - self.primary_flux_m):
self.secondary_flux_m = storage_above_threshold_m - self.primary_flux_m
return
# __________________________________________________________________________________________________________
# SCHAAKE RUNOFF PARTITIONING SCHEME
def Schaake_partitioning_scheme(self):
"""
This subroutine takes water_input_depth_m and partitions it into surface_runoff_depth_m and
infiltration_depth_m using the scheme from Schaake et al. 1996.
!--------------------------------------------------------------------------------
modified by FLO April 2020 to eliminate reference to ice processes,
and to de-obfuscate and use descriptive and dimensionally consistent variable names.
inputs:
timestep_d
Schaake_adjusted_magic_constant_by_soil_type = C*Ks(soiltype)/Ks_ref, where C=3, and Ks_ref=2.0E-06 m/s
column_total_soil_moisture_deficit_m (soil_reservoir_storage_deficit_m)
water_input_depth_m (timestep_rainfall_input_m) amount of water input to soil surface this time step [m]
outputs:
surface_runoff_depth_m amount of water partitioned to surface water this time step [m]
infiltration_depth_m
"""
if 0 < self.timestep_rainfall_input_m:
if 0 > self.soil_reservoir_storage_deficit_m:
self.surface_runoff_depth_m = self.timestep_rainfall_input_m
self.infiltration_depth_m = 0.0
else:
schaake_exp_term = np.exp( - self.Schaake_adjusted_magic_constant_by_soil_type * self.timestep_d)
Schaake_parenthetical_term = (1.0 - schaake_exp_term)
Ic = self.soil_reservoir_storage_deficit_m * Schaake_parenthetical_term
Px = self.timestep_rainfall_input_m
self.infiltration_depth_m = (Px * (Ic / (Px + Ic)))
if 0.0 < (self.timestep_rainfall_input_m - self.infiltration_depth_m):
self.surface_runoff_depth_m = self.timestep_rainfall_input_m - self.infiltration_depth_m
else:
self.surface_runoff_depth_m = 0.0
self.infiltration_depth_m = self.timestep_rainfall_input_m - self.surface_runoff_depth_m
else:
self.surface_runoff_depth_m = 0.0
self.infiltration_depth_m = 0.0
return
# __________________________________________________________________________________________________________
def et_from_soil(self):
"""
Take AET from soil moisture storage, using a Budyko-type curve to limit PET
when wilting < soil moisture < field capacity.
"""
if self.potential_et_m_per_timestep > 0:
print("this should not happen yet. Still debugging the other functions.")
if self.soil_reservoir['storage_m'] >= self.soil_reservoir['storage_threshold_primary_m']:
self.actual_et_m_per_timestep = min(self.potential_et_m_per_timestep, self.soil_reservoir['storage_m'])
self.soil_reservoir['storage_m'] -= self.actual_et_m_per_timestep
self.potential_et_m_per_timestep = 0.0
elif (self.soil_reservoir['storage_m'] > self.soil_reservoir['wilting_point_m'] and
self.soil_reservoir['storage_m'] < self.soil_reservoir['storage_threshold_primary_m']):
Budyko_numerator = self.soil_reservoir['storage_m'] - self.soil_reservoir['wilting_point_m']
Budyko_denominator = self.soil_reservoir['storage_threshold_primary_m'] - \
self.soil_reservoir['wilting_point_m']
Budyko = Budyko_numerator / Budyko_denominator
self.actual_et_m_per_timestep = Budyko * self.potential_et_m_per_timestep
self.soil_reservoir['storage_m'] -= self.actual_et_m_per_timestep
return
# __________________________________________________________________________________________________________
def is_fabs_less_than_epsilon(self,a,epsilon):
if np.abs(a) < epsilon:
return True
else:
return False
# ________________________________________________
# Mass balance tracking
def reset_volume_tracking(self):
self.volstart = 0
self.vol_sch_runoff = 0
self.vol_sch_infilt = 0
self.vol_out_giuh = 0
self.vol_end_giuh = 0
self.vol_to_gw = 0
self.vol_to_gw_start = 0
self.vol_to_gw_end = 0
self.vol_from_gw = 0
self.vol_in_nash = 0
self.vol_in_nash_end = 0
self.vol_out_nash = 0
self.vol_soil_start = 0
self.vol_to_soil = 0
self.vol_soil_to_lat_flow = 0
self.vol_soil_to_gw = 0
self.vol_soil_end = 0
self.volin = 0
self.volout = 0
self.volend = 0
return
#________________________________________________________
def config_from_json(self):
with open(self.cfg_file) as data_file:
data_loaded = json.load(data_file)
# ___________________________________________________
# MANDATORY CONFIGURATIONS
self.forcing_file = data_loaded['forcing_file']
self.catchment_area_km2 = data_loaded['catchment_area_km2']
self.alpha_fc = data_loaded['alpha_fc']
self.soil_params = {}
self.soil_params['bb'] = data_loaded['soil_params']['bb']
self.soil_params['D'] = data_loaded['soil_params']['D']
self.soil_params['depth'] = data_loaded['soil_params']['depth']
self.soil_params['mult'] = data_loaded['soil_params']['mult']
self.soil_params['satdk'] = data_loaded['soil_params']['satdk']
self.soil_params['satpsi'] = data_loaded['soil_params']['satpsi']
self.soil_params['slop'] = data_loaded['soil_params']['slop']
self.soil_params['smcmax'] = data_loaded['soil_params']['smcmax']
self.soil_params['wltsmc'] = data_loaded['soil_params']['wltsmc']
self.max_gw_storage = data_loaded['max_gw_storage']
self.Cgw = data_loaded['Cgw']
self.expon = data_loaded['expon']
self.gw_storage = data_loaded['gw_storage']
self.soil_storage = data_loaded['soil_storage']
self.K_lf = data_loaded['K_lf']
self.K_nash = data_loaded['K_nash']
self.nash_storage = data_loaded['nash_storage']
self.giuh_ordinates = data_loaded['giuh_ordinates']
# ___________________________________________________
# OPTIONAL CONFIGURATIONS
if 'stand_alone' in data_loaded.keys():
self.stand_alone = data_loaded['stand_alone']
if 'forcing_file' in data_loaded.keys():
self.reads_own_forcing = True
self.forcing_file = data_loaded['forcing_file']
if 'unit_test' in data_loaded.keys():
self.unit_test = data_loaded['unit_test']
self.compare_results_file = data_loaded['compare_results_file']
return
#________________________________________________________
def finalize_mass_balance(self, verbose=True):
self.volend = self.soil_reservoir['storage_m'] + self.gw_reservoir['storage_m']
self.vol_in_gw_end = self.gw_reservoir['storage_m']
self.global_residual = self.volstart + self.volin - self.volout - self.volend
self.schaake_residual = self.volin - self.vol_sch_runoff - self.vol_sch_infilt
self.giuh_residual = self.vol_out_giuh - self.vol_sch_runoff - self.vol_end_giuh
self.soil_residual = self.vol_soil_start + self.vol_sch_infilt - \
self.vol_soil_to_lat_flow - self.vol_soil_end - self.vol_to_gw
self.nash_residual = self.vol_in_nash - self.vol_out_nash - self.vol_in_nash_end
self.gw_residual = self.vol_in_gw_start + self.vol_to_gw - self.vol_from_gw - self.vol_in_gw_end
if verbose:
print("\nGLOBAL MASS BALANCE")
print(" initial volume: {:8.4f}".format(self.volstart))
print(" volume input: {:8.4f}".format(self.volin))
print(" volume output: {:8.4f}".format(self.volout))
print(" final volume: {:8.4f}".format(self.volend))
print(" residual: {:6.4e}".format(self.global_residual))
print("\nSCHAAKE MASS BALANCE")
print(" surface runoff: {:8.4f}".format(self.vol_sch_runoff))
print(" infiltration: {:8.4f}".format(self.vol_sch_infilt))
print("schaake residual: {:6.4e}".format(self.schaake_residual))
print("\nGIUH MASS BALANCE");
print(" vol. into giuh: {:8.4f}".format(self.vol_sch_runoff))
print(" vol. out giuh: {:8.4f}".format(self.vol_out_giuh))
print(" vol. end giuh q: {:8.4f}".format(self.vol_end_giuh))
print(" giuh residual: {:6.4e}".format(self.giuh_residual))
print("\nSOIL WATER CONCEPTUAL RESERVOIR MASS BALANCE")
print(" init soil vol: {:8.4f}".format(self.vol_soil_start))
print(" vol. into soil: {:8.4f}".format(self.vol_sch_infilt))
print("vol.soil2latflow: {:8.4f}".format(self.vol_soil_to_lat_flow))
print(" vol. soil to gw: {:8.4f}".format(self.vol_soil_to_gw))
print(" final vol. soil: {:8.4f}".format(self.vol_soil_end))
print("vol. soil resid.: {:6.4e}".format(self.soil_residual))
print("\nNASH CASCADE CONCEPTUAL RESERVOIR MASS BALANCE")
print(" vol. to nash: {:8.4f}".format(self.vol_in_nash))
print(" vol. from nash: {:8.4f}".format(self.vol_out_nash))
print(" final vol. nash: {:8.4f}".format(self.vol_in_nash_end))
print("nash casc resid.: {:6.4e}".format(self.nash_residual))
print("\nGROUNDWATER CONCEPTUAL RESERVOIR MASS BALANCE")
print("init gw. storage: {:8.4f}".format(self.vol_in_gw_start))
print(" vol to gw: {:8.4f}".format(self.vol_to_gw))
print(" vol from gw: {:8.4f}".format(self.vol_from_gw))
print("final gw.storage: {:8.4f}".format(self.vol_in_gw_end))
print(" gw. residual: {:6.4e}".format(self.gw_residual))
return
#________________________________________________________
def load_forcing_file(self):
self.forcing_data = pd.read_csv(self.forcing_file)
#________________________________________________________
def load_unit_test_data(self):
self.unit_test_data = pd.read_csv(self.compare_results_file)
self.cfe_output_data = pd.DataFrame().reindex_like(self.unit_test_data)
#________________________________________________________
def run_unit_test(self, plot_lims=list(range(490, 550))):
self.load_forcing_file()
self.load_unit_test_data()
self.current_time = | pd.Timestamp(self.forcing_data['time'][0]) | pandas.Timestamp |
from mega_analysis.crosstab.file_paths import file_paths
from mega_analysis.semiology import Semiology, Laterality
from mega_analysis.crosstab.mega_analysis.MEGA_ANALYSIS import MEGA_ANALYSIS
from mega_analysis.crosstab.mega_analysis.QUERY_SEMIOLOGY import QUERY_SEMIOLOGY
from mega_analysis.crosstab.all_localisations import all_localisations
from mega_analysis.crosstab.mega_analysis.exclusions import (exclude_ET, exclude_cortical_stimulation, exclude_spontaneous_semiology,
exclude_postictals)
from mega_analysis.crosstab.lobe_top_level_hierarchy_only import top_level_lobes
from mega_analysis.semiology import recursive_items
from mega_analysis.Sankey_Functions import normalise_top_level_localisation_cols
import pandas as pd
from pandas.testing import assert_frame_equal
# import os
import yaml
import copy
# os.chdir('C:/Users/ali_m/AnacondaProjects/PhD/Semiology-Visualisation-Tool/')
def query_semiology_wrapper_from_scripts(df, semiology_list, semiology_dict_path):
"""
From scripts/figures.py in kd_figures-v3 branch
"""
query_results = {}
for semiology in semiology_list:
query_inspection, num_query_lat, num_query_loc = QUERY_SEMIOLOGY(df,
semiology_term=semiology,
ignore_case=True,
semiology_dict_path=semiology_dict_path,
all_columns_wanted=True)
# col1=col1, col2=col1)
one_query_result = {
'query_inspection': query_inspection,
'num_query_lat': num_query_lat,
'num_query_loc': num_query_loc
}
query_results[semiology] = one_query_result
return query_results
def summary_semio_loc_df_from_scripts(normalise=True):
"""
Lots of copy pasting from scripts/figures.py in kd_figures-v3 branch.
returns query_results which is a nested dictionary
full
spontaneous
topology
{semiologies}
query_inspection
num_query_loc
num_query_lat
"""
# Define paths
repo_dir, resources_dir, excel_path, semiology_dict_path = file_paths()
Semio2Brain_Database = excel_path
with open(semiology_dict_path) as f:
SemioDict = yaml.load(f, Loader=yaml.FullLoader)
region_names = all_localisations()
semiology_list = list(recursive_items(SemioDict))
(original_df,
df_ground_truth, df_study_type,
num_database_articles, num_database_patients, num_database_lat, num_database_loc) = \
MEGA_ANALYSIS(Semio2Brain_Database,
exclude_data=True)
# -----------------------------------
redistribution_spec = {
'FT': ['FL', 'INSULA', 'Lateral Temporal', 'TL'],
'TO': ['Lateral Temporal', 'TL', 'OL'],
'TP': ['Lateral Temporal', 'TL', 'PL'],
'FTP': ['INSULA', 'Lateral Temporal', 'TL', 'FL', 'PL'],
'TPO Junction': ['Lateral Temporal', 'TL', 'PL', 'OL'],
'PO': ['PL', 'OL'],
'FP': ['FL', 'PL'],
'Perisylvian': ['INSULA', 'Lateral Temporal', 'TL', 'FL', 'PL'],
'Sub-Callosal Cortex': ['Ant Cing (frontal, genu)', 'CING']
}
redistributed_df = copy.deepcopy(original_df)
# probably not needed as used exclude_data True when calling M_A
redistributed_df = exclude_postictals(redistributed_df)
for from_region, destination_regions in redistribution_spec.items():
for destination in destination_regions:
redistributed_df[destination] = original_df[destination].fillna(
0) + original_df[from_region].fillna(0)
redistributed_df = redistributed_df.drop(columns=list(redistribution_spec.keys()))
# -----------------------------------
# region_names_re = region_names
# region_names_re['top_level'] = ['TL',
# 'FL',
# 'CING',
# 'PL',
# 'OL',
# 'INSULA',
# 'Hypothalamus',
# 'Cerebellum', ]
# region_names_re['top_level_all_other'] = ['Cerebellum']
df = copy.deepcopy(redistributed_df)
df_SS = exclude_ET(df)
df_SS = exclude_cortical_stimulation(df_SS)
df_TS = exclude_spontaneous_semiology(df)
all_dfs = {
'full': df,
'spontaneous': df_SS,
'topology': df_TS,
}
query_results = {}
for key, df in all_dfs.items():
if normalise:
df, _ = normalise_top_level_localisation_cols(df, Bayesian=True)
query_results[key] = query_semiology_wrapper_from_scripts(df, semiology_list, semiology_dict_path)
return query_results
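# Access sketch for the nested dictionary returned above (added for illustration; 'Fear-Anxiety'
# is just an example key, any semiology listed in SemioDict works):
#     results = summary_semio_loc_df_from_scripts(normalise=True)
#     fear = results['full']['Fear-Anxiety']
#     fear['query_inspection']   # DataFrame of matching rows
#     fear['num_query_loc']      # number of localising datapoints
#     fear['num_query_lat']      # number of lateralising datapoints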
def marginal_GIF_probabilities(all_combined_gifs):
"""
Input the DataFrame of GIF Parcellations and values for all semiologies.
all_combined_gifs: GIF values
all_combined_gifs is a heatmap df i.e. from patient.get_num_datapoints_dict() # for all data not for single semiology
remember the cols of all_combined_gifs are "Gif Parcellations" and "pt #s"
Returns the marginal_probabilities (row DataFrame)
Future: as a sensitivity analysis, check the variance of the marginal probabilities under
different filters. Concretely, marginal_p should use all the data without filters; then,
when one of the exclusions is applied (e.g. EZ only, or age-based), quantify how much
marginal_p, and hence the Bayesian inference, would differ.
<NAME> 2021
"""
all_comb_gifs = all_combined_gifs.copy()
# total of gif
gif_total = 0
for k,v in all_comb_gifs.items():
gif_total += v
# now again for the marginal probabilities
for k,v in all_comb_gifs.items():
all_comb_gifs[k] = v/gif_total
# make this a DataFrame
marginal_GIF_prob = pd.DataFrame.from_dict(all_comb_gifs, orient='index', columns=['probability'])
return marginal_GIF_prob.T
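# Usage sketch (hypothetical GIF ids and counts, added for illustration):
#     marginal_GIF_probabilities({'Gif 101': 3.0, 'Gif 102': 1.0})
# returns a single-row DataFrame indexed by 'probability' with columns 'Gif 101' (0.75)
# and 'Gif 102' (0.25), i.e. each value divided by the grand total.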
def wrapper_marginal_L_S(publication_prior, marginal_semio_df, marginal_loc_df, Lobes, normalise=True,
skip_L=False):
""" wrapper for marginal_Localisation_and_Semiology_probabilities"""
query_results = summary_semio_loc_df_from_scripts(normalise=normalise)
for semio, v in query_results[publication_prior].items():
# avoid postictals as they are empty dataframes (ictal manifestations only)
if semio.startswith('Post'):
continue
if semio.startswith('No Semiology'):
continue
# semio
marginal_semio_df.loc[semio, 'num_query_loc'] = query_results[publication_prior][semio]['num_query_loc']
# locs: first replace any non existing locs e.g. Hypothalamus for fear-anxiety
marginal_loc_df = None
if not skip_L:
for ind_lobe in Lobes:
try:
query_inspection = query_results[publication_prior][semio]['query_inspection']
query_inspection[ind_lobe]
except KeyError:
query_inspection.loc[:, ind_lobe] = 0
temp_df = query_inspection[Lobes]
temp_df.fillna(0, inplace=True)
marginal_loc_df = marginal_loc_df.add(temp_df, fill_value=0)
marginal_loc_df.fillna(0, inplace=True)
return marginal_semio_df, marginal_loc_df
def marginal_Localisation_and_Semiology_probabilities(df=None,
normalised=True,
global_loc_normalisation=False,
publication_prior='full',
test=False,
skip_L=False):
""" Returns the marginal localisation and semiology probabilities
> df (optional): preprocessed Semio2Brain DataFrame obtained from MEGA_ANALYSIS after hierarchy reversal.
df = MEGA_ANALYSIS() # for all-data
also used SemioDict
df = hierarchy_reversal()
Assume df is the fully cleaned and hierarchy reversed Semio2Brain descriptions
(or, if not hierarchy reversed, then top-level regions only)
I.e. Use summary_semio_loc_df_from_scripts() to get df of semiology by localisation
as uses top-level regions only without hierarchy_reversal.
rows: semiologies
columns: localisations
can be normalised or not normalised
> publication_prior: 'full', 'spontaneous', or 'topological'
> global normalisation gets all the localisations, then normalises to localising values
> normalised (micronormalisation) is the regular normalisation row by row (by semiology)
returns:
marginal_semio_prob: DataFrame with index of semiologies, and single column of marginal 'probability' (col df)
marginal_loc_prob
"""
# useful for both semio and unnormalised locs:
Lobes = top_level_lobes(Bayesian=True)
# make the df in the form of semiology/locs
marginal_semio_df = | pd.DataFrame() | pandas.DataFrame |
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
for conv in [
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
]:
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[[Timestamp("2011-01-01"), | Timestamp("2011-01-02") | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# build first input with noational aggregation
# import build_input_national_aggr
print('####################')
print('BUILDING INPUT DATA FOR DISAGGREGATION OF SWITZERLAND INTO ARCHETYPES')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_national_aggr'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
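# For illustration (values hypothetical): with ind=['nd_id'] and rows 1 and 2 in df, del_str becomes
# "(nd_id = 1) OR (nd_id = 2)", so a single DELETE statement removes every listed key combination.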
#def replace_table(df, tb):
#
## list_col = list(aql.get_sql_cols(tb, sc, db).keys())
#
# aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# exec_strg = '''
# AlTER
# DELETE FROM {sc}.{tb}
# WHERE {del_str}
# '''.format(tb=tb, sc=sc, del_str=del_str)
# aql.exec_sql(exec_strg, db=db)
#
# aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
#
#aql.exec_sql('''
# ALTER TABLE lp_input_archetypes.profdmnd
# DROP CONSTRAINT profdmnd_pkey,
# DROP CONSTRAINT profdmnd_dmnd_pf_id_fkey;
# ''', db=db)
#%%
dfprop_era_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/prop_era_arch.csv', sep = ';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv.csv'),sep=';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv_prop_0.csv'),sep=';')
dfpv_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/surf_prod_arch_pv_prop_new.csv',sep=';')
# set nd_id to that potential
#dfpv_arch['pv_power_pot'] = dfpv_arch['el_prod']/(1000*dfkev['flh'].mean())
dfpv_arch = dfpv_arch.groupby(dfpv_arch.nd_id_new).sum()
#dfpv_arch['nd_id_new'] = dfpv_arch.nd_id
#dfpv_arch.loc[:,dfpv_arch.nd_id_new.str.contains('OTH')] == 'OTH_TOT'
#dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_pot']/dfpv_arch['pv_power_pot'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_tot_est']/dfpv_arch['pv_power_tot_est'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_st_pwr'] = 0
#
#dfpv_arch_CH0 = dfpv_arch.loc['CH0']
#dfpv_arch = dfpv_arch.drop(['CH0'], axis = 0)
dfpv_arch = dfpv_arch.reset_index()
# %%
dfload_arch = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id not in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch['DateTime'] = dfload_arch['DateTime'].astype('datetime64[ns]')
dfload_arch_res = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("SFH") or nd_id.str.contains("MFH")',engine='python').reset_index(drop=True)
dfload_arch_res['DateTime'] = dfload_arch_res['DateTime'].astype('datetime64[ns]')
dfload_arch_notres = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("OCO") or nd_id.str.contains("IND")',engine='python').reset_index(drop=True)
dfload_arch_notres['DateTime'] = dfload_arch_notres['DateTime'].astype('datetime64[ns]')
dfload_arch_CH0 = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch_CH0['DateTime'] = dfload_arch_CH0['DateTime'].astype('datetime64[ns]')
# dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'],'!=')])
# dfload_arch_res= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['SFH%','MFH%'],'LIKE')])
# dfload_arch_notres= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['OCO%','IND%'],'LIKE')])
# dfload_arch_CH0_1 = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'])])
#dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes')
dfload_dict ={}
dfload_dict_new = {}
df = dfload_arch_res.copy()
df['nd_id_new'] = 0
df['erg_tot_new'] = 0
for i in df.nd_id.unique():
dfload_dict[i] = df.loc[df.nd_id == i]
for l in (0,1,2,3):
df_1 = dfload_dict[i].copy()
df_1['erg_tot_new'] = df_1.loc[:,'erg_tot'] * dfprop_era_arch.loc[dfprop_era_arch.nd_el.str.contains(i+'_'+str(l)),'prop'].reset_index(drop=True).loc[0]
df_1['nd_id_new'] = i+'_'+str(l)
dfload_dict_new[i+'_'+str(l)] = df_1
dfload_arch_res_new = dfload_arch_notres.head(0)
for j in dfload_dict_new:
dfload_arch_res_new = dfload_arch_res_new.append(dfload_dict_new[j],ignore_index=True)
dfload_arch_notres['nd_id_new'] = dfload_arch_notres[['nd_id']]
dfload_arch_notres['erg_tot_new'] = dfload_arch_notres[['erg_tot']]
dfload_arch = dfload_arch_res_new.append(dfload_arch_notres,ignore_index=True)
dfload_arch = dfload_arch.set_index('DateTime')
dfload_arch.index = pd.to_datetime(dfload_arch.index)
dfload_arch_CH0 = dfload_arch_CH0.set_index('DateTime')
dfload_arch = dfload_arch.drop(columns=['nd_id','erg_tot']).rename(columns={'nd_id_new':'nd_id','erg_tot_new':'erg_tot'})
# %%
np.random.seed(3)
dferg_arch = dfload_arch.groupby('nd_id')['erg_tot'].sum()
dferg_arch = dferg_arch.reset_index()
dferg_arch['nd_id_new'] = dferg_arch.nd_id
dict_nd = dferg_arch.set_index('nd_id')['nd_id_new'].to_dict()
# %%
df_solar_canton_raw = | pd.read_csv(base_dir+'/archetype_disaggr/PV/swiss_location_solar.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from functools import reduce
from copy import deepcopy
from os.path import join, isdir, isfile
from os import listdir
import re
import os
import glob
from sys import stdout
from nilmtk.utils import get_datastore
from nilmtk.datastore import Key
from nilmtk.timeframe import TimeFrame
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import get_module_directory, check_directory_exists
from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore
"""
TODO:
* The bottleneck appears to be CPU. So could be sped up by using
multiprocessing module to use multiple CPU cores to load SMART channels in
parallel.
"""
def convert_smart(smart_path, output_filename, format='HDF'):
"""
Parameters
----------
smart_path : str
The root path of the SMART dataset.
output_filename : str
The destination filename (including path and suffix).
format : str
format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
"""
def _smart_measurement_mapping_func(house_id, chan_id):
ac_type = 'active'  # all channels are mapped to active power
return [('power', ac_type)]
# Open DataStore
store = get_datastore(output_filename, format, mode='w')
# Convert raw data to DataStore
_convert(smart_path, store, _smart_measurement_mapping_func, 'US/Eastern')
metadata_dir = join(get_module_directory(),
'dataset_converters',
'smart',
'metadata')
# Add metadata
save_yaml_to_datastore(metadata_dir, store)
store.close()
print("Done converting SMART to HDF5!")
def _convert(input_path, store, measurement_mapping_func, tz, sort_index=True):
"""
Parameters
----------
input_path : str
The root path of the SMART dataset.
store : DataStore
The NILMTK DataStore object.
measurement_mapping_func : function
Must take these parameters:
- house_id
- chan_id
Function should return a list of tuples e.g. [('power', 'active')]
tz : str
Timezone e.g. 'US/Eastern'
sort_index : bool
"""
check_directory_exists(input_path)
houses = _find_all_houses(input_path)
years = []
# Iterating though all Homes
b_cnt = 0
for house_id in houses:
b_cnt = b_cnt + 1
print('Loading Home:', house_id, end='... ')
stdout.flush()
years = _find_year(input_path, house_id)
meters_paths_csv = []
df_all_years = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
    # if we specify include_no_data then 2 are selected
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
    # If selected is already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
    # make sure we don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if we specify include_no_data then 2 are selected
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure we don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if we specify include_no_data then 2 are selected
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
    # make sure we don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
    # if we specify include_no_data then 2 are selected
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # return the 2 we have if all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # return 0 if all_or_none is true and we don't have enough
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
    # set existing weight
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
    # clear out temp - as the Strategy would
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
    # clear out temp - as the Strategy would
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
    # run on every date except 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = pd.to_datetime('2018-01-02')
assert not notAlgo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
    # algos that run on 2018-01-02, 2018-01-03, and 2018-01-04 respectively
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
    # verify it returns false when none of the algos are true
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data.loc[dts[0],'c1'] = 95
data.loc[dts[1],'c1'] = 105
data.loc[dts[2],'c1'] = 95
data.loc[dts[3],'c1'] = 105
data.loc[dts[4],'c1'] = 95
data.loc[dts[5],'c1'] = 105
data.loc[dts[6],'c1'] = 95
# low vol c2
data.loc[dts[0], 'c2'] = 99
data.loc[dts[1], 'c2'] = 101
data.loc[dts[2], 'c2'] = 99
data.loc[dts[3], 'c2'] = 101
data.loc[dts[4], 'c2'] = 99
data.loc[dts[5], 'c2'] = 101
data.loc[dts[6], 'c2'] = 99
targetVolAlgo = algos.TargetVol(
0.1,
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=1
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1':0.5, 'c2':0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'],weights['c1'])
unannualized_c2_weight = weights['c1']
targetVolAlgo = algos.TargetVol(
0.1*np.sqrt(252),
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'], weights['c1'])
assert np.isclose(unannualized_c2_weight, weights['c2'])
def test_PTE_Rebalance():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=30*4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
# low vol c2
for i,dt in enumerate(dts[:-2]):
if i % 2 == 0:
data.loc[dt,'c1'] = 95
data.loc[dt,'c2'] = 101
else:
data.loc[dt, 'c1'] = 105
data.loc[dt, 'c2'] = 99
dt = dts[-2]
data.loc[dt,'c1'] = 115
data.loc[dt,'c2'] = 97
s.setup(data)
s.update(dts[-2])
s.adjust(1000000)
s.rebalance(0.4,'c1')
s.rebalance(0.6,'c2')
wdf = pd.DataFrame(
np.zeros(data.shape),
columns=data.columns,
index=data.index
)
wdf['c1'] = 0.5
wdf['c2'] = 0.5
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
assert PTE_rebalance_Algo(s)
s.rebalance(0.5, 'c1')
s.rebalance(0.5, 'c2')
assert not PTE_rebalance_Algo(s)
def test_close_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
cutoffs= pd.DataFrame( { 'date' : [ dts[1], dts[2] ] }, index = ['c1','c2'] )
algo = algos.ClosePositionsAfterDates( 'cutoffs' )
s.setup(data, cutoffs=cutoffs)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 closes
s.update( dts[2])
algo(s)
assert c1.position == 0
assert c2.position == 0
assert c3.position == 100
assert s.perm['closed'] == set(['c1', 'c2'])
def test_roll_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
roll = pd.DataFrame( { 'date' : [ dts[1], dts[2] ], 'target' : [ 'c3', 'c1' ], 'factor' : [ 0.5, 2.0 ] }, index = ['c1','c2'] )
algo = algos.RollPositionsAfterDates( 'roll' )
s.setup(data, roll=roll)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
    # Don't run anything on dts[1], even though that's when c1 rolls
s.update( dts[2])
algo(s)
assert c1.position == 200 # From c2
assert c2.position == 0
assert c3.position == 100 + 50
assert s.perm['rolled'] == set(['c1', 'c2'])
def test_replay_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
transactions = pd.DataFrame( [ ( pd.Timestamp( '2009-12-01 00'), 'c1', 100, 99.5),
( pd.Timestamp( '2010-01-01 10'), 'c1', -100, 101),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50, 103)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
s.setup(data, bidoffer={}, transactions=transactions) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_replay_transactions_consistency():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
    dts = pd.date_range('2010-01-01', periods=3)
# Data files are too large to include. Download from Kaggle: https://www.kaggle.com/c/home-credit-default-risk/data
# Code source: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features
import argparse
import pickle
import time
import warnings
from contextlib import contextmanager
import numpy as np
import pandas as pd
import redis
from sklearn.model_selection import train_test_split
from credit_utils import *
from willump.evaluation.willump_executor import willump_execute
warnings.simplefilter(action='ignore', category=FutureWarning)
base_folder = "tests/test_resources/home_credit_default_risk/"
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cascades", action="store_true", help="Cascades?")
parser.add_argument("-b", "--debug", help="Debug Mode", action="store_true")
parser.add_argument("-k", "--top_k", type=int, help="Top-K to return", required=True)
parser.add_argument("-d", "--disable", help="Disable Willump", action="store_true")
parser.add_argument("-r", "--redis", help="Redis IP", type=str)
args = parser.parse_args()
if args.cascades:
cascades = pickle.load(open(base_folder + "training_cascades.pk", "rb"))
else:
cascades = None
top_K = args.top_k
if args.redis is None:
redis_ip = "127.0.0.1"
else:
redis_ip = args.redis
db = redis.StrictRedis(host=redis_ip)
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.5f}s".format(title, time.time() - t0))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
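# Illustrative sketch of what one_hot_encoder returns on a tiny frame (column values
# are hypothetical, not from the Kaggle data):
#
#     raw = pd.DataFrame({'CODE_GENDER': ['M', 'F', None], 'CNT_CHILDREN': [0, 2, 1]})
#     encoded, new_cols = one_hot_encoder(raw, nan_as_category=True)
#     # new_cols -> ['CODE_GENDER_F', 'CODE_GENDER_M', 'CODE_GENDER_nan']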
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows=None, nan_as_category=False):
    # Read data
df = pd.read_csv(base_folder + 'application_train.csv', nrows=num_rows)
print("Train samples: {}".format(len(df)))
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import types
import sys
from collections import defaultdict
from pprint import pformat
import math
from pyqstrat.evaluator import compute_return_metrics, display_return_metrics, plot_return_metrics
from pyqstrat.account import Account
from pyqstrat.pq_utils import *
from pyqstrat.pq_types import ContractGroup
from pyqstrat.plot import TimeSeries, trade_sets_by_reason_code, Subplot, Plot
# In[3]:
def _get_time_series_list(timestamps, names, values, properties):
ts_list = []
for name in names:
line_type, color = None, None
if properties is not None and name in properties:
if 'line_type' in properties[name]: line_type = properties[name]['line_type']
if 'color' in properties[name]: color = properties[name]['color']
y = getattr(values, name)
if not len(y): continue
if y.dtype.type in [np.str_, np.object_, np.datetime64]: continue
ts = TimeSeries(name, timestamps, y, line_type = line_type, color = color)
ts_list.append(ts)
return ts_list
class Strategy:
def __init__(self, timestamps, contract_groups, price_function, starting_equity = 1.0e6, pnl_calc_time = 15 * 60 + 1, trade_lag = 0,
run_final_calc = True, strategy_context = None):
'''
Args:
timestamps (np.array of np.datetime64): The "heartbeat" of the strategy. We will evaluate trading rules and
simulate the market at these times.
            contract_groups (list of :obj:`ContractGroup`): The contract groups we will potentially trade.
            price_function: A function that returns the price of a contract at a given timestamp
starting_equity (float, optional): Starting equity in Strategy currency. Default 1.e6
            pnl_calc_time (int, optional): Time of day used to calculate PNL. Default 15 * 60 + 1 (one minute past 3 pm)
trade_lag (int, optional): Number of bars you want between the order and the trade. For example, if you think it will take
5 seconds to place your order in the market, and your bar size is 1 second, set this to 5. Set this to 0 if you
want to execute your trade at the same time as you place the order, for example, if you have daily bars. Default 0.
run_final_calc (bool, optional): If set, calculates unrealized pnl and net pnl as well as realized pnl when strategy is done.
If you don't need unrealized pnl, turn this off for faster run time. Default True
strategy_context (:obj:`types.SimpleNamespace`, optional): A storage class where you can store key / value pairs
relevant to this strategy. For example, you may have a pre-computed table of correlations that you use in the
indicator or trade rule functions.
If not set, the __init__ function will create an empty member strategy_context object that you can access.
'''
self.name = None
self.timestamps = timestamps
assert(len(contract_groups) and isinstance(contract_groups[0], ContractGroup))
self.contract_groups = contract_groups
if strategy_context is None: strategy_context = types.SimpleNamespace()
self.strategy_context = strategy_context
self.account = Account(contract_groups, timestamps, price_function, strategy_context, starting_equity, pnl_calc_time)
assert trade_lag >= 0, f'trade_lag cannot be negative: {trade_lag}'
self.trade_lag = trade_lag
self.run_final_calc = run_final_calc
self.indicators = {}
self.signals = {}
self.signal_values = defaultdict(types.SimpleNamespace)
self.rule_names = []
self.rules = {}
self.position_filters = {}
self.rule_signals = {}
self.market_sims = []
self._trades = []
self._orders = []
self._open_orders = defaultdict(list)
self.indicator_deps = {}
self.indicator_cgroups = {}
self.indicator_values = defaultdict(types.SimpleNamespace)
self.signal_indicator_deps = {}
self.signal_deps = {}
self.signal_cgroups = {}
self.trades_iter = [[] for x in range(len(timestamps))] # For debugging, we don't really need this as a member variable
def add_indicator(self, name, indicator, contract_groups = None, depends_on = None):
'''
Args:
name: Name of the indicator
indicator: A function that takes strategy timestamps and other indicators and returns a numpy array
containing indicator values. The return array must have the same length as the timestamps object.
Can also be a numpy array or a pandas Series in which case we just store the values.
contract_groups (list of :obj:`ContractGroup`, optional): Contract groups that this indicator applies to.
If not set, it applies to all contract groups. Default None.
depends_on (list of str, optional): Names of other indicators that we need to compute this indicator.
Default None.
'''
self.indicators[name] = indicator
self.indicator_deps[name] = [] if depends_on is None else depends_on
if contract_groups is None: contract_groups = self.contract_groups
if isinstance(indicator, np.ndarray) or isinstance(indicator, pd.Series):
indicator_values = series_to_array(indicator)
for contract_group in contract_groups:
setattr(self.indicator_values[contract_group], name, indicator_values)
self.indicator_cgroups[name] = contract_groups
def add_signal(self, name, signal_function, contract_groups = None, depends_on_indicators = None, depends_on_signals = None):
'''
Args:
name (str): Name of the signal
signal_function (function): A function that takes timestamps and a dictionary of indicator value arrays and
returns a numpy array
containing signal values. The return array must have the same length as the input timestamps
contract_groups (list of :obj:`ContractGroup`, optional): Contract groups that this signal applies to.
If not set, it applies to all contract groups. Default None.
depends_on_indicators (list of str, optional): Names of indicators that we need to compute this signal. Default None.
depends_on_signals (list of str, optional): Names of other signals that we need to compute this signal. Default None.
'''
self.signals[name] = signal_function
self.signal_indicator_deps[name] = [] if depends_on_indicators is None else depends_on_indicators
self.signal_deps[name] = [] if depends_on_signals is None else depends_on_signals
if contract_groups is None: contract_groups = self.contract_groups
self.signal_cgroups[name] = contract_groups
def add_rule(self, name, rule_function, signal_name, sig_true_values = None, position_filter = None):
'''Add a trading rule. Trading rules are guaranteed to run in the order in which you add them. For example, if you set trade_lag to 0,
and want to exit positions and re-enter new ones in the same bar, make sure you add the exit rule before you add the entry rule to the
strategy.
Args:
name (str): Name of the trading rule
rule_function (function): A trading rule function that returns a list of Orders
signal_name (str): The strategy will call the trading rule function when the signal with this name matches sig_true_values
sig_true_values (numpy array, optional): If the signal value at a bar is equal to one of these values,
the Strategy will call the trading rule function. Default [TRUE]
position_filter (str, optional): Can be "zero", "nonzero" or None. Zero rules are only triggered when
the corresponding contract positions are 0
Nonzero rules are only triggered when the corresponding contract positions are non-zero.
If not set, we don't look at position before triggering the rule.
Default None
'''
if sig_true_values is None: sig_true_values = [True]
if name in self.rule_names:
raise Exception(f'Rule {name} already exists')
# Rules should be run in order
self.rule_names.append(name)
self.rule_signals[name] = (signal_name, sig_true_values)
self.rules[name] = rule_function
if position_filter is not None:
assert(position_filter in ['zero', 'nonzero'])
self.position_filters[name] = position_filter
def add_market_sim(self, market_sim_function):
'''Add a market simulator. A market simulator takes a list of Orders as input and returns a list of Trade objects.
Args:
market_sim_function (function): A function that takes a list of Orders and Indicators as input
and returns a list of Trade objects
'''
self.market_sims.append(market_sim_function)
def run_indicators(self, indicator_names = None, contract_groups = None, clear_all = False):
'''Calculate values of the indicators specified and store them.
Args:
indicator_names (list of str, optional): List of indicator names. If None (default) run all indicators
contract_groups (list of :obj:`ContractGroup`, optional): Contract group to run this indicator for.
If None (default), we run it for all contract groups.
clear_all (bool, optional): If set, clears all indicator values before running. Default False.
'''
if indicator_names is None: indicator_names = self.indicators.keys()
if contract_groups is None: contract_groups = self.contract_groups
if clear_all: self.indicator_values = defaultdict(types.SimpleNamespace)
ind_names = []
for ind_name, cgroup_list in self.indicator_cgroups.items():
if len(set(contract_groups).intersection(cgroup_list)): ind_names.append(ind_name)
indicator_names = list(set(ind_names).intersection(indicator_names))
for cgroup in contract_groups:
cgroup_ind_namespace = self.indicator_values[cgroup]
for indicator_name in indicator_names:
# First run all parents
parent_names = self.indicator_deps[indicator_name]
for parent_name in parent_names:
if cgroup in self.indicator_values and hasattr(cgroup_ind_namespace, parent_name): continue
self.run_indicators([parent_name], [cgroup])
# Now run the actual indicator
if cgroup in self.indicator_values and hasattr(cgroup_ind_namespace, indicator_name): continue
indicator_function = self.indicators[indicator_name]
parent_values = types.SimpleNamespace()
for parent_name in parent_names:
setattr(parent_values, parent_name, getattr(cgroup_ind_namespace, parent_name))
if isinstance(indicator_function, np.ndarray) or isinstance(indicator_function, pd.Series):
indicator_values = indicator_function
else:
indicator_values = indicator_function(cgroup, self.timestamps, parent_values, self.strategy_context)
setattr(cgroup_ind_namespace, indicator_name, series_to_array(indicator_values))
def run_signals(self, signal_names = None, contract_groups = None, clear_all = False):
'''Calculate values of the signals specified and store them.
Args:
signal_names (list of str, optional): List of signal names. If None (default) run all signals
contract_groups (list of :obj:`ContractGroup`, optional): Contract groups to run this signal for.
If None (default), we run it for all contract groups.
clear_all (bool, optional): If set, clears all signal values before running. Default False.
'''
if signal_names is None: signal_names = self.signals.keys()
if contract_groups is None: contract_groups = self.contract_groups
if clear_all: self.signal_values = defaultdict(types.SimpleNamespace)
sig_names = []
for sig_name, cgroup_list in self.signal_cgroups.items():
if len(set(contract_groups).intersection(cgroup_list)):
sig_names.append(sig_name)
signal_names = list(set(sig_names).intersection(signal_names))
for cgroup in contract_groups:
for signal_name in signal_names:
if cgroup not in self.signal_cgroups[signal_name]: continue
# First run all parent signals
parent_names = self.signal_deps[signal_name]
for parent_name in parent_names:
if cgroup in self.signal_values and hasattr(self.signal_values[cgroup], parent_name): continue
self.run_signals([parent_name], [cgroup])
# Now run the actual signal
if cgroup in self.signal_values and hasattr(self.signal_values[cgroup], signal_name): continue
signal_function = self.signals[signal_name]
parent_values = types.SimpleNamespace()
for parent_name in parent_names:
sig_vals = getattr(self.signal_values[cgroup], parent_name)
setattr(parent_values, parent_name, sig_vals)
# Get indicators needed for this signal
indicator_values = types.SimpleNamespace()
for indicator_name in self.signal_indicator_deps[signal_name]:
setattr(indicator_values, indicator_name, getattr(self.indicator_values[cgroup], indicator_name))
setattr(self.signal_values[cgroup], signal_name, series_to_array(
signal_function(cgroup, self.timestamps, indicator_values, parent_values, self.strategy_context)))
def _generate_order_iterations(self, rule_names = None, contract_groups = None, start_date = None, end_date = None):
'''
>>> class MockStrat:
... def __init__(self):
... self.timestamps = timestamps
... self.account = self
... self.rules = {'rule_a' : rule_a, 'rule_b' : rule_b}
... self.market_sims = {ibm : market_sim_ibm, aapl : market_sim_aapl}
... self.rule_signals = {'rule_a' : ('sig_a', [1]), 'rule_b' : ('sig_b', [1, -1])}
... self.signal_values = {ibm : types.SimpleNamespace(sig_a = np.array([0., 1., 1.]),
... sig_b = np.array([0., 0., 0.]) ),
... aapl : types.SimpleNamespace(sig_a = np.array([0., 0., 0.]),
... sig_b = np.array([0., -1., -1])
... )}
... self.signal_cgroups = {'sig_a' : [ibm, aapl], 'sig_b' : [ibm, aapl]}
... self.indicator_values = {ibm : types.SimpleNamespace(), aapl : types.SimpleNamespace()}
>>>
>>> def market_sim_aapl(): pass
>>> def market_sim_ibm(): pass
>>> def rule_a(): pass
>>> def rule_b(): pass
>>> timestamps = np.array(['2018-01-01', '2018-01-02', '2018-01-03'], dtype = 'M8[D]')
>>> rule_names = ['rule_a', 'rule_b']
>>> ContractGroup.clear()
>>> ibm = ContractGroup.create('IBM')
>>> aapl = ContractGroup.create('AAPL')
>>> contract_groups = [ibm, aapl]
>>> start_date = np.datetime64('2018-01-01')
>>> end_date = np.datetime64('2018-02-05')
>>> strategy = MockStrat()
>>> Strategy._generate_order_iterations(strategy, rule_names, contract_groups, start_date, end_date)
>>> orders_iter = strategy.orders_iter
>>> assert(len(orders_iter[0]) == 0)
>>> assert(len(orders_iter[1]) == 2)
>>> assert(orders_iter[1][0][1] == ibm)
>>> assert(orders_iter[1][1][1] == aapl)
>>> assert(len(orders_iter[2]) == 0)
'''
start_date, end_date = str2date(start_date), str2date(end_date)
if rule_names is None: rule_names = self.rule_names
if contract_groups is None: contract_groups = self.contract_groups
num_timestamps = len(self.timestamps)
# List of lists, i -> list of orders
orders_iter = [[] for x in range(num_timestamps)]
for rule_name in rule_names:
rule_function = self.rules[rule_name]
for cgroup in contract_groups:
signal_name, sig_true_values = self.rule_signals[rule_name]
if cgroup not in self.signal_cgroups[signal_name]:
# We don't need to call this rule for this contract group
continue
sig_values = getattr(self.signal_values[cgroup], signal_name)
timestamps = self.timestamps
null_value = False if sig_values.dtype == np.dtype('bool') else np.nan
if start_date: sig_values[0:np.searchsorted(timestamps, start_date)] = null_value
if end_date: sig_values[np.searchsorted(timestamps, end_date):] = null_value
indices = np.nonzero(np.isin(sig_values[:num_timestamps], sig_true_values))[0]
# Don't run rules on last index since we cannot fill any orders
if len(indices) and indices[-1] == len(sig_values) -1: indices = indices[:-1]
indicator_values = self.indicator_values[cgroup]
iteration_params = {'indicator_values' : indicator_values, 'signal_values' : sig_values, 'rule_name' : rule_name}
for idx in indices: orders_iter[idx].append((rule_function, cgroup, iteration_params))
self.orders_iter = orders_iter
def run_rules(self, rule_names = None, contract_groups = None, start_date = None, end_date = None):
'''Run trading rules.
Args:
rule_names: List of rule names. If None (default) run all rules
contract_groups (list of :obj:`ContractGroup`, optional): Contract groups to run this rule for.
If None (default), we run it for all contract groups.
start_date: Run rules starting from this date. Default None
end_date: Don't run rules after this date. Default None
'''
start_date, end_date = str2date(start_date), str2date(end_date)
self._generate_order_iterations(rule_names, contract_groups, start_date, end_date)
# Now we know which rules, contract groups need to be applied for each iteration, go through each iteration and apply them
# in the same order they were added to the strategy
for i in range(len(self.orders_iter)):
self._run_iteration(i)
if self.run_final_calc:
self.account.calc(self.timestamps[-1])
def _run_iteration(self, i):
self._sim_market(i)
        # Treat all orders as IOC, i.e. if the order was not executed, then it's cancelled.
self._open_orders[i] = []
rules = self.orders_iter[i]
for (rule_function, contract_group, params) in rules:
orders = self._get_orders(i, rule_function, contract_group, params)
self._orders += orders
self._open_orders[i + self.trade_lag] += orders
# If the lag is 0, then run rules one by one, and after each rule, run market sim to generate trades and update
# positions. For example, if we have a rule to exit a position and enter a new one, we should make sure
# positions are updated after the first rule before running the second rule. If the lag is not 0,
# run all rules and collect the orders, we don't need to run market sim after each rule
if self.trade_lag == 0: self._sim_market(i)
# If we failed to fully execute any orders in this iteration, add them to the next iteration so we get another chance to execute
open_orders = self._open_orders.get(i)
if open_orders is not None and len(open_orders):
self._open_orders[i + 1] += open_orders
def run(self):
self.run_indicators()
self.run_signals()
self.run_rules()
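    # Minimal wiring sketch (names and function bodies are hypothetical; the call
    # signatures mirror the ones this class uses when it invokes indicators, signals,
    # rules and market simulators further down in this file):
    #
    #     def sma(contract_group, timestamps, parent_values, context): ...
    #     def above_sma(contract_group, timestamps, indicator_values, parent_values, context): ...
    #     def entry_rule(contract_group, i, timestamps, indicator_values, signal_values, account, context): ...
    #     def market_sim(orders, i, timestamps, indicator_values, signal_values, context): ...
    #
    #     strategy = Strategy(timestamps, [contract_group], price_function)
    #     strategy.add_indicator('sma', sma)   # or pass a precomputed np.ndarray / pd.Series
    #     strategy.add_signal('above_sma', above_sma, depends_on_indicators=['sma'])
    #     strategy.add_rule('entry', entry_rule, signal_name='above_sma', position_filter='zero')
    #     strategy.add_market_sim(market_sim)
    #     strategy.run()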
def _get_orders(self, idx, rule_function, contract_group, params):
try:
indicator_values, signal_values, rule_name = (params['indicator_values'], params['signal_values'], params['rule_name'])
position_filter = self.position_filters[rule_name]
if position_filter is not None:
curr_pos = self.account.position(contract_group, self.timestamps[idx])
if position_filter == 'zero' and not math.isclose(curr_pos, 0): return []
if position_filter == 'nonzero' and math.isclose(curr_pos, 0): return []
orders = rule_function(contract_group, idx, self.timestamps, indicator_values, signal_values, self.account,
self.strategy_context)
except Exception as e:
raise type(e)(f'Exception: {str(e)} at rule: {type(rule_function)} contract_group: {contract_group} index: {idx}'
).with_traceback(sys.exc_info()[2])
return orders
def _sim_market(self, i):
'''
        Go through all open orders and run market simulators to generate trades, keeping any orders that were not filled open for the next iteration.
'''
open_orders = self._open_orders.get(i)
if open_orders is None or len(open_orders) == 0: return [], []
# If there is more than one order for a contract, throw away any but the last one.
#seen = set()
#seen_add = seen.add
#open_orders = list(reversed([order for order in reversed(orders) if not (order.contract in seen or seen_add(order.contract))]))
for market_sim_function in self.market_sims:
try:
trades = market_sim_function(open_orders, i, self.timestamps, self.indicator_values, self.signal_values, self.strategy_context)
if len(trades): self.account.add_trades(trades)
self._trades += trades
except Exception as e:
raise type(e)(f'Exception: {str(e)} at index: {i} function: {market_sim_function}').with_traceback(sys.exc_info()[2])
self._open_orders[i] = [order for order in open_orders if order.status != 'filled']
def df_data(self, contract_groups = None, add_pnl = True, start_date = None, end_date = None):
'''
Add indicators and signals to end of market data and return as a pandas dataframe.
Args:
contract_groups (list of :obj:`ContractGroup`, optional): list of contract groups to include. All if set to None (default)
add_pnl: If True (default), include P&L columns in dataframe
start_date: string or numpy datetime64. Default None
end_date: string or numpy datetime64: Default None
'''
start_date, end_date = str2date(start_date), str2date(end_date)
if contract_groups is None: contract_groups = self.contract_groups
timestamps = self.timestamps
if start_date: timestamps = timestamps[timestamps >= start_date]
if end_date: timestamps = timestamps[timestamps <= end_date]
dfs = []
for contract_group in contract_groups:
df = pd.DataFrame({'timestamp' : self.timestamps})
if add_pnl:
df_pnl = self.df_pnl(contract_group)
indicator_values = self.indicator_values[contract_group]
for k in sorted(indicator_values.__dict__):
name = k
# Avoid name collisions
if name in df.columns: name = name + '.ind'
df.insert(len(df.columns), name, getattr(indicator_values, k))
signal_values = self.signal_values[contract_group]
for k in sorted(signal_values.__dict__):
name = k
if name in df.columns: name = name + '.sig'
df.insert(len(df.columns), name, getattr(signal_values, k))
if add_pnl: df = pd.merge(df, df_pnl, on = ['timestamp'], how = 'left')
# Add counter column for debugging
df.insert(len(df.columns), 'i', np.arange(len(df)))
dfs.append(df)
        return pd.concat(dfs)
# z-score = ([current trend] - [average historic trends]) / [standard deviation of historic trends]
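# Worked example (numbers hypothetical): with historic daily mention counts
# [2, 3, 4, 3] the mean is 3.0 and the (population) standard deviation is ~0.71,
# so a current count of 6 gives z = (6 - 3.0) / 0.71 ~= 4.2, well above the 1.5
# threshold used in is_trending() below.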
import os
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style as style
from matplotlib.dates import DateFormatter, DayLocator
from datetime import datetime
style.available
style.use('seaborn')
import config
import warnings
warnings.filterwarnings('ignore')
def get_score_data():
# Pull in the mention data
df = pd.read_csv('data/mentions.csv', parse_dates=['Mention Datetime'])
df = df.groupby([pd.Grouper(key='Mention Datetime', freq='D'),'Symbol']).sum().reset_index()
df = df.pivot_table(index='Symbol',
columns='Mention Datetime', fill_value=0
).stack().sort_values(by=['Symbol','Mention Datetime']).reset_index()
return df
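# Illustrative result (symbols and counts hypothetical): one row per (Symbol, day)
# pair with zero-filled daily mention counts, e.g.
#
#     Symbol  Mention Datetime  Mention
#     AAPL    2021-03-01        12
#     AAPL    2021-03-02        0
#     GME     2021-03-01        340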
def top_symbols(df):
# Determines the top symbols based on observations greater than variable "t_post_count" in config.py
all_syms = df['Symbol'].unique()
top_syms = {}
top_syms_list = []
for sym in all_syms:
count = df.loc[df['Symbol'] == sym,['Mention']].sum()
if count[0] > config.t_post_count:
top_syms_list.append(sym)
top_syms[sym] = df[df['Symbol'] == sym].reset_index()
top_syms[sym].drop(['index'], axis=1, inplace=True)
print('The top symbols: ')
print(top_syms_list)
return top_syms, top_syms_list
def is_trending(df, top_syms, top_syms_list):
# Uses z score to determine if a stock is trending
obs = df['Mention']
trending_stocks = []
for sym in top_syms_list:
mn = np.mean(obs)
sd = np.std(obs)
i = top_syms[sym].loc[0][2]
print(i)
zscore = (i-mn)/sd
print('\n'+sym)
print('Z-Score is: '+str(zscore))
if zscore > 1.5:
print('Stock is Trending!')
trending_stocks.append(sym)
else:
print('Not Trending')
with open('obj/trendingstocks.pkl', 'wb') as fp: #Pickling
pickle.dump(trending_stocks, fp)
return trending_stocks
def new_trending(trending_stocks):
if not os.path.exists('data/datetrending.csv'):
newtrend = pd.DataFrame(trending_stocks, columns = ['Symbol'])
newtrend['First Trending'] = datetime.today().strftime('%Y-%m-%d')
newtrend['BuyPrice'] = np.NaN
newtrend.set_index('Symbol', inplace=True)
newtrend.to_csv('data/datetrending.csv', index=True)
print('New trending stocks:')
print(newtrend)
else:
        oldtrend = pd.read_csv('data/datetrending.csv', parse_dates=['First Trending'], index_col='Symbol')
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
            for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
        result : ndarray with values replaced by the fill_value

        Mask the result if needed, and convert to the provided dtype if it is
        not None.  This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
        Return the frequency object as a string if it is set, otherwise None.
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
        Tries to return a string representing a frequency guess,
        generated by infer_freq.  Returns None if it can't autodetect the
        frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
def _add_datetimelike_scalar(self, other):
# Overridden by TimedeltaArray
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overridden by PeriodArray
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
        other : {timedelta, np.timedelta64, Tick,
                 TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
        Add a delta of a timedeltalike; return the i8 result view.
"""
if isna(other):
            # i.e. np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(self.shape, dtype="i8")
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(
"i8"
)
new_values = self._maybe_mask_results(new_values)
return new_values.view("i8")
def _add_delta_tdi(self, other):
"""
        Add a delta of a TimedeltaIndex; return the i8 result view.
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas.core.arrays import TimedeltaArray
other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError(
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
)
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return result.view("timedelta64[ns]")
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if not is_period_dtype(self):
raise TypeError(
f"cannot subtract {other.dtype}-dtype from {type(self).__name__}"
)
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_object_array(self, other: np.ndarray, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : np.ndarray[object]
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn(
"Adding/subtracting array of DateOffsets to "
f"{type(self).__name__} not vectorized",
PerformanceWarning,
)
# For EA self.astype('O') returns a numpy array, not an Index
left = self.astype("O")
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs["freq"] = "infer"
try:
res = type(self)._from_sequence(res_values, **kwargs)
except ValueError:
# e.g. we've passed a Timestamp to TimedeltaArray
res = res_values
return res
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
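
        For example (illustrative), shifting a daily-frequency array with
        ``periods=2`` moves every element forward two days, whereas
        ``ExtensionArray.shift(2)`` would move elements two *positions* and pad
        the end with missing values.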
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None, freq=self.freq)
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
# scalar others
if other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
return self._add_datetime_arraylike(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.add)
else:
# Includes Categorical, other ExtensionArrays
# For PeriodDtype, if self is a TimedeltaArray and other is a
# PeriodArray with a timedelta-like (i.e. Tick) freq, this
# operation is valid. Defer to the PeriodArray implementation.
# In remaining cases, this will end up raising TypeError.
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
# scalar others
if other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(-other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
result = self._sub_datetime_arraylike(other)
elif is_period_dtype(other):
# PeriodIndex
result = self._sub_period_array(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.sub)
else:
# Includes ExtensionArrays, float_dtype
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __rsub__(self, other):
if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self.dtype):
# ndarray[datetime64] cannot be subtracted from self, so
# we need to wrap in DatetimeArray/Index and flip the operation
if lib.is_scalar(other):
# i.e. np.datetime64 object
return Timestamp(other) - self
if not isinstance(other, DatetimeLikeArrayMixin):
# Avoid down-casting DatetimeIndex
from pandas.core.arrays import DatetimeArray
other = DatetimeArray(other)
return other - self
elif (
is_datetime64_any_dtype(self.dtype)
and hasattr(other, "dtype")
and not is_datetime64_any_dtype(other.dtype)
):
# GH#19959 datetime - datetime is well-defined as timedelta,
# but any other type - datetime is not well-defined.
raise TypeError(
f"cannot subtract {type(self).__name__} from {type(other).__name__}"
)
elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
elif is_timedelta64_dtype(self.dtype):
if lib.is_integer(other) or | is_integer_dtype(other) | pandas.core.dtypes.common.is_integer_dtype |
# -*- coding: utf-8 -*-
"""LOGAN
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1T7zfx1dr0Zw3n1nXqy9dLOQpg2DcpzSo
"""
#@title Choose type { run: "auto" }
particle_options = {
"Inclusive": "dp_ana_Inclusive.hdf5",
"GMM_ECAL": "Data/dp_ana_GMM_ECAL.hdf5",
"GMM_Target": "dp_ana_GMM_Target.hdf5",
"PN_ECAL": "dp_ana_PN_ECAL.hdf5",
"PN_Target": "dp_ana_PN_Target.hdf5",
"Inclusive_4e6": "Inclusive_ana_4e6.tfrecord",
"GMM_ECAL_8e5": "GMM_ECAL_ana_8e5.hdf5",
"GMM_Target_8e5": "GMM_Target_ana_8e5.hdf5",
"PN_ECAL_4e5": "PN_ECAL_ana_4e5.hdf5",
"PN_Target_4e5": "PN_Target_ana_4e5.hdf5",
"Inclusive_cut_7GeV": "/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/TrainingData/Inclusive_ana_4e6_cut_at_7GeV.hdf5",
'Inclusive_larger_than_7GeV' :'/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/TrainingData/Inclusive_ana_4e6_cut_larger_then_7GeV.tfrecord'
}
training_data_path = "/lustre/collider/hezhengting/TrainningData"
particle_type = "PN_ECAL_4e5" #@param ['Inclusive', 'GMM_ECAL', 'GMM_Target', 'PN_ECAL', 'PN_Target', 'Inclusive_4e6', 'GMM_ECAL_8e5', 'GMM_Target_8e5', 'PN_ECAL_4e5', 'PN_Target_4e5', 'Inclusive_cut_7GeV', 'Inclusive_larger_than_7GeV']
# assumption: the non-absolute entries in particle_options are file names that
# live directly under training_data_path, so join them with an explicit "/"
particle_label = {particle_type: training_data_path + "/" + particle_options[particle_type]}
#Model_to_load = None
#d_model_to_load = None
model_path = "/lustre/collider/hezhengting/LOGAN/Models/"
model_save_time = '2021-07-22_00_49_51'
d_model_to_load = model_path + model_save_time + 'LOGANdiscriminator_PN_ECAL_4e5.hdf5'
Model_to_load = model_path + model_save_time + 'LOGANgenerator_PN_ECAL_4e5.hdf5'
aux_model = None
hdf5_dataset = (list(particle_label.values())[0][-4:] == 'hdf5')
tfrecord_dataset = (list(particle_label.values())[0][-8:] == 'tfrecord')
if tfrecord_dataset:
print("Using tfrecords format")
if hdf5_dataset:
print('Using hdf5 format')
"""# Hyper-parameters form"""
#@title Hyper-parameters form { run: "auto", vertical-output: true, display-mode: "form" }
disc_lr = 1e-3#@param {type:"number"}
disc_opt = "Adam" #@param ["Adam", "Nadam"]
adam_beta_1 = 0.5 #@param {type:"number"}
adam_beta_2 = 0.9 #@param {type:"number"}
decay_steps = 1000 #@param {type:"slider", min:100, max:2000, step:100}
decay_power = 0.5 #@param ["0.5", "1", "2"] {type:"raw"}
decay_rate = 0.9 #@param {type:"number"}
gen_lr = 1e-3 #@param {type:"number"}
gen_opt = "Nadam" #@param ["Adam", "Nadam"]
energy_cut = 1e-3 #@param {type:"number"}
generator_extra_step = 3#@param {type:"integer"}
discriminator_extra_steps = 1#@param {type:"integer"}
batch_size = 500 #@param {type:"slider", min:100, max:1000, step:100}
BATCH_SIZE=batch_size
final_layer_activation = "softplus" #@param ["relu", "softplus"]
z_alpha = 0.9#@param {type:"number"}
z_beta = 0.1 #@param {type:"number"}
g_network_type = "DownSampling" #@param ["UpSampling", "DownSampling"]
use_latent_optimization = True #@param ["True", "False"] {type:"raw"}
end_learning_rate = 1e-6#@param {type:"number"}
lambda_E = 1e2
E_factor = 0
lambda_sparsity = 10
latent_size=1024
g_pfx = 'params_generator_epoch_'
d_pfx = 'params_discriminator_epoch_'
real_input_energies = 8
"""# Configure
## Import
"""
from tensorflow import keras
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Nadam
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os
from datetime import datetime
from tensorflow.keras.layers import (Input, Dense, Reshape, Conv2D, LeakyReLU,
BatchNormalization, LocallyConnected2D,
Activation, ZeroPadding2D, Lambda, Flatten,
Embedding, ELU, Dropout, UpSampling2D, Cropping2D, LayerNormalization,
)
from tensorflow.keras.layers import concatenate
import time
import h5py
##MBD import
#from keras.engine import InputSpec, Layer
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.keras import initializers, regularizers, constraints, activations
from tensorflow.keras.layers import Lambda, ZeroPadding2D, LocallyConnected2D
#from sklearn.preprocessing import LabelEncoder
#from sklearn.utils import shuffle
import pandas as pd
from tqdm import tqdm
import deepdish as dd
from hep_ml import reweight
from hep_ml.metrics_utils import ks_2samp_weighted
import uproot3
import uproot
from datetime import datetime
import pytz
import seaborn as sn
class Dense3D(Layer):
"""
A 3D, trainable, dense tensor product layer
"""
def __init__(self, first_dim,
last_dim,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Dense3D, self).__init__(**kwargs)
self.first_dim = first_dim
self.last_dim = last_dim
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(self.first_dim, input_dim, self.last_dim),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.first_dim, self.last_dim),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs, mask=None):
out = tf.reshape(tf.matmul(inputs, self.kernel), (-1, self.first_dim, self.last_dim))
if self.use_bias:
out += self.bias
return out
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return (input_shape[0], self.first_dim, self.last_dim)
def get_config(self):
config = {
'first_dim': self.first_dim,
'last_dim': self.last_dim,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
        base_config = super(Dense3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MinibatchStdev(Layer):
def __init__(self, **kwargs):
super(MinibatchStdev, self).__init__(**kwargs)
# calculate the mean standard deviation across each pixel coord
def call(self, inputs):
mean = K.mean(inputs, axis=0, keepdims=True)
mean_sq_diff = K.mean(K.square(inputs - mean), axis=0, keepdims=True) + 1e-8
mean_pix = K.mean(K.sqrt(mean_sq_diff), keepdims=True)
shape = K.shape(inputs)
output = K.tile(mean_pix, [shape[0], shape[1], shape[2], 1])
return K.concatenate([inputs, output], axis=-1)
# define the output shape of the layer
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
input_shape[-1] += 1
return tuple(input_shape)
class MinibatchVariableStdev(Layer):
def __init__(self, **kwargs):
super(MinibatchVariableStdev, self).__init__(**kwargs)
def call(self, inputs):
mean = K.mean(inputs, axis=0, keepdims=True)
mean_sq_diff = K.mean(K.square(inputs - mean), axis=0, keepdims=True)
mean_pix = K.mean(K.sqrt(mean_sq_diff), keepdims=True)
shape = K.shape(inputs)
mean_output = K.tile(mean, [shape[0], 1])
output = K.tile(mean_pix, [shape[0], 1])
return K.concatenate([inputs, mean_output, output], axis=-1)
# define the output shape of the layer
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
input_shape[-1] += 2
return tuple(input_shape)
"""## Read HDF5 file"""
def read_file(particle, infile):
#h5file = h5py.File(infile, 'r')
h5file = dd.io.load(particle_label[particle])
ECAL_centre = h5file['ECAL_centre']
Energy = h5file["Energy"]
sizes = ECAL_centre.shape
print("There are {} events with {} x {} layout for {}".format(sizes[0], sizes[1], sizes[2],particle))
y = [particle] * ECAL_centre.shape[0]
return ECAL_centre, Energy, sizes, y
if hdf5_dataset:
nb_classes = len(particle_label)
    print("There are {} types of particles".format(nb_classes))
ECAL_centre, Energy, sizes, y = [
np.concatenate(t) for t in [
a for a in zip(*[read_file(p, f) for p, f in particle_label.items()])
]
]
#le = LabelEncoder()
#y = le.fit_transform(y)
#print(list(le.classes_))
print(list(particle_label.keys())[0])
#ECAL_centre, Energy, y = shuffle(ECAL_centre, Energy, y, random_state=0)
sizes = ECAL_centre.shape
sizes = np.shape(ECAL_centre)
    train_images = (ECAL_centre.reshape(sizes[0], 20, 20, 1).astype('float32'))/1000  # convert MeV to GeV
#train_images = train_images * (train_images>energy_cut)
Energy = (Energy.reshape(sizes[0]).astype('float32'))/1000
    print("The shape of training data is", train_images.shape)
train_dataset = (
tf.data.Dataset.from_tensor_slices({
'images' :train_images,
'energy' :Energy
})
.shuffle(train_images.shape[0], reshuffle_each_iteration=True)
.batch(batch_size)
#.prefetch(tf.data.AUTOTUNE)
)
"""## Read tfrecord"""
def parse_tfr_element(element):
data = {
'images':tf.io.FixedLenFeature([], tf.string),
'energy':tf.io.FixedLenFeature([], tf.float32),
}
content = tf.io.parse_single_example(element, data)
raw_image = content['images']
energy = content['energy']
feature = tf.io.parse_tensor(raw_image, out_type=tf.float32)
feature = tf.reshape(feature, shape=[20,20,1])/1000
energy = tf.reshape(energy, shape=[1])/1000
return {'images': feature,
'energy': energy
}
def get_dataset_small(filename):
#create the dataset
dataset = tf.data.TFRecordDataset(filename)
#pass every single feature through our mapping function
dataset = dataset.map(parse_tfr_element,num_parallel_calls=tf.data.AUTOTUNE)
return dataset
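# Writer-side sketch (an assumption, for illustration only: the conversion script
# that produced the tfrecord files is not part of this notebook). It mirrors
# parse_tfr_element above: a serialized float32 20x20 image under 'images' and a
# float energy in MeV under 'energy'.
def serialize_shower_example(image_20x20, energy_mev):
    image_bytes = tf.io.serialize_tensor(tf.cast(image_20x20, tf.float32)).numpy()
    feature = {
        'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
        'energy': tf.train.Feature(float_list=tf.train.FloatList(value=[float(energy_mev)])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()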
if tfrecord_dataset:
tfrecord_file = list(particle_label.values())[0]
train_dataset = get_dataset_small(tfrecord_file)
test_dataset = get_dataset_small(tfrecord_file)
sizes = []
#sizes.append(test_dataset.reduce(np.int64(0), lambda x, _: x + 1).numpy())
sizes.append(4000000)
print(sizes)
for sample in test_dataset.take(1):
print(sample['images'].shape)
for i in list(sample['images'].shape):
sizes.append(i)
print(sample['energy'].shape)
print(sizes)
train_dataset = train_dataset.shuffle(sizes[0], reshuffle_each_iteration=True)
train_dataset = train_dataset.batch(batch_size)
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
#train_dataset = train_dataset.cache()
"""# Functions
## Plotting functions
"""
from matplotlib.colors import LogNorm
def plot_2D_image(image, epoch,log=True):
fig, ax = plt.subplots(figsize=(5,5))
if np.max(image)>0:
energy = ax.imshow(
image,norm = LogNorm(
vmin=1e-3,
vmax=6e3
)
)
else :
energy = ax.imshow(
image
)
colorbar = fig.colorbar(energy)
colorbar.set_label(r'Energy (MeV)')
colorbar.ax.tick_params()
xticks = range(20)
yticks = range(20)
#Plot real image
if epoch == -2:
title = 'Real'
#Plot fake image
elif epoch == -1:
title = 'Counterfeit'
else:
title = '2D plot for epoch{:03d}'.format(epoch)
ax.set_title(title)
if epoch > -1:
plt.savefig('2D_image_at_epoch_{:03d}.png'.format(epoch))
plt.xticks(xticks)
plt.yticks(yticks)
plt.show()
def plot_3D_image(image, epoch):
x_gird = np.arange(0, 20, 1)
y_gird = np.arange(0, 20, 1)
X_gird, Y_gird = np.meshgrid(x_gird, y_gird)
fig = plt.figure(figsize=(13, 7))
ax = plt.axes(projection='3d')
surf = ax.plot_surface(X_gird, Y_gird, image, rstride=1, cstride=1, cmap='coolwarm', edgecolor='none')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel(r'Energy (MeV)')
if epoch == -2:
title = 'Real'
elif epoch == -1:
title = 'Counterfeit'
else:
title = 'Surface plot for epoch{:04d}'.format(epoch)
ax.set_title(title)
fig.colorbar(surf, shrink=0.5, aspect=5) # add color bar indicating the energy
ax.view_init(60, 35)
if epoch > -1:
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
def plot_loss(loss, x_title='Epoch',fig_width=10,fig_height=10, title=None):
fig=plt.figure(num=1,figsize=(fig_width,fig_height))
epochs = np.arange(1, len(loss)+1)
#"d_loss","d_cost", "gp", "g_loss", "g_cost","E_loss"
d_loss = loss.d_loss.values
d_cost = loss.d_cost.values
gp = loss.gp.values
g_loss = loss.g_loss.values
g_cost = loss.g_cost.values
E_loss = loss.E_loss.values
ax1=fig.add_subplot(221)
ax1.set_xlabel(x_title)
ax1.set_ylabel('loss')
ax1.plot(epochs, d_loss, color='blue', marker='o', label='Discriminator loss')
ax1.plot(epochs, g_loss, color='red', marker='*',label='Generator loss')
ax1.grid(axis='y')
ax1.legend()
ax2 = fig.add_subplot(222)
ax2.set_xlabel(x_title)
ax2.set_ylabel('loss')
ax2.plot(epochs, d_cost, color='blue', marker='o', label='Discriminator cost')
ax2.plot(epochs, g_cost, color='red', marker='*',label='Generator cost')
ax2.grid(axis='y')
ax2.legend()
ax3 = fig.add_subplot(223)
ax3.set_xlabel(x_title)
ax3.set_ylabel('loss')
ax3.plot(epochs, E_loss, color='green', marker='D',label='Energy loss')
ax3.legend()
ax3.grid(axis='y')
ax3.set_yscale('log')
ax4 = fig.add_subplot(224)
ax4.set_xlabel(x_title)
ax4.set_ylabel('loss')
ax4.plot(epochs, gp, color='green', marker='D',label='Gradient Panelty')
ax4.legend()
ax4.grid(axis='y')
    if title:
        plt.title(title)
    # save before show so the written figure is not blank; the filename is an
    # assumption (the original call passed no argument)
    plt.savefig('loss_history.png')
    plt.show()
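# Illustration of the loss-history layout plot_loss expects: one row per epoch
# with the columns read above (the values here are made up).
_loss_history_example = pd.DataFrame(
    [[1.2, 0.9, 0.3, -0.5, -0.7, 0.2]],
    columns=["d_loss", "d_cost", "gp", "g_loss", "g_cost", "E_loss"],
)
# plot_loss(_loss_history_example)  # would draw the four diagnostic panels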
"""## OPS functions"""
def single_layer_energy(x):
shape = x.shape
return K.reshape(K.sum(x, axis=list(range(1, len(shape)))), (-1, 1))
def single_layer_energy_output_shape(input_shape):
shape = list(input_shape)
assert len(shape) == 4
return (shape[0], 1)
def calculate_energy(x):
return Lambda(single_layer_energy, single_layer_energy_output_shape, name='Output_energy')(x)
if final_layer_activation == "relu":
energy_cut = 0
def single_layer_sparsity(x):
shape = x.shape
x = K.cast(x>(energy_cut/1000), #Convert MeV to GeV
K.floatx())
return K.reshape(K.sum(x, axis=list(range(1, len(shape)))), (-1, 1))/400
def calculate_sparsity(x):
return Lambda(single_layer_sparsity, single_layer_energy_output_shape, name='sparsity')(x)
def sparsity_level(x):
_shape = x.shape
shape = K.shape(x)
total = K.cast(K.prod(shape[1:]), K.floatx())
return K.reshape(K.sum(
K.cast(x > (energy_cut), K.floatx()), axis=list(range(1, len(_shape)))
), (-1, 1)) / total
def sparsity_output_shape(input_shape):
shape = list(input_shape)
return (shape[0], 1)
def minibatch_discriminator(x):
""" Computes minibatch discrimination features from input tensor x"""
diffs = K.expand_dims(x, 3) - \
K.expand_dims(K.permute_dimensions(x, [1, 2, 0]), 0)
l1_norm = K.sum(K.abs(diffs), axis=2)
return K.sum(K.exp(-l1_norm), axis=2)
def minibatch_output_shape(input_shape):
""" Computes output shape for a minibatch discrimination layer"""
shape = list(input_shape)
assert len(shape) == 3 # only valid for 3D tensors
return tuple(shape[:2])
def scale(x, v):
return Lambda(lambda _: _ / v)(x)
def energy_loss_function(input_energy, output_energy):
energy_redundent = output_energy - input_energy
energy_gap = input_energy - output_energy
energy_difference = lambda_E * energy_redundent * K.cast(energy_redundent>0, K.floatx()) + E_factor * energy_gap * K.cast(energy_gap>0, K.floatx())
loss = K.mean(energy_difference, axis=-1)
    assert K.ndim(loss) == 1  # one penalty value per event
return loss
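# Quick numeric check of the asymmetric energy penalty (illustration only): with
# lambda_E = 1e2 and E_factor = 0, only generated energy *above* the requested
# input energy is penalised.
_e_in = K.constant([[8.0], [8.0]])
_e_out = K.constant([[9.0], [7.0]])
print("energy loss per event:", K.eval(energy_loss_function(_e_in, _e_out)))  # ~[100., 0.]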
def energy_gap_function(input_energy, output_energy):
energy_gap = input_energy - output_energy
energy_difference = energy_gap * K.cast(energy_gap>0, K.floatx())
return K.mean(energy_difference, axis=-1)
def wasserstein_loss(y_true, y_pred):
return K.mean(y_true * y_pred)
def cut_image(image, energy_cut):
return np.where(image > energy_cut, image, 0)
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = Dense(units, activation=tf.nn.gelu)(x)
x = Dropout(dropout_rate)(x)
return x
"""# Generator and discriminator
## Discriminator
"""
def build_discriminator():
#Input
calorimeter = Input(shape=[20,20,1],name='images')
input_energy = Input(shape=(1,),name='energy')
output_energy = calculate_energy(calorimeter)
discriminator_inputs = [calorimeter, input_energy]
#CNN
x = Conv2D(32, (2, 2), padding='same')(calorimeter)
x = LeakyReLU()(x)
x = ZeroPadding2D((1, 1))(x)
#x = LocallyConnected2D(16, (3, 3), padding='valid', strides=(1, 2))(x)
x = LocallyConnected2D(16, (3, 3), padding='valid', strides=(2, 2))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1, 1))(x)
x = LocallyConnected2D(8, (2, 2), padding='valid')(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
x = ZeroPadding2D((1, 1))(x)
#x = LocallyConnected2D(8, (2, 2), padding='valid', strides=(1, 2))(x)
x = LocallyConnected2D(8, (2, 2), padding='valid', strides=(2, 2))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = LayerNormalization()(x)
#x = MinibatchStdev(name='feature_stdev')(x)
    #Maybe it will be helpful to use LN after CNN, so the feature learned by CNN is not sabotaged.
x = Dropout(0.1)(x)
#x = MinibatchStdev()(x)
x = Flatten()(x)
#minibatch_featurizer = Lambda(minibatch_discriminator, output_shape=minibatch_output_shape)
nb_features = 24
vspace_dim = 24
#K_features = Dense3D(nb_features, vspace_dim, name='K_features')(x)
#mbd_features = Activation('tanh', name='mbd_features')(minibatch_featurizer(K_features))
#features = [x]
#features.append(mbd_features)
#sparsity_detector = Lambda(sparsity_level, sparsity_output_shape, name='pseudo_sparsity')
#empirical_sparsity = sparsity_detector(calorimeter)
#features.append(empirical_sparsity)
#K_sparsity = Dense3D(nb_features, vspace_dim, name='K_sparsity')(empirical_sparsity)
#mbd_sparsity = Activation('tanh', name='mbd_sparsity')(minibatch_featurizer(K_sparsity))
#sparsity = calculate_sparsity(calorimeter)
#K_energy = Dense3D(nb_features, vspace_dim, name='K_energy')(output_energy)
#energy_too_big = Lambda(lambda x: 20 * K.cast(x > 8, K.floatx()))(output_energy)
#mbd_energy = Activation('tanh', name='mbd_energy')(minibatch_featurizer(K_energy))
energy_well = Lambda(lambda x: K.abs(x[0]-x[1]))([output_energy, input_energy])
#well_too_big = Lambda(lambda x: 10 * K.cast(x > 3, K.floatx()))(energy_well)
#redundent_energy = Lambda(lambda x: x[0]-x[1])([output_energy, input_energy])
#positive_redundent = Lambda(lambda x: K.cast(x > 0, K.floatx()))(redundent_energy)
p = concatenate([
#concatenate(features),
x,
energy_well,
#sparsity,
output_energy,
#mbd_features,
#mbd_energy,
#mbd_sparsity,
#well_too_big,
#redundent_energy,
#positive_redundent,
#empirical_sparsity,
#trans_outputs,
])
p = LayerNormalization()(p)
#p = Dense(30)(p)
'''
##################################################################################################
#features = [x]
sparsity_detector = Lambda(sparsity_level, sparsity_output_shape, name='pseudo_sparsity')
empirical_sparsity = sparsity_detector(calorimeter)
empirical_sparsity = MinibatchVariableStdev(name='sparsity_stdev')(empirical_sparsity)
output_energy_stdev = MinibatchVariableStdev(name='energy_stdev')(output_energy)
p = concatenate([
x,
empirical_sparsity,
output_energy_stdev,
])
##################################################################################################
'''
#p = Dense(20)(p)
fake = Dense(1, name='Real_or_fake')(p)
#fake = Dense(1, name='Real_or_fake')(x)
discriminator_outputs = [fake,
output_energy,
]
discriminator = Model(discriminator_inputs, discriminator_outputs, name='Discriminator_model')
return discriminator
print("Building discriminator")
d_model = build_discriminator()
d_model.summary()
keras.utils.plot_model(d_model, show_shapes=True)
"""## Generator"""
def upsample_block(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
up_size=(2, 2),
padding="same",
use_bn=False,
use_bias=True,
use_dropout=False,
drop_value=0.3,
):
x = UpSampling2D(up_size)(x)
x = Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = BatchNormalization()(x)
if activation:
x = activation(x)
if use_dropout:
x = Dropout(drop_value)(x)
return x
def build_generator(nb_rows = 20, nb_cols = 20):
#Input
latent = Input(shape=(latent_size, ), name='z')
input_energy = Input(shape=(1, ), dtype='float32', name='energy')
generator_inputs = [latent, input_energy]
    # energy-conditioned latent; note it is immediately overridden below, so the
    # generator currently consumes the raw latent vector only
    h = Lambda(lambda x: x[0] * x[1])([latent, input_energy])
    h = latent
#Network
if g_network_type == "UpSampling":
x = Dense(3 * 3 * 256, use_bias=False)(h)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
x = Reshape((3, 3, 256))(x)
x = upsample_block(
x,
128,
LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
x = upsample_block(
x,
64,
LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
#x = upsample_block(x, 1, Activation("sigmoid"), strides=(1, 1), use_bias=False, use_bn=True)
#x = Lambda(lambda x: 8*x)(x)
x = upsample_block(
x, 1, Activation(final_layer_activation), strides=(1, 1), use_bias=False, use_bn=True
)
image = Cropping2D((2, 2))(x)
else :
x = Dense((nb_rows + 2) * (nb_cols + 2) * 36)(h)
x = Reshape((nb_rows + 2, nb_cols + 2, 36))(x)
x = Conv2D(16, (2, 2), padding='same', kernel_initializer='he_uniform')(x)
x = LeakyReLU(alpha=0.03)(x)
x = BatchNormalization()(x)
x = LocallyConnected2D(6, (2, 2), kernel_initializer='he_uniform')(x)
x = LeakyReLU(alpha=0.03)(x)
x = LocallyConnected2D(1, (2, 2),
#use_bias=False,
kernel_initializer='glorot_normal'
#kernel_initializer=initializers.RandomUniform(minval=-0.1, maxval=-0.01, seed=None)
#kernel_initializer=initializers.RandomNormal(mean=-10.0, stddev=0.05, seed=None)
)(x)
#image = Activation("softplus")(x)
image = Activation(final_layer_activation)(x)
#x = Activation("sigmoid")(x)
#image = Lambda(lambda x: 8*x)(x)
#Map (0,1) to (0,8) GeV
#image = Lambda(lambda x: 8*x)(x)
generator = Model(generator_inputs, image, name='Generator_model')
return generator
print("Building generator")
g_model = build_generator()
g_model.summary()
keras.utils.plot_model(g_model, show_shapes=True)
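# Quick smoke test (illustration only): a single generator forward pass on random
# inputs; with the default "DownSampling" network the output is one 20x20 shower.
_z_demo = tf.random.uniform((1, latent_size), minval=-1, maxval=1)
_e_demo = tf.constant([[8.0]])
print("generated image shape:", g_model([_z_demo, _e_demo]).shape)  # (1, 20, 20, 1)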
"""# WGAN model"""
class WGAN(keras.Model):
def __init__(
self,
discriminator,
generator,
latent_dim,
discriminator_extra_steps=1,
generator_extra_steps=2,
gp_weight=10.0,
E_loss_weight = 10.0,
#LOGAN
z_alpha = z_alpha,
beta = z_beta
):
super(WGAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_size
self.d_steps = discriminator_extra_steps
self.g_steps = generator_extra_steps
self.gp_weight = gp_weight
self.E_loss_weight = E_loss_weight
#LOGAN
self.z_alpha = z_alpha
self.beta = beta
def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn, E_loss_fn):
super(WGAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.d_loss_fn = d_loss_fn
self.g_loss_fn = g_loss_fn
self.E_loss_fn = E_loss_fn
def gradient_penalty(self, batch_size, real_images, fake_images, input_energies):
""" Calculates the gradient penalty.
This loss is calculated on an interpolated image
and added to the discriminator loss.
"""
# Get the interpolated image
alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
diff = fake_images - real_images
interpolated = real_images + alpha * diff
with tf.GradientTape() as gp_tape:
gp_tape.watch(interpolated)
# 1. Get the discriminator output for this interpolated image.
d_inputs = [interpolated, input_energies]
pred, _ = self.discriminator(d_inputs, training=True)
# 2. Calculate the gradients w.r.t to this interpolated image.
grads = gp_tape.gradient(pred, [interpolated])[0]
# 3. Calculate the norm of the gradients.
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean((norm - 1.0) ** 2)
return gp
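    # Note: this is the standard two-sided WGAN-GP penalty,
    # E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2] with x_hat = x_real + alpha * (x_fake - x_real),
    # added to the critic loss in train_step with weight self.gp_weight.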
def GD(self, z_gradient):
delta_z = self.z_alpha * z_gradient
return delta_z
def NGD(self, z_gradient):
norm_sq = tf.reduce_sum(tf.square(z_gradient))
# delta_z = alpha / (beta + ||g||^2) * g
delta_z = (self.z_alpha / (self.beta + norm_sq)) * z_gradient
return delta_z
    #@tf.function
def train_step(self, train_data):
real_images = train_data['images']
real_input_energies = train_data['energy']
#if isinstance(real_images, tuple):
# real_images = real_images[0]
# Get the batch size
batch_size = tf.shape(real_images)[0]
#random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
random_latent_vectors = tf.random.uniform(shape=(batch_size, self.latent_dim), minval=-1, maxval=1)
#LOGAN
if use_latent_optimization:
################################################
with tf.GradientTape() as tape:
tape.watch(random_latent_vectors)
# Generate fake images using the generator
g_inputs = {
'z':random_latent_vectors,
'energy':real_input_energies}
generated_images = self.generator(g_inputs, training=True)
# Get the discriminator logits for fake images
d_inputs = {
'images':generated_images,
'energy':real_input_energies
}
gen_img_logits, output_energies = self.discriminator(d_inputs, training=True)
z_gradient = tape.gradient(gen_img_logits, random_latent_vectors)
#GD
#random_latent_vectors -= self.GD(z_gradient)
#NGD
random_latent_vectors = tf.clip_by_value(random_latent_vectors+self.NGD(z_gradient), clip_value_min=-1, clip_value_max=1)
#random_latent_vectors = random_latent_vectors+self.NGD(z_gradient)
################################################
for i in range(self.g_steps):
# Train the generator
# Get the latent vector
with tf.GradientTape() as tape:
g_inputs = {
'z':random_latent_vectors,
'energy':real_input_energies}
generated_images = self.generator(g_inputs, training=True)
# Get the discriminator logits for fake images
d_inputs = {
'images':generated_images,
'energy':real_input_energies
}
gen_img_logits, output_energies = self.discriminator(d_inputs, training=True)
# Calculate the generator loss
g_cost = self.g_loss_fn(gen_img_logits)
E_loss = self.E_loss_fn(real_input_energies, output_energies)
g_loss = g_cost + E_loss * self.E_loss_weight
# Get the gradients w.r.t the generator loss
gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)
# Update the weights of the generator using the generator optimizer
self.g_optimizer.apply_gradients(
zip(gen_gradient, self.generator.trainable_variables)
)
for i in range(self.d_steps):
g_inputs = {
'z':random_latent_vectors,
'energy':real_input_energies
}
with tf.GradientTape() as tape:
# Get the logits for the real images
#d_inputs = [real_images, real_input_energies]
real_logits, _ = self.discriminator(train_data, training=True)
# Generate fake images from the latent vector
fake_images = self.generator(g_inputs, training=True)
# Get the logits for the fake images
d_inputs = {
'images': fake_images,
'energy': real_input_energies}
fake_logits, _ = self.discriminator(d_inputs, training=True)
# Calculate the discriminator loss using the fake and real image logits
d_cost = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits)
# Calculate the gradient penalty
gp = self.gradient_penalty(batch_size, real_images, fake_images, real_input_energies)
# Add the gradient penalty to the original discriminator loss
d_loss = d_cost + gp * self.gp_weight
# Get the gradients w.r.t the discriminator loss
d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)
# Update the weights of the discriminator using the discriminator optimizer
self.d_optimizer.apply_gradients(
zip(d_gradient, self.discriminator.trainable_variables)
)
return {"d_loss": d_loss,"d_cost": d_cost, "gp": gp,"g_loss": g_loss, "g_cost": g_cost,"E_loss": E_loss}
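# Minimal NumPy sketch of the LOGAN latent update performed by WGAN.NGD above
# (illustrative only; the alpha/beta defaults here are placeholders, the model
# uses z_alpha/z_beta): delta_z = alpha / (beta + ||g||^2) * g, then z is clipped to [-1, 1].
def _logan_ngd_step_numpy(z, g, alpha=0.9, beta=0.1):
    """Toy helper, never called during training."""
    delta_z = alpha / (beta + np.sum(g ** 2)) * g
    return np.clip(z + delta_z, -1.0, 1.0)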
class GANMonitor(keras.callbacks.Callback):
def __init__(self):
self.Event = np.random.randint(sizes[0])
#self.seed_noise = np.random.normal(0, 1, (1, latent_size))
self.seed_noise = np.random.uniform(-1, 1, (1, latent_size))
#self.seed_energy = np.array(Energy[self.Event]).reshape(1,1)
        self.seed_energy = np.array([[8.0]])  # shape (1, 1) to match the generator's 'energy' input
self.seed = [self.seed_noise, self.seed_energy]
self.losses = pd.DataFrame(columns = ["d_loss","d_cost", "gp", "g_loss", "g_cost","E_loss"])
self.losses_iter = pd.DataFrame(columns = ["d_loss","d_cost", "gp", "g_loss", "g_cost","E_loss"])
self.batch_num = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_num += 1
'''
if self.batch_num%100 == 1:
generated_image = self.model.generator(self.seed)
test_generated_image = 1000 * generated_image[0,:,:,0]
plot_loss(self.losses)
test_generated_image = cut_image(test_generated_image, energy_cut)
plot_3D_image(test_generated_image, -1)
plot_2D_image(test_generated_image, -1)
plot_loss(self.losses_iter,x_title='Train step', fig_width=30, fig_height=10)
print("The output energy is ",1000 * single_layer_energy(generated_image))
print(np.max(test_generated_image))
print("Logs of batch {}: ".format(batch),logs)
# Get the current learning rate from model's optimizer.
#lr = float(K.get_value(wgan.d_optimizer.learning_rate))
#print("The learning rate of discriminator is {}".format(lr))
'''
self.losses_iter.loc[len(self.losses_iter)] = logs
def on_epoch_end(self, epoch, logs=None):
self.losses.loc[len(self.losses)] = logs
self.model.generator.save_weights('{0}{1:03d}.hdf5'.format(g_pfx, len(self.losses)),
overwrite=True)
self.model.discriminator.save_weights('{0}{1:03d}.hdf5'.format(d_pfx, len(self.losses)),
overwrite=True)
        generated_image = self.model.generator(self.seed)
        test_generated_image = 1000 * generated_image[0, :, :, 0].numpy()
        if np.min(test_generated_image) != 0:
            min_non_zero_cell = np.min(test_generated_image[np.nonzero(test_generated_image)])
        else:
            min_non_zero_cell = 0
        print("The min is {}".format(min_non_zero_cell))
total_epoch = len(self.losses)
'''
plot_loss(self.losses)
#test_generated_image = np.where(test_generated_image > energy_cut, test_generated_image, 0)
test_generated_image = cut_image(test_generated_image, energy_cut)
plot_3D_image(test_generated_image, total_epoch)
plot_2D_image(test_generated_image, total_epoch)
print("The output energy is ",1000 * single_layer_energy(generated_image))
print(np.max(test_generated_image))
print("Logs of epoch{}: ".format(epoch),logs)p
'''
self.batch_num = 0
#Reset the loss for iter per batch
plot_loss(self.losses_iter, x_title='Train step', fig_width=30, fig_height=10)
self.losses_iter = pd.DataFrame(columns = ["d_loss","d_cost", "gp", "g_loss", "g_cost","E_loss"])
#generator_optimizer = Adam(2e-4, beta_1=0.5, beta_2=0.9)
generator_optimizer = Nadam()
# Unused alternative schedule (the PolynomialDecay defined next is the one actually applied):
#d_lr_schedule = keras.optimizers.schedules.ExponentialDecay(
#    initial_learning_rate=disc_lr,
#    decay_steps=decay_steps,
#    decay_rate=decay_rate)
d_lr_schedule = keras.optimizers.schedules.PolynomialDecay(initial_learning_rate=disc_lr, decay_steps=decay_steps, end_learning_rate=end_learning_rate,
power=decay_power,cycle=True, name=None)
discriminator_optimizer = Adam(learning_rate=d_lr_schedule,
beta_1=adam_beta_1,
beta_2=adam_beta_2
)
#discriminator_optimizer = Nadam(learning_rate=disc_lr)
# Define the loss functions for the discriminator,
# which should be (fake_loss - real_loss).
# We will add the gradient penalty later to this loss function.
def discriminator_loss(real_img, fake_img):
real_loss = tf.reduce_mean(real_img)
fake_loss = tf.reduce_mean(fake_img)
return fake_loss - real_loss
# Define the loss functions for the generator.
def generator_loss(fake_img):
return -tf.reduce_mean(fake_img)
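# Worked example of the sign convention above: with critic scores real_img = [2., 1.]
# and fake_img = [-1., 0.], discriminator_loss = mean(fake) - mean(real) = -0.5 - 1.5 = -2.0
# while generator_loss = -mean(fake) = 0.5, so the critic separates the two sets and the
# generator is pushed to raise the critic score of its samples.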
def energy_loss_function(input_energy, output_energy):
energy_surplus = output_energy - input_energy
energy_gap = input_energy - output_energy
loss = tf.reduce_mean(tf.where(energy_surplus>0, energy_surplus, 0))
#+ E_factor * tf.reduce_mean(tf.where(energy_gap>0, energy_gap, 0))
return loss
def energy_loss(input_energy, output_energy):
return tf.reduce_mean(output_energy - input_energy)
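# Numeric illustration (hypothetical values): with input = [8., 8.] and output = [9., 7.],
# energy_loss_function penalises only the surplus, mean([1., 0.]) = 0.5, whereas the
# symmetric energy_loss above returns mean([1., -1.]) = 0.0.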
# Instantiate the custom `GANMonitor` Keras callback.
cbk = GANMonitor()
# Instantiate the WGAN model.
wgan = WGAN(
discriminator=d_model,
generator=g_model,
latent_dim=latent_size,
discriminator_extra_steps=discriminator_extra_steps,
generator_extra_steps=generator_extra_step,
E_loss_weight = 10.0
)
# Compile the WGAN model.
wgan.compile(
d_optimizer=discriminator_optimizer,
g_optimizer=generator_optimizer,
g_loss_fn=generator_loss,
d_loss_fn=discriminator_loss,
E_loss_fn=energy_loss_function
)
"""# Train"""
if not Model_to_load:
epochs = 5
wgan.fit(train_dataset.prefetch(tf.data.AUTOTUNE),
#batch_size=BATCH_SIZE,
             #Somehow this gives a warning
epochs=epochs,
callbacks=[cbk,
#tensorboard_callback,
#lr_callback
]
)
print(wgan.d_optimizer.learning_rate)
print(wgan.g_optimizer.learning_rate)
"""# Load weights"""
if Model_to_load:
g_model.load_weights(Model_to_load)
d_model.load_weights(d_model_to_load)
path = "/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/Models/"
save_time = '2021-07-22_04:17:29'
#d_model_to_load = path+'2021-07-14_21:47:32LOGANdiscriminator_PN_Target_4e5.hdf5'
#Model_to_load = path+'2021-07-14_21:47:32LOGANgenerator_PN_Target_4e5.hdf5'
g_model.load_weights('params_generator_epoch_015.hdf5')
d_model.load_weights('params_discriminator_epoch_015.hdf5')
#g_model.load_weights(path+'2021-07-22_04:17:29LOGANgenerator_PN_ECAL_4e5.hdf5')
#d_model.load_weights(path+'2021-07-22_04:17:29LOGANdiscriminator_PN_ECAL_4e5.hdf5')
"""# Test model"""
#@title Cut on variables { run: "auto" }
use_LO = False #@param ["True", "False"] {type:"raw"}
cut_on_variables = False #@param ["True", "False"] {type:"raw"}
test_size_1 = 1000000 #@param {type:"integer"}
"""## Test functions"""
def heat_map(Input_energy, Output_energy, xbin=70, ybin=70):
fig = plt.figure(num=1, figsize=(10,10))
ax = fig.add_subplot(111)
ax.set_xlim(0,9000)
ax.set_ylim(0,9000)
ax.hist2d(Input_energy, Output_energy, bins=(xbin, ybin), cmap=plt.cm.jet, range=((0,9000),(0,9000)))
ax.set_xlabel('Input energy (MeV)')
ax.set_ylabel('Layer energy sum (MeV)')
plt.show()
def plot_hist(image, bin=50):
plt.hist(image, bins=bin, alpha=0.75)
plt.xlabel('Energy (MeV)')
plt.show()
def plot_compare_hist(real_images, fake_images, title,y_log_scale = True, x_log_scale=False):
colors = matplotlib.cm.gnuplot2(np.linspace(0.2, 0.8, 3))
plt.figure(figsize=(10, 10))
plt.hist(real_images, bins=100,histtype='stepfilled', density=True,linewidth=2,
alpha=0.2, color=colors[0],
label=r'GEANT4')
plt.hist(fake_images, bins=100, histtype='step', density=True,linewidth=3,
alpha=1, color=colors[0],
label=r'GAN')
if y_log_scale:
plt.yscale('log')
if x_log_scale:
plt.xscale('log')
plt.legend(loc='upper right', fontsize=20, ncol=2)
plt.xlabel(title)
plt.show()
@tf.function
def optimize_latent(random_latent_vectors, generator, discriminator):
with tf.GradientTape() as tape:
tape.watch(random_latent_vectors)
# Generate fake images using the generator
g_inputs = {
'z':random_latent_vectors,
            'energy':real_input_energies}  # NOTE: assumes real_input_energies is defined at module scope with a matching batch size
generated_images = generator(g_inputs, training=False)
# Get the discriminator logits for fake images
d_inputs = {
'images':generated_images,
'energy':real_input_energies
}
gen_img_logits, output_energies = discriminator(d_inputs, training=False)
z_gradient = tape.gradient(gen_img_logits, random_latent_vectors)
delta_z = wgan.NGD(z_gradient)
random_latent_vectors = random_latent_vectors + delta_z
#random_latent_vectors = tf.clip_by_value(random_latent_vectors+delta_z, clip_value_min=-1, clip_value_max=1)
return random_latent_vectors
def generate_test_dataset(test_size = 500000, optimize = True):
#test_noise = np.random.normal(0, 1, (test_size, latent_size))
#test_noise = tf.random.normal(shape=(test_size, latent_size))
#test_noise = optimize_latent(test_noise, g_model, d_model)
if test_size > 1000000:
test_noise = tf.concat([
tf.random.uniform(shape=(1000000, latent_size), minval=-1, maxval=1) for i in range(int(test_size/1000000))
], axis=0)
else:
test_noise = tf.random.uniform(shape=(test_size, latent_size), minval=-1, maxval=1)
if optimize:
test_batch_size = batch_size
multiple = int(test_size/test_batch_size)
test_noise = tf.reshape(test_noise, [multiple, test_batch_size, -1])
#noise_dataset = tf.data.Dataset.from_tensor_slices(test_noise).batch(batch_size)
'''
test_noise = tf.concat([optimize_latent(
tf.random.uniform(shape=(test_batch_size, latent_size), minval=-1, maxval=1),
g_model, d_model
) for i in tqdm(range(multiple))], axis=0)
'''
test_noise = tf.concat([optimize_latent(
test_noise[i],
g_model, d_model
) for i in tqdm(range(multiple))], axis=0)
#test_noise = tf.map_fn(fn=lambda z: optimize_latent(z, g_model, d_model), elems=test_noise)
#test_noise = tf.reshape(test_noise, [multiple*test_batch_size, -1])
#test_sampled_energies = np.random.choice(Energy, test_size)
test_sampled_energies = np.ones(test_size) * 8
plot_hist(np.array(test_noise).flatten())
test_dataset = (
tf.data.Dataset.from_tensor_slices({
'z' :test_noise,
'energy' :test_sampled_energies
})
.batch(batch_size)
)
return test_dataset, test_sampled_energies
"""## Test on 8GeV
### Generate images
"""
#Use our model to generate image and convert to the scale of MeV
#test_size_1 = 498197
#test_size_1 = 4000000
test_dataset_1, test_sampled_energies_1 = generate_test_dataset(test_size = test_size_1,
optimize = use_LO
#optimize = False
)
test_generated_image_1 = 1000 * g_model.predict(test_dataset_1, verbose=1)#GeV -> MeV
#aux_model = "/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/Models/2021-05-30_00:59:02WGANgenerator_['Inclusive_cut_7GeV'].hdf5"
aux_model = None
if aux_model:
g_model.load_weights(aux_model)
#test_size_2 = 5000
test_size_2 = 1803
test_dataset_2, test_sampled_energies_2 = generate_test_dataset(test_size = test_size_2)
test_generated_image_2 = 1000 * g_model.predict(test_dataset_2, verbose=1)#GeV -> MeV
test_generated_image = np.concatenate((test_generated_image_1, test_generated_image_2), axis=0)
test_sampled_energies = np.concatenate((test_sampled_energies_1, test_sampled_energies_2), axis=0)
test_size = test_size_1 + test_size_2
else :
test_generated_image = test_generated_image_1
test_sampled_energies = test_sampled_energies_1
test_size = test_size_1
plot_2D_image(cut_image(test_generated_image[0,:,:,0], energy_cut),-1)
print(test_sampled_energies[0])
#Calculated the energies
test_generated_image = cut_image(test_generated_image, energy_cut)
test_generated_image_1.dtype
"""### Check random image"""
print(test_generated_image.shape)
for sample in train_dataset.take(1):
print(sample['images'].shape)
print(sample['energy'].shape)
event_num = np.random.randint(len(sample['images']))
real_image = 1000 * sample['images'][event_num,:,:,0]
real_image = cut_image(real_image,
energy_cut
)
plot_2D_image(real_image,-2)
print(sizes)
event_num = np.random.randint(len(test_generated_image))
fake_image = test_generated_image[event_num,:,:,0]
if hdf5_dataset:
event_num = np.random.randint(len(ECAL_centre))
real_image = ECAL_centre[event_num]
plot_2D_image(real_image,-2)
fake_image = cut_image(fake_image,
1e-3
#np.min(np.where(real_image>0,real_image,100))
)
plot_2D_image(fake_image, -1)
"""### Load variables"""
if hdf5_dataset:
hdf5_filenames = list(particle_label.values())[0][:-5]+'_variables.hdf5'
if tfrecord_dataset:
hdf5_filenames = list(particle_label.values())[0][:-9]+'_variables.hdf5'
print(hdf5_filenames)
variable_data = dd.io.load(hdf5_filenames)
x = 25*np.arange(-9.5,10,1)
y = x
x_coor,y_coor = np.meshgrid(x,y,indexing = 'xy')
x_coor = x_coor.reshape(-1)
y_coor = y_coor.reshape(-1)
def get_total_E(image):
return np.sum(image)
def get_E_max(image):
return np.max(image)
def get_frac_n(image, n):
top_n = np.sort(image.flatten())[::-1][:n]
return np.sum(top_n)/np.sum(image)
def get_x_moment(image):
return np.average(x_coor,weights=image.reshape(image.size))
def get_y_moment(image):
return np.average(y_coor,weights=image.reshape(image.size))
def get_x_sq(image):
return np.average(x_coor**2,weights=image.reshape(image.size))
def get_y_sq(image):
return np.average(y_coor**2,weights=image.reshape(image.size))
def get_r(image):
return np.average(np.sqrt(x_coor**2 + y_coor**2),weights=image.reshape(image.size))
def get_sparsity(image, cut_value):
return np.average(np.where(image>cut_value, 1, 0))
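# Usage sketch for the shower observables above (hypothetical toy image, kept as a
# comment because the weighted moments require the 20x20 x_coor/y_coor grids):
# toy = np.zeros((20, 20)); toy[9, 9] = 6.0; toy[9, 10] = 2.0
# get_total_E(toy) -> 8.0, get_E_max(toy) -> 6.0, get_frac_n(toy, 1) -> 0.75,
# get_sparsity(toy, energy_cut) -> fraction of the 400 cells above the cut.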
functions_of_variables = {
'total energy': get_total_E,
'x_moment': get_x_moment,
'y_moment': get_y_moment,
'x_sq': get_x_sq,
'y_sq': get_y_sq,
'r': get_r,
'E_max': get_E_max,
'pseudo-sparsity': lambda image: get_sparsity(image, energy_cut),
'frac_50': lambda image: get_frac_n(image, 50),
'frac_100': lambda image: get_frac_n(image, 100),
'frac_200': lambda image: get_frac_n(image, 200)
}
def get_variables(func, images):
return np.fromiter((func(x) for x in images), images.dtype)
def variable_dict(images, **functions):
variables = {}
for key, func in functions.items():
variables[key] = get_variables(func, images)
return variables
generated_variables = variable_dict(test_generated_image, **functions_of_variables)
print(generated_variables.keys())
print(variable_data.keys())
print(type(variable_data))
print(type(generated_variables))
print(test_generated_image.shape)
selected_variables = ['total energy', 'x_moment', 'y_moment', 'x_sq', 'y_sq', 'r', 'E_max']
#selected_variables = generated_variables.keys()
target = pd.DataFrame(data={key:variable_data[key] for key in selected_variables})
original = pd.DataFrame(data={key:generated_variables[key] for key in selected_variables})
if cut_on_variables:
for col in target.columns:
qualified = (original[col] <= target[col].max()) & (original[col] >= target[col].min())
original = original[qualified]
test_generated_image = test_generated_image[qualified]
print(target.columns)
print(original.columns)
print(original.shape)
print(test_generated_image.shape)
"""### Install required package"""
"""### Function to plot"""
columns = original.columns
KS_data = pd.DataFrame(columns=columns)
def draw_distributions(original, target, new_original_weights, y_log_scale= True, x_log_scale=False):
KS_values = {}
colors = matplotlib.cm.gnuplot2(np.linspace(0.2, 0.8, 3))
    num_row = int(np.ceil(len(columns) / 2))
plt.figure(figsize=[20, 7*num_row])
for id, column in enumerate(columns, 1):
xlim = np.percentile(np.hstack([target[column]]), [0.01, 99.99])
plt.subplot(num_row, 2, id)
plt.hist(target[column], bins=100, histtype='stepfilled',
density=True,
linewidth=1,
range=(np.min(target[column]), np.max(target[column])),
alpha=0.2, color=colors[0],
label=r'DSS')
plt.hist(original[column], weights=new_original_weights, bins=100, histtype='step',
density=True,
linewidth=2,
range=(np.min(target[column]), np.max(target[column])),
alpha=1, color=colors[0],
label=r'GAN')
if y_log_scale:
plt.yscale('log')
if x_log_scale:
plt.xscale('log')
plt.legend(loc='upper right', fontsize=20, ncol=2)
plt.title(column)
        ks = ks_2samp_weighted(original[column], target[column],
                               weights1=new_original_weights,
                               weights2=np.ones(len(target), dtype=float))
        print('KS over ', column, ' = ', ks)
        KS_values[column] = ks
return KS_values
"""### Original Distribution"""
original_weights = np.ones(len(original))
KS_data.loc['original'] = draw_distributions(original, target, original_weights)
_ = draw_distributions(original, target, original_weights, y_log_scale=False)
print(np.sum([i for i in _.values()]))
"""# Reweighter
### Weights dataframe
"""
weights_data = pd.DataFrame()
"""### Gradient Boosted Reweighter"""
reweighter = reweight.GBReweighter(n_estimators=50, learning_rate=0.1, max_depth=3, min_samples_leaf=1000,
gb_args={'subsample': 0.4})
reweighter.fit(original, target)
gb_weights_test = reweighter.predict_weights(original)
# validate reweighting rule on the test part comparing 1d projections
KS_data.loc['Gradient Boosted Reweighter'] = draw_distributions(original, target, gb_weights_test)
_ = draw_distributions(original, target, gb_weights_test, y_log_scale=False)
weights_data['GB_weights'] = gb_weights_test
#plot_2D_image(np.average(test_generated_image, weights=gb_weights_test,axis=0)[:,:,0],-1)
#plot_2D_image(np.mean(ECAL_centre,axis=0),-2)
"""### Bins-based reweighting in n dimensions"""
bins_reweighter = reweight.BinsReweighter(n_bins=100, n_neighs=0)
bins_reweighter.fit(original[['total energy']], target[['total energy']])
bins_weights_test = bins_reweighter.predict_weights(original[['total energy']])
# validate reweighting rule on the test part comparing 1d projections
KS_data.loc['Bins-based: total energy'] = draw_distributions(original, target, bins_weights_test)
#plot_2D_image(np.average(test_generated_image, weights=bins_weights_test,axis=0)[:,:,0],-1)
#plot_2D_image(np.mean(ECAL_centre,axis=0),-2)
weights_data['energy_bin_weights'] = bins_weights_test
bins_reweighter = reweight.BinsReweighter(n_bins=100, n_neighs=0)
bins_reweighter.fit(original[['total energy','x_sq']], target[['total energy','x_sq']])
bins_weights_test = bins_reweighter.predict_weights(original[['total energy','x_sq']])
KS_data.loc['Bins-based: total energy and x_sq'] = draw_distributions(original, target, bins_weights_test)
#plot_2D_image(np.average(test_generated_image, weights=bins_weights_test,axis=0)[:,:,0],-1)
#plot_2D_image(np.mean(ECAL_centre,axis=0),-2)
weights_data['energy&x_sq_bin_weights'] = bins_weights_test
"""### Folding reweighter"""
reweighter_base = reweight.GBReweighter(n_estimators=70,
learning_rate=0.1, max_depth=4, min_samples_leaf=1000,
gb_args={'subsample': 0.4})
reweighter = reweight.FoldingReweighter(reweighter_base, n_folds=2)
# there is no need to divide the data into train/test parts; the reweighter can be trained on the whole sample
reweighter.fit(original, target)
# predict method provides unbiased weights prediction for the whole sample
# folding reweighter contains two reweighters, each is trained on one half of samples
# during predictions each reweighter predicts another half of samples not used in training
folding_weights = reweighter.predict_weights(original)
KS_data.loc['Folding reweighter'] = draw_distributions(original, target, folding_weights)
#plot_2D_image(np.average(test_generated_image, weights=folding_weights,axis=0)[:,:,0],-1)
#plot_2D_image(np.mean(ECAL_centre,axis=0),-2)
_ = draw_distributions(original, target, folding_weights,y_log_scale=False)
weights_data['folding_weights'] = folding_weights
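# Sketch of what FoldingReweighter with n_folds=2 does internally: `original` is split
# into two halves, one GBReweighter is trained on each half against `target`, and each
# event receives its weight from the reweighter that did not see it during training,
# which is why no separate train/test split is needed here.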
"""### Results"""
ax = KS_data.plot.barh(stacked=True)
print(KS_data.apply(lambda x:x.sum(),axis =1))
"""# Save model"""
from datetime import datetime
import pytz
tz = pytz.timezone('Asia/Shanghai') #GMT +8
t = datetime.fromtimestamp(int(time.time()),
pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d_%H:%M:%S')
print(t)
#save_type = le.inverse_transform(range(nb_classes))
save_type = list(particle_label.keys())[0]
print("Type :",save_type)
g_model.save_weights('/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/Models/{}LOGANgenerator_{}.hdf5'.format(t, save_type),
                     overwrite=True)
d_model.save_weights('/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/Models/{}LOGANdiscriminator_{}.hdf5'.format(t, save_type),
                     overwrite=True)
saved_model_path = '/content/drive/Shareddrives/desmond.z.he1998.HK(CRN.NGO)/saved_model'
#tf.saved_model.save(g_model, saved_model_path)
#tf.saved_model.save(d_model, saved_model_path)
print(tf.__version__)
print(np.__version__)
print(pd.__version__)
"""# Correlation"""
corrMatrix = original.corr()
mask_matrix = np.triu(corrMatrix)
sn.heatmap(corrMatrix, annot=True, cmap="YlGnBu", mask=mask_matrix)
plt.show()
corrMatrix = target.corr()
sn.heatmap(corrMatrix, annot=True, cmap="YlGnBu", mask=mask_matrix)
plt.show()
sn.pairplot(original,kind="hist")
sn.pairplot(target,kind="hist")
original['type'] = 'GAN'
target['type'] = 'DSS'
combined_df = pd.concat([original, target])
import streamlit as st
import pandas as pd
import subprocess
import os
import base64
import pickle
# Molecular descriptor calculator
def desc_calc():
# Performs the descriptor calculation
bashCommand = "java -Xms2G -Xmx2G -Djava.awt.headless=true -jar ./PaDEL-Descriptor/PaDEL-Descriptor.jar -removesalt -standardizenitro -fingerprints -descriptortypes ./PaDEL-Descriptor/%s -dir ./ -file descriptors_output.csv" % selected_fp
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
# Read in calculated descriptors and display the dataframe
st.subheader('Calculated molecular descriptors')
desc = pd.read_csv('descriptors_output.csv')
st.write(desc)
st.markdown(filedownload(desc), unsafe_allow_html=True)
# Write the data dimension (number of molecules and descriptors)
nmol = desc.shape[0]
ndesc = desc.shape[1]
st.info('Selected fingerprint: ' + user_fp)
st.info('Number of molecules: ' + str(nmol))
st.info('Number of descriptors: ' + str(ndesc-1))
os.remove('molecule.smi')
# File download
def filedownload(df):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download="descriptor_{user_fp}.csv">Download CSV File</a>'
return href
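# Hedged usage sketch (hypothetical DataFrame, not part of the app flow):
# tiny = pd.DataFrame({'Name': ['molecule_1'], 'PubchemFP0': [0]})
# st.markdown(filedownload(tiny), unsafe_allow_html=True)
# renders a "Download CSV File" link whose href embeds the CSV content as base64.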
# Page title
st.markdown("""
# Molecular Descriptor Calculator
This app allows you to calculate molecular descriptors that can be used for computational drug discovery.
**Credits**
- Descriptor calculated using [PaDEL-Descriptor](http://www.yapcwsoft.com/dd/padeldescriptor/) software.
- Yap CW. [PaDEL‐descriptor: An open source software to calculate molecular descriptors and fingerprints](https://doi.org/10.1002/jcc.21707). ***J Comput Chem*** 32 (2011) 1466-1474.
---
""")
# Sidebar
with st.sidebar.header('1. Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
with st.sidebar.header('2. Enter column names for 1) Molecule ID and 2) SMILES'):
name_mol = st.sidebar.text_input('Enter column name for Molecule ID', 'molecule_chembl_id')
name_smiles = st.sidebar.text_input('Enter column name for SMILES', 'canonical_smiles')
with st.sidebar.header('3. Set parameters'):
# Select fingerprint
fp_dict = {'AtomPairs2D':'AtomPairs2DFingerprinter.xml',
'AtomPairs2DCount':'AtomPairs2DFingerprintCount.xml',
'CDK':'Fingerprinter.xml',
'CDKextended':'ExtendedFingerprinter.xml',
'CDKgraphonly':'GraphOnlyFingerprinter.xml',
'EState':'EStateFingerprinter.xml',
'KlekotaRoth':'KlekotaRothFingerprinter.xml',
'KlekotaRothCount':'KlekotaRothFingerprintCount.xml',
'MACCS':'MACCSFingerprinter.xml',
'PubChem':'PubchemFingerprinter.xml',
'Substructure':'SubstructureFingerprinter.xml',
'SubstructureCount':'SubstructureFingerprintCount.xml'}
user_fp = st.sidebar.selectbox('Choose fingerprint to calculate', list(fp_dict.keys()) )
selected_fp = fp_dict[user_fp]
# Set number of molecules to compute
df0 = pd.read_csv('acetylcholinesterase_04_bioactivity_data_3class_pIC50.csv')
all_mol = df0.shape[0]
number2calc = st.sidebar.slider('How many molecules to compute?', min_value=10, max_value=all_mol, value=10, step=10)
if uploaded_file is not None:
# Read CSV data
@st.cache
def load_csv():
csv = pd.read_csv(uploaded_file).iloc[:number2calc,1:]
return csv
df = load_csv()
df2 = pd.concat([df[name_smiles], df[name_mol]], axis=1)
# Write CSV data
df2.to_csv('molecule.smi', sep = '\t', header = False, index = False)
st.subheader('Initial data from CSV file')
st.write(df)
st.subheader('Formatted as PADEL input file')
st.write(df2)
with st.spinner("Calculating descriptors..."):
desc_calc()
else:
    st.info('Awaiting CSV file to be uploaded.')
if st.button('Press to use Example Dataset'):
# Read CSV data
@st.cache
def load_data():
# number2calc specifies the number of molecules to compute
df = pd.read_csv('acetylcholinesterase_04_bioactivity_data_3class_pIC50.csv').iloc[:number2calc,1:]
return df
df = load_data()
        df2 = pd.concat([df[name_smiles], df[name_mol]], axis=1)
"""
Unit and regression test for the remove module of the molsysmt package.
"""
import molsysmt as msm
import numpy as np
from pandas import DataFrame
def test_remove_1():
molsys = msm.convert(msm.demo['TcTIM']['1tcd.msmpk'], to_form='molsysmt.MolSys')
molsys = msm.remove(molsys, selection='chain_index==[1,2,3]')
df = msm.info(molsys)
true_dict = {'form': {0: 'molsysmt.MolSys'},
'n_atoms': {0: 1906},
'n_groups': {0: 248},
'n_components': {0: 1},
'n_chains': {0: 1},
'n_molecules': {0: 1},
'n_entities': {0: 1},
'n_proteins': {0: 1},
'n_frames': {0: 1}}
true_df = DataFrame(true_dict)
assert df.data.equals(true_df)
def test_remove_2():
molsys = msm.demo['Trp-Cage']['1l2y.pdb']
molsys = msm.convert(molsys, to_form='molsysmt.Trajectory')
molsys = msm.remove(molsys, frame_indices=range(1,38))
df = msm.info(molsys)
true_dict = {'form': {0: 'molsysmt.Trajectory'},
'n_atoms': {0: 304},
'n_groups': {0: None},
'n_components': {0: None},
'n_chains': {0: None},
'n_molecules': {0: None},
'n_entities': {0: None},
'n_frames': {0: 1}}
    true_df = DataFrame(true_dict)
"""
Python module for scripting helper functions
"""
from glob import glob
import configparser
import os
import re
import json
import pandas as pd
def set_parameter(parameters_filepath, section_name, parameter_name, parameter_value):
"""
set the specified parameter to the specified value and write back to the *.ini file
:param parameters_filepath: filename (absolute path)
:param section_name: section name under which parameter is
:param parameter_name: parameter name
:param parameter_value: target value
:return:
"""
conf_parameters = configparser.ConfigParser()
conf_parameters.read(parameters_filepath, encoding="UTF-8")
conf_parameters.set(section_name, parameter_name, parameter_value)
with open(parameters_filepath, 'w') as config_file:
conf_parameters.write(config_file)
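# Usage sketch (hypothetical file, section and option names):
# cfg = configparser.ConfigParser()
# cfg['training'] = {'method': 'natural'}
# with open('parameters.ini', 'w') as f:
#     cfg.write(f)
# set_parameter('parameters.ini', 'training', 'method', 'rfgsm_k')
# rewrites parameters.ini in place so that [training] method = rfgsm_k.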
def df_2_tex(df, filepath):
"""
writes a df to tex file
:param df: dataframe to be converted into tex table
:param filepath: tex filepath
:return:
"""
tex_prefix = r"""\documentclass{standalone}
\usepackage{booktabs}
\begin{document}"""
tex_suffix = r"""\end{document}"""
with open(filepath, "w") as f:
f.write(tex_prefix)
f.write(df.to_latex(float_format="%.1f"))
f.write(tex_suffix)
def file_rank(filename):
"""
assign a rank to the file can be used for sorting
:param filename:
:return:
"""
order = {'natural': 0, 'rfgsm_k': 2, 'dfgsm_k': 1, 'bga_k': 3, 'bca_k': 4, 'grosse': 5}
training_method = re.search("\[training:.*\|", filename).group(0)[:-1].split(':')[-1]
evasion_method = re.search("\|evasion:.*\]", filename).group(0)[:-1].split(':')[-1]
return order[training_method] * 6 + order[evasion_method]
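# Usage sketch (hypothetical filenames that match the expected
# "[training:<method>|evasion:<method>]" pattern):
# names = ['res_[training:bca_k|evasion:natural].json',
#          'res_[training:natural|evasion:dfgsm_k].json']
# sorted(names, key=file_rank)
# puts the natural-trained file first (rank 0*6 + 1 = 1) and the
# bca_k-trained file second (rank 4*6 + 0 = 24).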
def create_tex_tables(filespath="../result_files"):
"""
Create TeX tables from the results populated under `result_files`
which is generated from running `framework.py`
The tex file is stored in `result_files`
:param filespath: the path where the results in json are stored and the tex files are created
:return:
"""
# read the bscn files
bscn_files = sorted(glob(os.path.join(filespath, "*.txt")), key=lambda x: file_rank(x))
# read the results file
files = sorted(glob(os.path.join(filespath, "*.json")), key=lambda x: file_rank(x))
# dataframes
bscn_df = pd.DataFrame()
evasion_df = pd.DataFrame()
accuracy_df = pd.DataFrame()
afp_df = pd.DataFrame()
bon_accuracy_df = pd.DataFrame()
mal_accuracy_df = pd.DataFrame()
    mal_loss_df = pd.DataFrame()
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
        with tm.ensure_clean() as path:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Temporal Summation paradigm
# # <NAME>, October 2019
from psychopy import core, visual, event, sound, gui
import numpy as np
import pandas as pd
import os
import sys
# gui information
expName = 'TS'
mygui = gui.Dlg(title=expName)
mygui.addField("Subject ID:")
mygui.addField("hand/knee/back:")
mygui.addField("Session (pre/post):")
mygui.show() # this will present the actual gui to insert stim num and sess num
# create output data specifications
subj_id = mygui.data[0]
bodypart = mygui.data[1]
sess = mygui.data[2]
pth = "C:/Users/PainLab/Documents/POADOS/TemporalSummation/data/"
outFile = pth + expName + "_" + subj_id + "_" + bodypart + "1_" + sess + ".csv"
if os.path.exists(outFile):
sys.exit("Data path " + outFile + " already exists!")
# create a window to display stuff in
win = visual.Window(color=[0, 0, 0],
monitor='testMonitor',
mouseVisible=False,
# screen=2, # 1 = display computer while scanning, 0 = control/MRI computer
size=[1680, 1050], # either specify size or set fullscr = True
fullscr=True)
# instructions for rating scale
instr = visual.TextStim(win,
font='Helvetica Bold',
pos=(0, 0),
height=0.12,
                        text='Du kommer att få känna stick och skatta hur ont det gör.',  # EN: "You will feel pinpricks and rate how much they hurt."
color='LightGray',
wrapWidth=None)
# instructions before first stimulus
alertFirst = visual.TextStim(win,
font='Helvetica Bold',
pos=(0, 0),
height=0.12,
                             text='Snart kommer du att känna ett stick.',  # EN: "Soon you will feel a prick."
color='LightGray',
wrapWidth=None)
# instructions before series of stimuli
alertSeries = visual.TextStim(win,
font='Helvetica Bold',
pos=(0, 0),
height=0.12,
                              text='Snart kommer du att känna flera stick. '  # EN: "Soon you will feel several pricks."
                                   'Skatta hur ont det gör hela tiden (klicka inte).',  # EN: "Rate how much it hurts the whole time (do not click)."
color='LightGray',
wrapWidth=None)
# fixation cross to be presented before rating starts
cross = visual.TextStim(win, font='Helvetica Bold', pos=(0, 0), height=0.25, text='+', color='LightGray')
# grey screen
grey = visual.TextStim(win, font='Helvetica Bold', pos=(0, 0), height=0.25, text='', color='LightGray')
# screen between series
intermezzo = visual.TextStim(win, font='Helvetica Bold', pos=(0, 0), height=0.15, text='continue?', color='LightGray')
# rating scale
vasFirst = visual.RatingScale(win,
name='rating',
marker='none', # define this here even though it will not be called!
precision=1, # will give only whole numbers
size=1, # affects the overall rating scale display
stretch=2, # affects horizontal direction of the scale
pos=[0.0, -0.2], # position of scale on screen
tickMarks=[0, 100],
tickHeight=1,
low=0, # minimum
high=100, # maximum
textSize=1.1, # applied to all text elements (anchors, scale)
                               labels=['Ingen smärta', 'Värsta tänkbara smärta'],  # labels=['0', '100']; EN: ['No pain', 'Worst imaginable pain']
textFont='Helvetica Bold',
textColor='LightGray',
                               scale='Skatta hur ont det gjorde (klicka inte).',  # EN: "Rate how much it hurt (do not click)."
showAccept=False
)
# rating scale
vasSeries = visual.RatingScale(win,
name='rating',
marker='circle', # define this here even though it will not be called!
precision=1, # will give only whole numbers
size=1, # affects the overall rating scale display
stretch=2, # affects horizontal direction of the scale
pos=[0.0, -0.2], # position of scale on screen
tickMarks=[0, 100],
tickHeight=1,
low=0, # minimum
high=100, # maximum
textSize=1.1, # applied to all text elements (anchors, scale)
                                labels=['Ingen smärta', 'Värsta tänkbara smärta'],  # labels=['0', '100']; EN: ['No pain', 'Worst imaginable pain']
textFont='Helvetica Bold',
textColor='LightGray',
                                scale='Skatta hur ont det gör (även efter sticken).',  # title/instructions of scale; EN: "Rate how much it hurts (also after the pricks)."
showAccept=False
)
# sound for first stimulus
sound_first = sound.Sound('C:/Users/PainLab/Documents/POADOS/TemporalSummation/sounds/first.wav',
volume=1, sampleRate=44100)
# sound for series of stimuli
sound_TS = sound.Sound('C:/Users/PainLab/Documents/POADOS/TemporalSummation/sounds/ts.wav',
volume=1, sampleRate=44100)
# marker that will move with the mouse # # size=(0.1, 0.15) # marker size
fixSpot = visual.GratingStim(win, tex="none", mask="circle", size=(0.06, 0.1), color='red', autoLog=False)
# custom mouse
# the reason I introduce two vm is that the start position is not kept if only one vm is used
vm = visual.CustomMouse(win,
leftLimit=-0.6, rightLimit=0.6, # starts and ends at scale ends
topLimit=-0.2, bottomLimit=-0.2, # you can only move mouse on line along scale
newPos=[-50, 0], # starting position
showLimitBox=False, clickOnUp=True) # clickOnUp does not matter, concerns mouse clicks
vmTS = visual.CustomMouse(win,
leftLimit=-0.6, rightLimit=0.6, # starts and ends at scale ends
topLimit=-0.2, bottomLimit=-0.2, # you can only move mouse on line along scale
newPos=[-50, 0], # starting position
showLimitBox=False, clickOnUp=True) # clickOnUp does not matter, concerns mouse clicks
# mouse to couple to
mouse = event.Mouse(win=win, visible=False)
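# Rough sketch of the mapping RatingScale._getMarkerFromPos applies for this layout
# (assumption: the custom mouse x-limits of -0.6..0.6 coincide with the scale ends):
def _x_to_rating(x, left=-0.6, right=0.6, low=0, high=100):
    """Illustrative only, never called: linear map from pointer x-position to a VAS rating."""
    return low + (x - left) / (right - left) * (high - low)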
# set up clocks
clock = core.Clock()
TSclock = core.Clock()
SpareClock = core.Clock()
# other settings
recordedMousePos = np.zeros((5000, 2))
dur5s = 5.0 # duration that most instructions will be presented
dur10s = 10.0
# tmp files
timeData = []
ratingData = []
stimData = []
# # START SERIES 1
clock.reset()
while clock.getTime() < dur5s:
instr.draw() # display instructions for rating scale (0-100)
win.flip()
if event.getKeys(['escape']):
core.quit()
clock.reset()
while clock.getTime() < dur5s:
alertFirst.draw() # display alert first stim
win.flip()
if event.getKeys(['escape']):
core.quit()
# display cross and audio to present first stimulus
cross.draw()
win.flip()
sound_first.play()
core.wait(5) # starts the moment the first sound starts
# first stim rating and cross afterwards = 10s
mouse.setPos(newPos=(0, 0))
vm.pointer = fixSpot # assign the fix spot to the vm pointer
ratingTimer = core.CountdownTimer(dur10s)
while ratingTimer.getTime() > 0: # until timer is at 0
vasFirst.reset()
currentMousePos = vasFirst._getMarkerFromPos(vm.getPos()[0]) # grabs current mouse position (rating)
# Do the drawing
vasFirst.draw()
vm.draw()
win.flip()
if ratingTimer.getTime() < 0.001: # if time runs out, just grab their rating
currentMousePos = round(currentMousePos) # log rating
print('First stim rating: ', currentMousePos)
# assemble data
ratingData.append(currentMousePos)
timeData.append("0")
stimData.append("first")
# data storage
data2store = pd.DataFrame({"Cond": stimData, "Time": timeData, "Rating": ratingData})
if event.getKeys(['escape']):
core.quit()
# display alert for TS series
clock.reset()
while clock.getTime() < dur5s:
alertSeries.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
# display cross and audio to present TS repetitive stimuli (n=15)
cross.draw()
win.flip()
# set mouse and pointer
mouse.setPos(newPos=(0, 0))
vmTS.pointer = fixSpot
TSclock.reset()
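# Timing sketch for the rating loop below, assuming the monitor runs at 60 Hz:
# frames / 60 = seconds, so 180 frames ~ 3 s (sound onset), 300 frames ~ 5 s
# (start of stimulation logged) and 1500 frames ~ 25 s of continuous rating.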
for frame in range(1500): # 60 Hz are 60 frames/s, 1500 frames = 25s (previously: 1800 frames = 30s)
vasSeries.reset()
currentMousePos = vasSeries._getMarkerFromPos(vmTS.getPos()[0]) # grabs current mouse position (rating)
currentTime = TSclock.getTime() # grabs time since globalClock was created
currentTime = round(currentTime, 3)
currentMousePos = round(currentMousePos)
# round Rating to integer/whole number, round Time to 3 decimals in terminal window
print('Time: {}, Rating: {}'.format(round(currentTime, 3), round(currentMousePos)))
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("ts")
if frame == 180: # after 3s (=180 frames) start play sound
sound_TS.play()
# print("sound")
if frame == 300: # after 5s (=300 frames), 300+120 f, log start stimuli. The sound has 1 extra sec in beginning
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("startStim")
# print("start")
if frame == 1140: # ca. after 5s + 15s (passed time + num of stim/time to apply them), log end of stimulation
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("stopStim")
# print("end")
elif event.getKeys(['escape']):
core.quit()
# Do the drawing
vasSeries.draw()
vmTS.draw()
win.flip()
# data storage
data2store = pd.DataFrame({"Cond": stimData, "Time": timeData, "Rating": ratingData})
data2store.to_csv(outFile, sep="\t", index=False) # or comma-separated ","
# pause before continue is possible
grey.draw()
win.flip()
core.wait(2)
###
# transition phase from series 1 to 2
entered = False
while entered is False:
intermezzo.draw() # display intermezzo
win.flip()
if event.getKeys(keyList=['return', 'c']): # continue paradigm with keyboard buttons "enter/return" or "c"
entered = True
elif event.getKeys(['escape']):
core.quit()
###
# name of file for second series
outFile = pth + expName + "_" + subj_id + "_" + bodypart + "2_" + sess + ".csv"
# other settings
recordedMousePos = np.zeros((5000, 2))
# clear tmp files
timeData = []
ratingData = []
stimData = []
# START SERIES 2
core.wait(2)
clock.reset()
while clock.getTime() < dur5s:
instr.draw() # display instructions for rating scale (0-100)
win.flip()
if event.getKeys(['escape']):
core.quit()
clock.reset()
while clock.getTime() < dur5s:
alertFirst.draw() # display alert first stim
win.flip()
if event.getKeys(['escape']):
core.quit()
# display cross and audio to present first stimulus
cross.draw()
win.flip()
sound_first.play()
core.wait(5) # starts the moment the first sound starts
# first stim rating and cross afterwards = 10s
mouse.setPos(newPos=(0, 0))
vm = visual.CustomMouse(win,
leftLimit=-0.6, rightLimit=0.6, # starts and ends at scale ends
topLimit=-0.2, bottomLimit=-0.2, # you can only move mouse on line along scale
newPos=[-50, 0], # starting position
showLimitBox=False, clickOnUp=True) # clickOnUp does not matter, concerns mouse clicks
vm.pointer = fixSpot # assign the fix spot to the vm pointer
ratingTimer = core.CountdownTimer(dur10s)
while ratingTimer.getTime() > 0: # until timer is at 0
vasFirst.reset() # reset scale as it is used several times
currentMousePos = vasFirst._getMarkerFromPos(vm.getPos()[0]) # grabs current mouse position (rating)
# Do the drawing
vasFirst.draw()
vm.draw()
win.flip()
if ratingTimer.getTime() < 0.001: # if time runs out, just grab their rating
currentMousePos = round(currentMousePos) # log rating
print('First stim rating: ', currentMousePos)
# assemble data
ratingData.append(currentMousePos)
timeData.append("0")
stimData.append("first")
# data storage
data2store = pd.DataFrame({"Cond": stimData, "Time": timeData, "Rating": ratingData})
if event.getKeys(['escape']):
core.quit()
# display alert for TS series
clock.reset()
while clock.getTime() < dur5s:
alertSeries.draw()
win.flip()
if event.getKeys(['escape']):
core.quit()
# display cross and audio to present TS repetitive stimuli (n=15)
cross.draw()
win.flip()
# set mouse and assign circle as pointer
mouse.setPos(newPos=(0, 0))
vmTS = visual.CustomMouse(win,
leftLimit=-0.6, rightLimit=0.6, # starts and ends at scale ends
topLimit=-0.2, bottomLimit=-0.2, # you can only move mouse on line along scale
newPos=[-50, 0], # starting position
showLimitBox=False, clickOnUp=True) # clickOnUp does not matter, concerns mouse clicks
vmTS.pointer = fixSpot
TSclock.reset()
for frame in range(1500): # 60 Hz are 60 frames/s, 1500 frames = 25s (previously: 1800 frames = 30s)
vasSeries.reset()
currentMousePos = vasSeries._getMarkerFromPos(vmTS.getPos()[0]) # grabs current mouse position (rating)
currentTime = TSclock.getTime() # grabs time since globalClock was created
currentTime = round(currentTime, 3)
currentMousePos = round(currentMousePos)
# round Rating to integer/whole number, round Time to 3 decimals in terminal window
print('Time: {}, Rating: {}'.format(round(currentTime, 3), round(currentMousePos)))
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("ts")
if frame == 180: # after 3s (=180 frames) start play sound
sound_TS.play()
# print("sound")
if frame == 300: # after 5s (=300 frames), 300+120 f, log start stimuli. The sound has 1 extra sec in beginning
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("startStim")
# print("start")
if frame == 1140: # ca. after 5s + 15s (passed time + num of stim/time to apply them), log end of stimulation
timeData.append(currentTime)
ratingData.append(currentMousePos)
stimData.append("stopStim")
# print("end")
elif event.getKeys(['escape']):
core.quit()
# Do the drawing
vasSeries.draw()
vmTS.draw()
win.flip()
# data storage
data2store = pd.DataFrame({"Cond": stimData, "Time": timeData, "Rating": ratingData})
data2store.to_csv(outFile, sep="\t", index=False) # or comma-separated ","
# pause before continue is possible
grey.draw()
win.flip()
core.wait(2)
###
# transition phase from series 2 to 3
entered = False
while entered is False:
intermezzo.draw() # display intermezzo
win.flip()
if event.getKeys(keyList=['return', 'c']):
entered = True
elif event.getKeys(['escape']):
core.quit()
###
# name of file for third series
outFile = pth + expName + "_" + subj_id + "_" + bodypart + "3_" + sess + ".csv"
# other settings
recordedMousePos = np.zeros((5000, 2))
# tmp files
timeData = []
ratingData = []
stimData = []
# START PARADIGM 3
core.wait(2)
clock.reset()
while clock.getTime() < dur5s:
instr.draw() # display instructions for rating scale (0-100)
win.flip()
if event.getKeys(['escape']):
core.quit()
clock.reset()
while clock.getTime() < dur5s:
alertFirst.draw() # display alert first stim
win.flip()
if event.getKeys(['escape']):
core.quit()
# display cross and audio to present first stimulus
cross.draw()
win.flip()
sound_first.play()
core.wait(5) # starts the moment the first sound starts
# first stim rating and cross afterwards = 10s
mouse.setPos(newPos=(0, 0))
vm = visual.CustomMouse(win,
leftLimit=-0.6, rightLimit=0.6, # starts and ends at scale ends
topLimit=-0.2, bottomLimit=-0.2, # you can only move mouse on line along scale
newPos=[-50, 0], # starting position
showLimitBox=False, clickOnUp=True) # clickOnUp does not matter, concerns mouse clicks
vm.pointer = fixSpot # assign the fix spot to the vm pointer
ratingTimer = core.CountdownTimer(dur10s)
while ratingTimer.getTime() > 0: # until timer is at 0
vasFirst.reset()
currentMousePos = vasFirst._getMarkerFromPos(vm.getPos()[0]) # grabs current mouse position (rating)
# Do the drawing
vasFirst.draw()
vm.draw()
win.flip()
if ratingTimer.getTime() < 0.001: # if time runs out, just grab their rating
currentMousePos = round(currentMousePos) # log rating
print('First stim rating: ', currentMousePos)
# assemble data
ratingData.append(currentMousePos)
timeData.append("0")
stimData.append("first")
# data storage
data2store = | pd.DataFrame({"Cond": stimData, "Time": timeData, "Rating": ratingData}) | pandas.DataFrame |
import json
import os
import pickle
import tempfile
from typing import List, Optional
import numpy as np
import pandas as pd
import pytest
import dialogy.constants as const
from dialogy.plugins import MergeASROutputPlugin, XLMRMultiClass
from dialogy.utils import load_file
from dialogy.workflow import Workflow
from tests import load_tests
class MockClassifier:
def __init__(
self,
model_name,
model_dir,
num_labels: Optional[int] = None,
args=None,
**kwargs
):
self.model_name = model_name
self.model_dir = model_dir
self.num_labels = num_labels
self.args = args or {}
self.kwargs = kwargs
if os.path.isdir(self.model_dir):
raise OSError("Model directory")
def predict(self, texts: List[str]):
if texts[0] == "<s> yes </s>":
return [1], np.array([[-7.4609375, 7.640625]])
elif texts[0] == "<s> no </s>":
return [0], np.array([[7.40625, -7.5546875]])
elif texts[0] == "<s> yes </s> <s> s </s>":
return [1], np.array([[-7.47265625, 7.69140625]])
elif texts[0] == "<s> 9 </s> <s> new </s> <s> no </s>":
return [0], np.array([[7.41796875, -7.56640625]])
else:
return [], np.array([[]])
def train_model(self, training_data: pd.DataFrame):
return
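# MockClassifier mirrors the (predictions, raw_outputs) shape that a simpletransformers-style
# ClassificationModel.predict returns, keyed off the exact merged-ASR strings used in the tests below.
# Minimal illustration (hypothetical path, not part of the original tests):
#     clf = MockClassifier("xlmroberta", "/path/that/does/not/exist")  # an existing dir raises OSError
#     labels, raw_outputs = clf.predict(["<s> yes </s>"])
#     # labels == [1]; raw_outputs == np.array([[-7.4609375, 7.640625]])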
def write_intent_to_workflow(w, v):
w.output[const.INTENTS] = v
def update_input(w, v):
w.input[const.CLASSIFICATION_INPUT] = v
def test_xlmr_plugin_no_module_error():
save_val = const.XLMR_MODULE
const.XLMR_MODULE = "this-module-doesn't-exist"
with pytest.raises(ModuleNotFoundError):
XLMRMultiClass(
model_dir=".",
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
const.XLMR_MODULE = save_val
def test_xlmr_plugin_when_no_labelencoder_saved():
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
xlmr_clf = XLMRMultiClass(
model_dir=".",
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
assert isinstance(xlmr_clf, XLMRMultiClass)
assert xlmr_clf.model is None
const.XLMR_MODULE = save_module_name
const.XLMR_MULTI_CLASS_MODEL = save_model_name
def test_xlmr_plugin_when_labelencoder_EOFError(capsys):
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
_, file_path = tempfile.mkstemp(suffix=".pkl")
save_label_encoder_file = const.LABELENCODER_FILE
directory, file_name = os.path.split(file_path)
const.LABELENCODER_FILE = file_name
with capsys.disabled():
xlmr_plugin = XLMRMultiClass(
model_dir=directory,
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
debug=True,
)
assert xlmr_plugin.model is None
os.remove(file_path)
const.LABELENCODER_FILE = save_label_encoder_file
const.XLMR_MODULE = save_module_name
const.XLMR_MULTI_CLASS_MODEL = save_model_name
def test_xlmr_init_mock():
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
xlmr_clf = XLMRMultiClass(
model_dir=".",
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
xlmr_clf.init_model(5)
assert xlmr_clf.model is not None
const.XLMR_MODULE = save_module_name
const.XLMR_MULTI_CLASS_MODEL = save_model_name
def test_xlmr_invalid_args_map():
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
with pytest.raises(ValueError):
XLMRMultiClass(
model_dir=".",
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
args_map={"invalid": "value"},
)
const.XLMR_MODULE = save_module_name
const.XLMR_MULTI_CLASS_MODEL = save_model_name
def test_train_xlmr_mock():
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
directory = "/tmp"
file_path = os.path.join(directory, const.LABELENCODER_FILE)
xlmr_clf = XLMRMultiClass(
model_dir=directory,
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
train_df = pd.DataFrame(
[
{"data": "yes", "labels": "_confirm_"},
{"data": "yea", "labels": "_confirm_"},
{"data": "no", "labels": "_cancel_"},
{"data": "nope", "labels": "_cancel_"},
]
)
xlmr_clf.train(train_df)
# This copy loads from the same directory that was trained previously.
# So this instance would have read the labelencoder saved.
xlmr_clf_copy = XLMRMultiClass(
model_dir=directory,
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
assert len(xlmr_clf_copy.labelencoder.classes_) == 2
os.remove(file_path)
const.XLMR_MODULE = save_module_name
const.XLMR_MULTI_CLASS_MODEL = save_model_name
def test_invalid_operations():
save_module_name = const.XLMR_MODULE
save_model_name = const.XLMR_MULTI_CLASS_MODEL
const.XLMR_MODULE = "tests.plugin.text.classification.test_xlmr"
const.XLMR_MULTI_CLASS_MODEL = "MockClassifier"
directory = "/tmp"
file_path = os.path.join(directory, const.LABELENCODER_FILE)
if os.path.exists(file_path):
os.remove(file_path)
xlmr_clf = XLMRMultiClass(
model_dir=directory,
access=lambda w: w.input[const.CLASSIFICATION_INPUT],
mutate=write_intent_to_workflow,
)
with pytest.raises(ValueError):
xlmr_clf.init_model(None)
train_df_empty = | pd.DataFrame() | pandas.DataFrame |
import pandas
import numpy
import similaritymeasures
def stats_between_series(
xaxis_1: pandas.Series,
values_1: pandas.Series,
xaxis_2: pandas.Series,
values_2: pandas.Series,
print_: bool = False,
) -> dict:
"""Dynamic time warping and discret frechet distance for measuring similarity between two temporal sequences
Args:
xaxis_1 (pandas.Series): index axis of the dataframe 1
values_1 (pandas.Series): value axis of the dataframe 1
xaxis_2 (pandas.Series): index axis of the dataframe 2
values_2 (pandas.Series): value axis of the dataframe 2
Returns:
dict: `{"dtw": float, "frechet_dist": float}`
"""
dataframe_1 = pandas.merge(xaxis_1, values_1, right_index=True, left_index=True)
dataframe_2 = pandas.merge(xaxis_2, values_2, right_index=True, left_index=True)
dataframe_1.rename(
columns={xaxis_1.name: "id", values_1.name: "values_1"}, inplace=True
)
dataframe_2.rename(
columns={xaxis_2.name: "id", values_2.name: "values_2"}, inplace=True
)
dataframe_1.set_index("id", inplace=True)
dataframe_2.set_index("id", inplace=True)
unified = | pandas.concat([dataframe_1, dataframe_2], axis=1) | pandas.concat |
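# Sketch of how the metrics above are typically computed with the imported `similaritymeasures`
# package (illustrative continuation, not the original function body): each curve becomes an
# (n, 2) array of [index, value] points taken from the merged frame.
#     curve_1 = unified["values_1"].dropna().reset_index().to_numpy()
#     curve_2 = unified["values_2"].dropna().reset_index().to_numpy()
#     dtw_value, _ = similaritymeasures.dtw(curve_1, curve_2)
#     frechet_value = similaritymeasures.frechet_dist(curve_1, curve_2)
#     return {"dtw": dtw_value, "frechet_dist": frechet_value}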
import logging
import math
import re
from collections import Counter
import numpy as np
import pandas as pd
from certa.utils import diff
def get_original_prediction(r1, r2, predict_fn):
r1r2 = get_row(r1, r2)
return predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
def get_row(r1, r2, lprefix='ltable_', rprefix='rtable_'):
r1_df = pd.DataFrame(data=[r1.values], columns=r1.index)
r2_df = pd.DataFrame(data=[r2.values], columns=r2.index)
r1_df.columns = list(map(lambda col: lprefix + col, r1_df.columns))
r2_df.columns = list(map(lambda col: rprefix + col, r2_df.columns))
r1r2 = pd.concat([r1_df, r2_df], axis=1)
return r1r2
def support_predictions(r1: pd.Series, r2: pd.Series, lsource: pd.DataFrame,
rsource: pd.DataFrame, predict_fn, lprefix, rprefix, num_triangles: int = 100,
class_to_explain: int = None, max_predict: int = -1,
use_w: bool = True, use_q: bool = True):
'''
generate a pd.DataFrame of support predictions to be used to generate open triangles.
:param r1: the "left" record
:param r2: the "right" record
:param lsource: the "left" data source
:param rsource: the "right" data source
:param predict_fn: the ER model prediction function
:param lprefix: the prefix of attributes from the "left" table
:param rprefix: the prefix of attributes from the "right" table
:param num_triangles: number of open triangles to be used to generate the explanation
:param class_to_explain: the class to be explained
:param max_predict: the maximum number of predictions to be performed by the ER model to generate the requested
number of open triangles
:param use_w: whether to use left open triangles
:param use_q: whether to use right open triangles
    :return: a triple (support_pairs, copies_left, copies_right): a pd.DataFrame of record pairs with one record from
    the original prediction and one record yielding an opposite prediction by the ER model, plus the expanded copies
    generated from the left and right records
'''
r1r2 = get_row(r1, r2)
original_prediction = predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
r1r2['id'] = "0@" + str(r1r2[lprefix + 'id'].values[0]) + "#" + "1@" + str(r1r2[rprefix + 'id'].values[0])
copies, copies_left, copies_right = expand_copies(lprefix, lsource, r1, r2, rprefix, rsource)
find_positives, support = get_support(class_to_explain, pd.concat([lsource, copies_left]), max_predict,
original_prediction, predict_fn, r1, r2, pd.concat([rsource, copies_right]),
use_w, use_q, lprefix, rprefix, num_triangles)
if len(support) > 0:
if len(support) > num_triangles:
support = support.sample(n=num_triangles)
else:
            logging.warning(f'could only find {len(support)} triangles of the {num_triangles} requested')
support['label'] = list(map(lambda predictions: int(round(predictions)),
support.match_score.values))
support = support.drop(['match_score', 'nomatch_score'], axis=1)
        if class_to_explain is None:
r1r2['label'] = np.argmax(original_prediction)
else:
r1r2['label'] = class_to_explain
support_pairs = pd.concat([r1r2, support], ignore_index=True)
return support_pairs, copies_left, copies_right
else:
logging.warning('no triangles found')
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
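# Illustrative call (hypothetical variable names, not from the original module). The predict_fn
# passed in must return a DataFrame carrying 'match_score' and 'nomatch_score' columns for each
# candidate pair, as assumed throughout this module.
#     support_pairs, copies_left, copies_right = support_predictions(
#         r1, r2, lsource, rsource, predict_fn,
#         lprefix='ltable_', rprefix='rtable_', num_triangles=100)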
def find_candidates_predict(record, source, find_positives, predict_fn, num_candidates, lj=True,
max=-1, lprefix='ltable_', rprefix='rtable_'):
if lj:
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
copy = source.copy()
records.columns = list(map(lambda col: lprefix + col, records.columns))
copy.columns = list(map(lambda col: rprefix + col, copy.columns))
records.index = copy.index
samples = pd.concat([records, copy], axis=1)
else:
copy = source.copy()
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
records.index = copy.index
copy.columns = list(map(lambda col: lprefix + col, copy.columns))
records.columns = list(map(lambda col: rprefix + col, records.columns))
samples = pd.concat([copy, records], axis=1)
if max > 0:
samples = samples.sample(frac=1)[:max]
record2text = " ".join([str(val) for k, val in record.to_dict().items() if k not in ['id']])
samples['score'] = samples.T.apply(lambda row: cs(record2text, " ".join(row.astype(str))))
samples = samples.sort_values(by='score', ascending=not find_positives)
samples = samples.drop(['score'], axis=1)
result = pd.DataFrame()
batch = num_candidates * 4
splits = min(10, int(len(samples) / batch))
i = 0
while len(result) < num_candidates and i < splits:
batch_samples = samples[batch * i:batch * (i + 1)]
predicted = predict_fn(batch_samples)
if find_positives:
out = predicted[predicted["match_score"] > 0.5]
else:
out = predicted[predicted["match_score"] < 0.5]
if len(out) > 0:
result = pd.concat([result, out], axis=0)
logging.info(f'{i}:{len(out)},{len(result)}')
i += 1
return result
def generate_subsequences(lsource, rsource, max=-1):
new_records_left_df = pd.DataFrame()
for i in np.arange(len(lsource[:max])):
r = lsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_left_df) + len(lsource)))
if len(nr_df) > 0:
nr_df.columns = lsource.columns
new_records_left_df = pd.concat([new_records_left_df, nr_df])
new_records_right_df = pd.DataFrame()
for i in np.arange(len(rsource[:max])):
r = rsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_right_df) + len(rsource)))
if len(nr_df) > 0:
nr_df.columns = rsource.columns
new_records_right_df = pd.concat([new_records_right_df, nr_df])
return new_records_left_df, new_records_right_df
def get_support(class_to_explain, lsource, max_predict, original_prediction, predict_fn, r1, r2,
rsource, use_w, use_q, lprefix, rprefix, num_triangles):
candidates4r1 = pd.DataFrame()
candidates4r2 = pd.DataFrame()
num_candidates = int(num_triangles / 2)
    if class_to_explain is None:
findPositives = bool(original_prediction[0] > original_prediction[1])
else:
findPositives = bool(0 == int(class_to_explain))
if use_q:
candidates4r1 = find_candidates_predict(r1, rsource, findPositives, predict_fn, num_candidates,
lj=True, max=max_predict, lprefix=lprefix, rprefix=rprefix)
if use_w:
candidates4r2 = find_candidates_predict(r2, lsource, findPositives, predict_fn, num_candidates,
lj=False, max=max_predict, lprefix=lprefix, rprefix=rprefix)
neighborhood = pd.DataFrame()
candidates = pd.concat([candidates4r1, candidates4r2], ignore_index=True)
if len(candidates) > 0:
candidates['id'] = "0@" + candidates[lprefix + 'id'].astype(str) + "#" + "1@" + candidates[
rprefix + 'id'].astype(str)
if findPositives:
neighborhood = candidates[candidates.match_score >= 0.5].copy()
else:
neighborhood = candidates[candidates.match_score < 0.5].copy()
return findPositives, neighborhood
def generate_modified(record, start_id: int = 0):
new_copies = []
t_len = len(record)
copy = record.copy()
for t in range(t_len):
attr_value = str(copy.get(t))
values = attr_value.split()
for cut in range(1, len(values)):
for new_val in [" ".join(values[cut:]),
" ".join(values[:cut])]: # generate new values with prefix / suffix dropped
new_copy = record.copy()
new_copy[t] = new_val # substitute the new value with missing prefix / suffix on the target attribute
if start_id > 0:
new_copy['id'] = len(new_copies) + start_id
new_copies.append(new_copy)
return new_copies
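# Worked example (derived from the loop above): for an attribute value "android smart phone",
# cuts at positions 1 and 2 yield the new values "smart phone", "android", "phone" and
# "android smart", that is, one copy of the record per dropped prefix or suffix of that attribute.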
WORD = re.compile(r'\w+')
def cs(text1, text2):
vec1 = Counter(WORD.findall(text1))
vec2 = Counter(WORD.findall(text2))
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def expand_copies(lprefix, lsource, r1, r2, rprefix, rsource):
generated_df = pd.DataFrame()
new_copies_left = []
new_copies_right = []
left = True
for record in [r1, r2]:
r1_df = pd.DataFrame(data=[record.values], columns=record.index)
r2_df = | pd.DataFrame(data=[record.values], columns=record.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@brief test log(time=4s)
"""
import os
import unittest
from io import StringIO
import pandas
import numpy
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from pandas_streaming.data import dummy_streaming_dataframe
from pandas_streaming.df import StreamingDataFrame
from pandas_streaming.df.dataframe import StreamingDataFrameSchemaError
class TestStreamingDataFrame(ExtTestCase):
def test_shape(self):
sdf = dummy_streaming_dataframe(100)
dfs = list(sdf)
self.assertEqual(len(dfs), 10)
self.assertEqual(len(dfs), 10)
shape = sdf.shape
self.assertEqual(shape, (100, 2))
def test_init(self):
sdf = dummy_streaming_dataframe(100)
df1 = sdf.to_df()
sdf2 = StreamingDataFrame(sdf)
df2 = sdf2.to_df()
self.assertEqualDataFrame(df1, df2)
def test_to_csv(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.to_csv()
self.assertStartsWith(",cint,cstr\n0,0,s0",
st.replace('\r', ''))
st = sdf.to_csv()
self.assertStartsWith(",cint,cstr\n0,0,s0",
st.replace('\r', ''))
def test_iterrows(self):
sdf = dummy_streaming_dataframe(100)
rows = list(sdf.iterrows())
self.assertEqual(sdf.shape[0], len(rows))
rows = list(sdf.iterrows())
self.assertEqual(sdf.shape[0], len(rows))
def test_head(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.head()
self.assertEqual(st.shape, (5, 2))
st = sdf.head(n=20)
self.assertEqual(st.shape, (20, 2))
st = sdf.head(n=20)
self.assertEqual(st.shape, (20, 2))
def test_tail(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.tail()
self.assertEqual(st.shape, (5, 2))
st = sdf.tail(n=20)
self.assertEqual(st.shape, (10, 2))
def test_read_csv(self):
temp = get_temp_folder(__file__, "temp_read_csv")
df = pandas.DataFrame(data=dict(a=[5, 6], b=["er", "r"]))
name = os.path.join(temp, "df.csv")
name2 = os.path.join(temp, "df2.csv")
name3 = os.path.join(temp, "df3.csv")
df.to_csv(name, index=False)
df.to_csv(name2, index=True)
sdf = StreamingDataFrame.read_csv(name)
text = sdf.to_csv(index=False)
self.assertRaise(
lambda: StreamingDataFrame.read_csv(
name2, index_col=0, chunksize=None),
ValueError)
self.assertRaise(
lambda: StreamingDataFrame.read_csv(
name2, index_col=0, iterator=False),
ValueError)
sdf2 = StreamingDataFrame.read_csv(name2, index_col=0)
text2 = sdf2.to_csv(index=True)
sdf2.to_csv(name3, index=True)
with open(name, "r", encoding='utf-8') as f:
exp = f.read()
with open(name2, "r", encoding='utf-8') as f:
exp2 = f.read()
with open(name3, "r", encoding='utf-8') as f:
text3 = f.read()
self.assertEqual(text.replace('\r', ''), exp)
sdf2 = StreamingDataFrame.read_df(df)
self.assertEqualDataFrame(sdf.to_dataframe(), sdf2.to_dataframe())
self.assertEqual(text2.replace('\r', ''), exp2)
self.assertEqual(text3.replace('\r', '').replace('\n\n', '\n'),
exp2.replace('\r', ''))
def test_where(self):
sdf = dummy_streaming_dataframe(100)
cols = sdf.columns
self.assertEqual(list(cols), ['cint', 'cstr'])
dts = sdf.dtypes
self.assertEqual(len(dts), 2)
res = sdf.where(lambda row: row["cint"] == 1)
st = res.to_csv()
self.assertStartsWith(",cint,cstr\n0,,\n1,1.0,s1",
st.replace('\r', ''))
res = sdf.where(lambda row: row["cint"] == 1)
st = res.to_csv()
self.assertStartsWith(",cint,cstr\n0,,\n1,1.0,s1",
st.replace('\r', ''))
def test_dataframe(self):
sdf = dummy_streaming_dataframe(100)
df = sdf.to_dataframe()
self.assertEqual(df.shape, (100, 2))
def test_sample(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(frac=0.1)
self.assertLesser(res.shape[0], 30)
self.assertRaise(lambda: sdf.sample(n=5), ValueError)
res = sdf.sample(frac=0.1)
self.assertLesser(res.shape[0], 30)
self.assertRaise(lambda: sdf.sample(n=5), ValueError)
def test_sample_cache(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(frac=0.1, cache=True)
df1 = res.to_df()
df2 = res.to_df()
self.assertEqualDataFrame(df1, df2)
self.assertTrue(res.is_stable(n=df1.shape[0], do_check=True))
self.assertTrue(res.is_stable(n=df1.shape[0], do_check=False))
res = sdf.sample(frac=0.1, cache=False)
self.assertFalse(res.is_stable(n=df1.shape[0], do_check=False))
def test_sample_reservoir_cache(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(n=10, cache=True, reservoir=True)
df1 = res.to_df()
df2 = res.to_df()
self.assertEqualDataFrame(df1, df2)
self.assertEqual(df1.shape, (10, res.shape[1]))
self.assertRaise(lambda: sdf.sample(n=10, cache=False, reservoir=True),
ValueError)
self.assertRaise(lambda: sdf.sample(frac=0.1, cache=True, reservoir=True),
ValueError)
def test_apply(self):
sdf = dummy_streaming_dataframe(100)
self.assertNotEmpty(list(sdf))
sdf = sdf.applymap(str)
self.assertNotEmpty(list(sdf))
sdf = sdf.apply(lambda row: row[["cint"]] + "r", axis=1)
self.assertNotEmpty(list(sdf))
text = sdf.to_csv(header=False)
self.assertStartsWith("0,0r\n1,1r\n2,2r\n3,3r",
text.replace('\r', ''))
def test_train_test_split(self):
sdf = dummy_streaming_dataframe(100)
tr, te = sdf.train_test_split(index=False, streaming=False)
self.assertRaise(
lambda: StreamingDataFrame.read_str(tr, chunksize=None),
ValueError)
self.assertRaise(
lambda: StreamingDataFrame.read_str(tr, iterator=False),
ValueError)
StreamingDataFrame.read_str(tr.encode('utf-8'))
trsdf = StreamingDataFrame.read_str(tr)
tesdf = StreamingDataFrame.read_str(te)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cint").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
def test_train_test_split_streaming(self):
sdf = dummy_streaming_dataframe(100, asfloat=True)
trsdf, tesdf = sdf.train_test_split(
streaming=True, unique_rows=True, partitions=[0.7, 0.3])
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trdf2 = trsdf.to_dataframe()
tedf2 = tesdf.to_dataframe()
df_val = pandas.concat([trdf2, tedf2])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
self.assertEqual(trdf.shape, trdf2.shape)
self.assertEqual(tedf.shape, tedf2.shape)
self.assertGreater(trdf.shape[0], tedf.shape[0])
self.assertGreater(trdf2.shape[0], tedf2.shape[0])
def test_train_test_split_streaming_tiny(self):
df = pandas.DataFrame(data=dict(X=[4.5, 6, 7], Y=["a", "b", "c"]))
sdf2 = StreamingDataFrame.read_df(pandas.concat([df, df]))
sdftr, sdfte = sdf2.train_test_split(test_size=0.5)
df1 = sdfte.head()
df2 = sdfte.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
df1 = sdftr.head()
df2 = sdftr.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
sdf = StreamingDataFrame.read_df(df)
sdf2 = sdf.concat(sdf, axis=0)
sdftr, sdfte = sdf2.train_test_split(test_size=0.5)
df1 = sdfte.head()
df2 = sdfte.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
df1 = sdftr.head()
df2 = sdftr.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
def test_train_test_split_streaming_strat(self):
sdf = dummy_streaming_dataframe(100, asfloat=True,
tify=["t1" if i % 3 else "t0" for i in range(0, 100)])
trsdf, tesdf = sdf.train_test_split(
streaming=True, unique_rows=True, stratify="tify")
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trgr = trdf.groupby("tify").count()
trgr["part"] = 0
tegr = tedf.groupby("tify").count()
tegr["part"] = 1
gr = pandas.concat([trgr, tegr])
self.assertGreater(gr['cfloat'].min(), 4)
def test_train_test_split_file(self):
temp = get_temp_folder(__file__, "temp_train_test_split_file")
names = [os.path.join(temp, "train.txt"),
os.path.join(temp, "test.txt")]
sdf = dummy_streaming_dataframe(100)
sdf.train_test_split(names, index=False, streaming=False)
trsdf = StreamingDataFrame.read_csv(names[0])
tesdf = StreamingDataFrame.read_csv(names[1])
self.assertGreater(trsdf.shape[0], 20)
self.assertGreater(tesdf.shape[0], 20)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
self.assertGreater(trdf.shape[0], 20)
self.assertGreater(tedf.shape[0], 20)
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cint").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
def test_train_test_split_file_pattern(self):
temp = get_temp_folder(__file__, "temp_train_test_split_file_pattern")
sdf = dummy_streaming_dataframe(100)
names = os.path.join(temp, "spl_{0}.txt")
self.assertRaise(lambda: sdf.train_test_split(
names, index=False, streaming=False), ValueError)
names = os.path.join(temp, "spl_{}.txt")
tr, te = sdf.train_test_split(names, index=False, streaming=False)
trsdf = StreamingDataFrame.read_csv(tr)
tesdf = StreamingDataFrame.read_csv(te)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = | pandas.concat([trdf, tedf]) | pandas.concat |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
from ..nbtools.utility import export
from .._version import VERSION
__version__ = VERSION
__author__ = "<NAME>"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
data: Any,
cluster_columns: List[Any] = None,
verbose: bool = False,
normalize: bool = True,
time_column: str = "TimeCreatedUtc",
max_cluster_distance: float = 0.01,
min_cluster_samples: int = 2,
**kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
"""
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
"""
allowed_types = [np.ndarray, pd.DataFrame]
x_input = None
if isinstance(data, pd.DataFrame):
if cluster_columns is None:
x_input = data.values
else:
x_input = data[cluster_columns].values
elif isinstance(data, np.ndarray):
if cluster_columns is None:
x_input = data
else:
            x_input = data[:, cluster_columns]
if x_input is None:
mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
type_list = ", ".join([str(t) for t in allowed_types])
mssg = mssg.format(str(type(data)), type_list)
raise ValueError(mssg)
# Create DBSCAN cluster object
db_cluster = DBSCAN(
eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
)
# Normalize the data (most clustering algorithms don't do well with
# unnormalized data)
if normalize:
x_norm = Normalizer().fit_transform(x_input)
else:
x_norm = x_input
# fit the data set
db_cluster.fit(x_norm)
labels = db_cluster.labels_
cluster_set, counts = np.unique(labels, return_counts=True)
if verbose:
print(
"Clustering for set size ",
len(x_norm),
" - ",
len(cluster_set),
" clusters",
)
print("Individual cluster sizes: ", ", ".join([str(c) for c in counts]))
clustered_events = _merge_clustered_items(
cluster_set, labels, data, time_column, counts
)
if verbose:
print("Cluster output rows: ", len(clustered_events))
return clustered_events, db_cluster, x_norm
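# Interpreting the output (illustrative sketch, hypothetical variable names): each noise event
# (DBSCAN label -1) comes back as its own row with Clustered=False and ClusterSize=1, while each
# real cluster contributes a single exemplar row carrying its member count, so for example:
#     clus_events, _, _ = dbcluster_events(data=feats, cluster_columns=feature_cols)
#     rare_events = clus_events[clus_events["ClusterSize"] <= 1]
#     common_patterns = clus_events[clus_events["Clustered"]].sort_values("ClusterSize", ascending=False)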
def _merge_clustered_items(
cluster_set: np.array,
labels: np.array,
data: Union[pd.DataFrame, np.array],
time_column: str,
counts: np.array,
) -> pd.DataFrame:
"""
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
"""
cluster_list = []
# Iterate through clusters, adding exemplar to output frame
# pylint: disable=consider-using-enumerate
# we need to know the index of the item within the loop
for idx in range(len(cluster_set)):
cluster_id = cluster_set[idx]
class_members = labels == cluster_id
if isinstance(data, pd.DataFrame):
time_ordered = data[class_members].sort_values(time_column, ascending=True)
first_event_time = time_ordered[0:][time_column].iat[0]
last_event_time = time_ordered[-1:][time_column].iat[0]
else:
first_event_time = None
last_event_time = None
if cluster_id == -1:
# 'Noise' events are individual items that could not be assigned
# to a cluster and so are unique
cluster_list.append(
data[class_members]
.assign(
Clustered=False,
ClusterId=cluster_id,
ClusterSize=1,
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)
.astype(
dtype={
"TimeGenerated": "datetime64[ns]",
"FirstEventTime": "datetime64[ns]",
"LastEventTime": "datetime64[ns]",
}
)
)
else:
# Otherwise, just choose the first example of the cluster set
cluster_list.append(
data[class_members]
.assign(
Clustered=True,
ClusterId=cluster_id,
ClusterSize=counts[idx],
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)[0:1]
.astype(
dtype={
"TimeGenerated": "datetime64[ns]",
"FirstEventTime": "datetime64[ns]",
"LastEventTime": "datetime64[ns]",
}
)
)
# pylint: enable=consider-using-enumerate
return | pd.concat(cluster_list) | pandas.concat |